Column            Type       Length / size
query             string     9 - 3.4k characters
document          string     9 - 87.4k characters
metadata          dict       -
negatives         sequence   4 - 101 items
negative_scores   sequence   4 - 101 items
document_score    string     3 - 10 characters
document_rank     string     102 distinct values
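Each record pairs a natural-language query with one positive code snippet (document) and a list of mined hard negatives plus their similarity scores. Below is a minimal sketch of loading and inspecting a dataset with this schema via the Hugging Face datasets library; the repository identifier is a placeholder, since the real name is not given here.

```python
# A minimal sketch, assuming the data is published as a Hugging Face dataset;
# the repository name below is a placeholder, not the real identifier.
from datasets import load_dataset

ds = load_dataset("org/code-retrieval-triplets", split="train")  # hypothetical ID

row = ds[0]
print(row["query"])                # natural-language description of the code
print(row["document"])             # the positive code snippet
print(len(row["negatives"]))       # 4-101 hard-negative snippets
print(row["negative_scores"][:3])  # similarity scores aligned with `negatives`
print(row["document_score"], row["document_rank"])
```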
Get a rule violators who have been ejected.
def get_violators(self): return self.violators
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def affecteds(self):\n return [m for m in self.members if m.disease == PedigreeMember.AFFECTED]", "def get_victors(self):\n if self.is_game_over():\n scores = [p.get_score() for p in self.state.get_players()]\n if len(scores) == 0:\n return []\n max_score = max(scores)\n victors = []\n for p in self.state.get_players():\n if p.get_color() not in self.violators and p.get_score() == max_score:\n victors.append(self.players[p.get_color()])\n return victors\n else:\n return None", "def get_rules(self):\n return [phi for psi in self._Psi for phi in psi]", "def losses(self):\n return [g for g in self.games if g.winner is not self.team]", "def player_deaths(self):\n return self.deaths.filter(and_(Death.mindkey != 'null', Death.mindkey != None, Death.mindname != 'Manifested Ghost'))", "def teammates(self):\n return [\n p for p in self.roster.participants\n if p.participant_id != self.participant_id\n ]", "def end_effectors(self) -> list:\n S = self.parents\n return [[x, f\"q{x[1:]}\"] for x in S if S.out_degree(x) == 0]", "def get_all_volunteers(self):\n volunteers = []\n for user in User.objects.all():\n if not OcAuth(user.id).is_admin():\n volunteers.append(user)\n return volunteers", "def get_critics(self):\n actors = [ddpg_agent.critic for ddpg_agent in self.maddpg_agent]\n return actors", "def end_effectors(self) -> list:\n if not hasattr(self, \"_end_effectors\"):\n S = self.structure\n self._end_effectors = [\n [x, y]\n for x in S\n if S.out_degree(x) == 0\n for y in S.predecessors(x)\n if DIST in S[y][x]\n if S[y][x][DIST] < np.inf\n ]\n\n return self._end_effectors", "def end_effectors(self) -> list:\n if not hasattr(self, \"_end_effectors\"):\n S = self.structure\n self._end_effectors = [\n [x, y]\n for x in S\n if S.out_degree(x) == 0\n for y in S.predecessors(x)\n if DIST in S[y][x]\n if S[y][x][DIST] < np.inf\n ]\n\n return self._end_effectors", "def vulnerabilities(self) -> api.Vulnerabilities:\n return self._get_model(model=api.Vulnerabilities)", "def vitamins(self) -> List[RecipeObjectNutrientsCalories]:\n return self._vitamins", "def getActuators(self):\n return self.listener.actuators", "def filter_rules(self) -> list:\n return self.transform(self._tree), self._rules", "def getParticpants(self):\n return participants", "def get_tower_losses(self, tower, device):\n # Note: Network editor have to maintain each loss with 'loss' and 'vars' if it's a list.\n if isinstance(tower.loss, list):\n return tower.loss\n else:\n tower_vars = []\n trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n if self.replica:\n tower_vars = [var for var in trainable_vars if(var.name.startswith('tower_%d' % device))]\n else:\n tower_vars = trainable_vars\n\n return [{'loss': tower.loss, 'vars': tower_vars}]", "def getInterceptions(self):\n return self.interceptions", "def all_actions(self, player):\n return [m for m in MOVES if m.norm() == 1]", "def poses(self) -> List[T]:\n return self._poses", "def get_vacuum_subtracted_homo(self):\n h_v = []\n for i,v in enumerate(self.HOMO_Energies):\n h_v.append(v - self.vacuum[i])\n print(f\"HOMO-vac: {self.orbital_labels[i]}: {h_v[-1]}\")\n return h_v", "def has_victim(self):\n # first-party\n from tcex.api.tc.v3.victims.victim_filter import VictimFilter\n\n victims = VictimFilter(Tql())\n self._tql.add_filter('hasVictim', TqlOperator.EQ, victims, TqlType.SUB_QUERY)\n return victims", "def affecteds_names(self):\n return [m.name for m in self.affecteds]", "def get_players(self):\r\n return self.players.values()", "def get_losses(self):\n if 
self.loss is not None:\n return [self.loss]\n else:\n return []", "def getPlayers(self):\n return iter(self.players)", "def participants(self):\r\n return Participants(self)", "def get_vulnerable_items(self):\n\n results = []\n\n for action in self.__actions:\n if self.__driver.stopping:\n break\n\n items = action.get_action_items(self.__queue_item)\n\n for item in items:\n if self.__driver.stopping:\n break\n\n if item.get_hash() in self.scanned_hashes:\n continue\n\n self.scanned_hashes.append(item.get_hash())\n\n if self.__is_item_vulnerable(item):\n results.append(item)\n\n return results", "def players(self):\n return Player.objects.filter(team=self)", "def participants(self):\n return Participants(self)", "def damaged_cells(self):\n return self._hit_coors", "def get_editable_explorations(user_id):\n return [e for e in get_viewable_explorations(user_id)\n if e.is_editable_by(user_id)]", "def get_rules_at(self, level):\n if level >= len(self._Psi):\n return []\n return self._Psi[level]", "def get_player_squares(self, player: PlayerColor) -> List[Square]:\r\n return [square for square in self.squares.values() if\r\n square.state == SquareState.OCCUPIED\r\n and square.occupant.owner == player]", "def offres(self) -> QuerySet:\n return Offre.objects.filter(proposition__cagnotte=self, valide=True)", "def getMyArmies(self):\n r = []\n for army in self.__armies:\n if (army.getOwner() == 1):\n r.append(army)\n return r", "def potentials(self):\n return [load_node(item) for item in self.get_attribute('potentials')]", "def _inactiveplayers():\n\n rosters = _activerosters()\n dbrosters = _eidset() # players not in rosters scrape but in db.\n notactive = dbrosters.difference(rosters)\n return notactive", "def get_all(self):\n # s = torch.FloatTensor(self._states).to(device)\n # a = torch.FloatTensor(self._actions).to(device)\n # r = torch.FloatTensor(self._rewards).to(device)\n return self._episodes", "def getOpponents(self, gameState):\n\n if self.red:\n return gameState.getBlueTeamIndices()\n else:\n return gameState.getRedTeamIndices()", "def get_unbroken_instances(self):\n return self._get_cond_instance(cond=0)", "def getVehicles(self):\n return self.vehicles", "def players(self):\n return self._get_by_class(Player)", "def get_winners_of_game(self):\n return self.game_winners", "def rules(self):\n return self._alert_rules_client", "def interceptions(self):\n return self._interceptions", "def getEnemyGhost(self,gameState):\r\n enemyList = []\r\n for enemy in self.getOpponents(gameState):\r\n enemyState = gameState.getAgentState(enemy)\r\n if (not enemyState.isPacman) and enemyState.scaredTimer == 0:\r\n enemyPos = gameState.getAgentPosition(enemy)\r\n if enemyPos != None:\r\n enemyList.append(enemy)\r\n return enemyList", "def edibles(self):\n return self._edibles", "def actuators(self):\n return self._actuators", "def propietarios(self):\n return self.expedientepersona_set.filter(propietario=True)", "def episodes(self):\n episodes = []\n for season in self.seasons:\n episodes.extend(season.episodes)\n return episodes", "def get_events() -> list[Event]:\n g.ledger.changed()\n return [e for e in g.filtered.entries if isinstance(e, Event)]", "def nonplayer_deaths(self):\n return self.deaths.filter(or_(Death.mindkey == 'null', Death.mindkey == None))", "def weapons(self):\n return self._get_by_class(Weapon)", "def _get_episodes(self):\n return [series_episode for series_episode in SeriesEpisode.objects.filter(series=self)]", "def get_dead(self):\n return ReadingSet(set([x for x in self._set if 
not x.alive]))", "def episodes(self):\n episodes = []\n for series in self.series:\n episodes.extend(series.episodes)\n return episodes", "def getVotersToDial(self):\n return self.getVotersToContact().exclude(\n (Q(phone_number1='') | Q(wrong_phone_number1__gt=1)),\n (Q(phone_number2='') | Q(wrong_phone_number2__gt=1)))", "def get_participants(self):\n return self.participants_group.user_set.all()", "def zombies(self):\n return (_ for _ in self._zombie_list)", "def get_instance_essentials(self):\n ret = []\n for instance in self.all_instances:\n ret.append(instance.get_essentials())\n return ret", "def zombies(self):\n return (zombie for zombie in self._zombie_list)", "def all_rel_actions(self, player):\n return [m for m in MOVES if m.norm() == 1 and m.direction() != (0,-1)]", "def check_all_preservations(cls, bijection: BijectionType) -> Iterator[str]:\n return (stats.name for stats in cls._get_all() if stats.preserved_in(bijection))", "def comitentes(self):\n return self.expedientepersona_set.filter(comitente=True)", "def turtles(self):\n return self._turtles[:]", "def pred(self):\n return [ self.simple_reflection(i) for i in self.descents() ]", "def players(self) -> List[Player]:\n return [self.white_player, self.black_player]", "def probes(self):\r\n return probes.Probes(self)", "def get_remaining_events(index_disappeared,to_destroy):\n index_cp = index_disappeared[:]\n for i,deb,fin in to_destroy:\n index_cp = [(x,y,z) for x,y,z in index_cp if (x!=deb and x!=fin)]\n return index_cp", "def reactants(self):\n return [k for k, v in iteritems(self._metabolites) if v < 0]", "def get_actors(self):\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors", "def get_actors(self):\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors", "def get_actors(self):\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors", "def get_actors(self):\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors", "def get_equipment_from_inventory(self):\n return [x for x in self.inventory if x.is_equip()]", "def losses(self):\n for name in self._nodes:\n if isinstance(self._nodes[name], Loss):\n yield name", "def players(self):\n return self._get(\"players\")", "def others(state, alive=True):\n me = state['current_player']\n all_players = state['gladiators']\n others = {i: g for i, g in enumerate(all_players) if i != me}\n\n if alive:\n others = {i: g for i, g in others.items() if g['cur_hp'] > 0}\n\n return others", "def get_administrator_receivers(self):\n result = []\n services = self.find_tags(\"receiver\")\n for s in services:\n for action in s.findall(\"./intent-filter/action\"):\n if \"android.app.action.DEVICE_ADMIN_ENABLED\" in action.attrib.values():\n result.append(s.attrib['{http://schemas.android.com/apk/res/android}name'])\n # print(result)\n return result", "def get_agents(self):\n ret = []\n for i in self.all_instances:\n if i.instance_type == InstanceType.AGENT:\n ret.append(i)\n return ret", "def get_tautomers_of(chebi_ent):\n if hasattr(chebi_ent, 'OntologyParents'):\n return [ent.chebiId for ent in chebi_ent.OntologyParents if\n (ent.type == \"is tautomer of\")]\n else:\n return []", "def potential_mentors(self):\n import mysite.profile.controllers\n mentor_set = set(mysite.profile.controllers.people_matching(\n 'can_mentor', self.name))\n mentor_set.update(mysite.profile.controllers.people_matching(\n 'can_mentor', self.language))\n return mentor_set", "def getAvatars(self):\n return [s 
for s in self if isinstance(s, Avatar) and s not in self.kill_list]", "def get_people_invited(self, users):\n invited = []\n for user in users:\n if Room.verify_if_is_invited(user):\n invited.append(user)\n return invited", "def getPUsers(self):\n model = self.tvPUsers.get_model()\n result = []\n model.foreach(lambda model, path, iter, data:\n result.append(model.get(iter, 0)[0]), None)\n result.sort()\n return result", "def knobs(self):\n return self.Knobs(self)", "def etls(self):\r\n return self._etls", "def actors(self):\n return self._actors", "def _findRaven(self):\n\t\tresult = [device for device in comports()\n\t\t\tif device.pid == RAVEN_PID and device.vid == RAVEN_VID]\n\t\treturn result", "def next_autosteps(self) -> List[FlowNode]:\n return [node for predicate, node in self._current_step.children if predicate(self.ctx)]", "def get_exercises(self):\n exercises = set()\n for er in self.exercise_recordings:\n if er.exercise not in exercises:\n exercises.add(er.exercise)\n return list(exercises)", "def get_opponents(self, game_state):\n if self.red:\n return game_state.get_blue_team_indices()\n else:\n return game_state.get_red_team_indices()", "def diferencias(self):\n return self._diferencias", "def get_sensors(self):\n sensors = set()\n for er in self.exercise_recordings:\n for sensor in er.sensors:\n if sensor not in sensors:\n sensors.add(sensor)\n return list(sensors)", "def get_rules(self):\n # TODO: Implement\n self.traverse2(self.tree, [])\n return self.rules", "def getPlayers(self):\n players = []\n for pgp in self.sandboxplayergroupplayer_set.filter(quit=False):\n players.append(pgp.player)\n return players", "def get_toolchanges(move: Move, cave: Cave) -> List[Move]:\n distance, point, equipment = move\n tools = [t for t in [\"torch\", \"climbing\", \"neither\"] if t != equipment]\n return [(distance+7, point, e) for e in tools if cave.can_traverse(e, point)]", "def potential_new_obs(self) -> Set[GriddedPerm]:\n subobs: Set[GriddedPerm] = set()\n for ob in self._tiling.obstructions:\n subobs.update(ob.all_subperms(proper=True))\n subobs.remove(GriddedPerm.empty_perm())\n return subobs", "def getEssentialList(self):\n return self.essentials" ]
[ "0.6156995", "0.59065264", "0.5629059", "0.5474041", "0.53551775", "0.52045214", "0.5177488", "0.50929904", "0.5079941", "0.5059264", "0.5059264", "0.5011072", "0.4973407", "0.49627775", "0.4929745", "0.4920347", "0.4912848", "0.4911266", "0.4906332", "0.48964974", "0.4874137", "0.48723215", "0.48631057", "0.4855622", "0.48411027", "0.48322302", "0.48310977", "0.4827803", "0.48269886", "0.4825373", "0.47897106", "0.47784975", "0.47778884", "0.47761643", "0.47759217", "0.47708362", "0.47634086", "0.47573698", "0.47513336", "0.475052", "0.47477248", "0.47460333", "0.47364792", "0.47288537", "0.47261128", "0.4724279", "0.47167438", "0.47109506", "0.47106782", "0.47064927", "0.46985877", "0.46947917", "0.46920544", "0.46847796", "0.46828055", "0.46733272", "0.46724734", "0.46708828", "0.46625853", "0.4661126", "0.46482342", "0.46474192", "0.4645856", "0.46378133", "0.4627539", "0.46181917", "0.46181563", "0.46125996", "0.46111548", "0.46090242", "0.46018964", "0.4601506", "0.4601506", "0.4601506", "0.4601506", "0.46011597", "0.45988157", "0.45966458", "0.45865852", "0.45723316", "0.4567558", "0.45674324", "0.45668784", "0.45651972", "0.45639893", "0.45597753", "0.45573854", "0.45573354", "0.45571852", "0.45446438", "0.45347196", "0.4522911", "0.4521962", "0.45191815", "0.45095766", "0.45095453", "0.45093215", "0.45088172", "0.4507626", "0.4506632" ]
0.61693317
0
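The objective metadata in each record marks it for triplet-style contrastive training over (query, document, negatives). Below is a minimal sketch of how one such triplet could feed a margin loss; the stand-in encoder, embedding size, and margin value are illustrative assumptions, not part of the dataset.

```python
# Illustrative triplet margin loss over one (query, document, negatives) record.
# The encoder is a deterministic stand-in; a real setup would use a trained
# text/code embedding model.
import torch
import torch.nn.functional as F

def encode(texts, dim=128):
    # Stand-in: pseudo-embeddings seeded from the text, just to keep the sketch runnable.
    gens = [torch.Generator().manual_seed(hash(t) % (2**31)) for t in texts]
    return torch.stack([torch.randn(dim, generator=g) for g in gens])

def triplet_loss(query, document, negatives, margin=0.2):
    q = F.normalize(encode([query]), dim=-1)       # (1, dim)
    pos = F.normalize(encode([document]), dim=-1)  # (1, dim)
    neg = F.normalize(encode(negatives), dim=-1)   # (n, dim)
    pos_sim = (q * pos).sum(-1)                    # cosine similarity to the positive
    neg_sim = (q @ neg.T).squeeze(0)               # cosine similarity to each negative
    # Hinge: every negative should trail the positive by at least `margin`.
    return F.relu(margin - pos_sim + neg_sim).mean()

loss = triplet_loss(
    "Get a rule violators who have been ejected.",
    "def get_violators(self):\n    return self.violators",
    ["def affecteds(self):\n    return [m for m in self.members if m.disease == PedigreeMember.AFFECTED]"],
)
print(float(loss))
```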
Get a copy of the current state.
def get_current_state(self): return deepcopy(self.state)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_state(self):\n return copy.deepcopy(self._state)", "def __getstate__(self):\n state = self.__dict__.copy()\n self.__cleanState__(state)\n return state", "def state(self):\n return self._state.copy()", "def get_state(self, deepcopy: bool = True):\n s = self.cache_ if hasattr(self, \"cache_\") else {}\n return copy.deepcopy(s) if deepcopy else s", "def clone_state(self):\n return self.strategy['state_handler'].clone(self.state)", "def __getstate__(self):\n state = self.__dict__.copy()\n del state['_view']\n return state", "def copy(self):\n return State([r[:] for r in self.values], empty_loc=self.empty_loc)", "def __getstate__(self):\n copy = self.__dict__.copy()\n copy['_workaround'] = None\n return copy", "def clone_full_state(self):\n state_ref = self.ale.cloneSystemState()\n state = self.ale.encodeState(state_ref)\n self.ale.deleteState(state_ref)\n return state", "def clone_state(self):\n state_ref = self.ale.cloneState()\n state = self.ale.encodeState(state_ref)\n self.ale.deleteState(state_ref)\n return state", "def copy(self):\n state = State(self.state_object, self.compute_dag)\n state.stage_id_map = self.stage_id_map.copy()\n return state", "def __getstate__(self):\n\n\t\tresult = self.__dict__.copy()\n\n\t\t# Do not pickle references to mutable objects.\n\t\tdel result['_device']\n\t\tdel result['resources']\n\n\t\treturn result", "def _state(self):\n return self.State(self.copy._array, self._turn, copy(self._score))", "def __copy__(self):\n new = FSMState(self.label(), self.word_out,\n self.is_initial, self.is_final,\n color=self.color,\n final_word_out=self.final_word_out,\n initial_probability=self.initial_probability)\n if hasattr(self, 'hook'):\n new.hook = self.hook\n return new", "def __getstate__(self):\n return self.__dict__", "def __getstate__(self):\n try: \n state = self.__dict__.copy()\n del state['_Funcs']\n return state\n except: \n return self.__dict__", "def copy(self):\n return self.mutate().simple_copy()", "def clone(self) -> 'State':\n return State(self.sim, state=self.get_state().copy())", "def _get_random_state(self):\n self._validate_random_state()\n return deepcopy(self.random_state)", "def __getstate__(self):\n state = self.__dict__\n state['_lock'] = None\n return state", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def copy(self):\r\n return copy.copy(self)", "def copy_model_state(model):\n model_state = deepcopy(model.state_dict())\n return model_state", "def copy (self):\n import copy\n return copy.copy(self)", "def copy(self):\n return copy.copy(self)", "def copy(self):\n return copy.copy(self)", "def copy(self):\n return copy.copy(self)", "def __getstate__(self):\n return self._", "def __getstate__(self):\n state: Dict[str, Any] = deepcopy(self.__dict__)\n del state['__db']\n return state", "def _copy_(self):\n return copy.copy(self)", "def __getstate__(self):\n\t\treturn self", "def copy(self):\n new_state = State(self.final)\n for symbol in self.children:\n child = self.children[symbol]\n new_state.add_child(child.copy(), symbol)\n return new_state", "def get_state(self) -> numpy.ndarray:\n if self.clone_seeds:\n return self.gym_env.unwrapped.clone_full_state()\n else:\n return self.gym_env.unwrapped.clone_state()", "def __copy__(self):\n return self.copy()", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def 
_get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state" ]
[ "0.8455732", "0.79240113", "0.78669524", "0.78233796", "0.76678777", "0.76537627", "0.7607961", "0.75942713", "0.7512252", "0.74859667", "0.74796313", "0.7326282", "0.7263944", "0.7236717", "0.7214216", "0.7181021", "0.7169202", "0.71566755", "0.715247", "0.7142748", "0.711405", "0.711405", "0.711405", "0.711405", "0.71017504", "0.70957786", "0.7092429", "0.70810586", "0.70810586", "0.70810586", "0.70745337", "0.7044669", "0.7042751", "0.7040793", "0.7032002", "0.70235896", "0.70172846", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055", "0.7008055" ]
0.81724
1
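Taken together, the score columns suggest that document_rank counts how many negatives score higher than the positive: 0 in the first record, and 1 in the second, where a single negative (0.8455732) beats the positive's 0.81724. Assuming that reading, the published rank can be reproduced directly from the scores:

```python
# Assuming document_rank = number of negatives whose score exceeds document_score.
def infer_rank(document_score, negative_scores):
    return sum(1 for s in negative_scores if s > document_score)

# Second record above: only the first negative (0.8455732) outscores 0.81724.
print(infer_rank(0.81724, [0.8455732, 0.79240113, 0.78669524, 0.78233796]))  # -> 1
```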
A decorator that wraps the passed in function and raises exception if headers with token is missing
def require_auth(function):
    @functools.wraps(function)
    def wrapper(self, *args, **kwargs):
        if not self.headers:
            raise LoginRequiredError
        return function(self, *args, **kwargs)
    return wrapper
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def token_required(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n try:\n token = request.headers['token']\n try:\n decoded = decode_token(token)\n except jwt.ExpiredSignatureError:\n return jsonify({\"message\": \"token expired\"}), 401\n except jwt.InvalidSignatureError:\n return jsonify({\"message\": \"Signature verification failed\"}), 401\n except jwt.InvalidTokenError:\n return jsonify({\"message\": \"Invalid Token verification failed\"}), 401\n except KeyError:\n return jsonify({\"message\": \"Missing token\"}), 401\n return func(*args, **kwargs)\n return wrapper", "def token_required(f):\n\n @wraps(f)\n def decorated(*args, **kwargs):\n \"\"\"validate token provided\"\"\"\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if token is None:\n return make_response(jsonify({\"message\" : \"Please sign-up and login\"}), 401)\n\n try:\n data = jwt.decode(token, Config.SECRET)\n except:\n return make_response(jsonify({\n \"message\" : \"kindly provide a valid token in the header\"}), 401)\n return f(*args, **kwargs)\n\n return decorated", "def token_required(func):\n def func_wrapper(self, *args, **kwargs):\n auth_token = self.request.headers.get('X-Auth-Token',\n self.request.get('token', ''))\n namespace = self.request.route_kwargs.get('namespace', '')\n try:\n token = base64.urlsafe_b64decode(str(auth_token))\n except TypeError:\n self.abort(412, 'Please update your token')\n try:\n token = auth_models.AuthToken.query(\n auth_models.AuthToken.token == token\n ).get()\n except datastore_errors.BadValueError:\n self.abort(401, 'Incorrect token')\n try:\n payload = jwt.decode(token.token, config.JWT_SECRET,\n algorithms=config.JWT_HASH_ALGORITHM)\n except (jwt.DecodeError, AttributeError):\n return self.abort(401)\n if payload['namespace'] != namespace:\n return self.abort(412, 'Token payload is incorrect.')\n return func(self, *args, **kwargs)\n return func_wrapper", "def requiresAuth(f):\n def decorated(*args, **kwargs):\n\n try:\n token = jwt_token_from_header()\n except AuthorizationError:\n abort(400, 'no autorization')\n\n try:\n token_decoded = jwt.decode(token, secret)\n args+=(token_decoded,)\n except jwt.ExpiredSignature:\n abort(401, 'token is expired')\n except jwt.DecodeError:\n abort(401, 'Error decoding signature')\n \n return f(*args, **kwargs)\n \n return decorated", "def authorized(fn):\n\n def _wrap(*args, **kwargs):\n if 'Authorization' not in request.headers:\n # Unauthorized\n print(\"No token in header\")\n abort(401)\n\n\n if key not in request.headers['Authorization']:\n # Unauthorized\n print(\"Key not in auth header\")\n abort(401)\n\n return fn(*args, **kwargs)\n return _wrap", "def validate_token(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n \"\"\"Tries to decode the JWT token using the SECRET KEY.\n\n Executes the original function if token is valid.\n\n Otherwise returns HTTP 401 to the Client.\n\n \"\"\"\n token = request.headers.get('token')\n\n try:\n jwt.decode(token, app.config['SECRET_KEY'])\n return func(*args, **kwargs)\n except jwt.DecodeError:\n message = 'Token is missing / invalid'\n except jwt.exceptions.ExpiredSignatureError:\n message = 'Token has expired'\n\n\n return Response(\n json.dumps({'error': message}),\n 401,\n mimetype='application/json'\n )\n\n return wrapper", "def check_token(fn):\n def response(self, *args, **kw):\n if not JWT_DISABLED:\n intoken = get_token_from_header()\n try:\n jwt.decode(intoken, SECRET_KEY)\n except jwt.exceptions.DecodeError:\n 
raise Error(FORBIDDEN)\n except jwt.ExpiredSignatureError:\n raise Error(UNAUTHORIZED, msg=\"Signature expired.\")\n except jwt.InvalidTokenError:\n raise Error(UNAUTHORIZED, msg=\"Invalid token.\")\n return fn(self, *args, **kw)\n return response", "def token_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n token = request.headers.get('Authorization', default=None)\n\n if token is None or token == '':\n return jsonify({\n 'error': 'token missing from request'\n }), 401\n\n # Validate that this token is legit\n try:\n reddit_client = RedditClient(\n client_id=app.config['CLIENT_ID'],\n client_secret=app.config['CLIENT_SECRET'],\n token=token\n )\n\n authenticated_user = reddit_client.authenticated_user\n except RedditClientAuthenticationException:\n return jsonify({\n 'error': 'invalid token'\n }), 401\n except RedditClientException:\n return jsonify({\n 'error': 'invalid token'\n }), 401\n\n return f(*args, **kwargs)\n\n return decorated", "def decorated(*args, **kwargs):\n\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if token is None:\n return make_response(jsonify({\n \"message\" : \"Please sign-up and login\"}), 401)\n\n try:\n data = jwt.decode(token, Config.SECRET)\n driver = data['is_driver']\n except:\n return make_response(jsonify({\n \"message\" : \"kindly provide a valid token in the header\"}), 401)\n\n if not driver:\n return make_response(jsonify({\n \"message\" : \"you are not authorized to perform this function as a non-driver user\"}), 401)\n\n return f(*args, **kwargs)", "def validate_token(func):\n\n def wrapper(*args, **kwargs):\n # args[0] should be O365ManagementApi (self) because this function is\n # called from the O365ManagementApi class.\n try:\n if args[0].token.expiresOn < datetime.now():\n args[0].token = args[0].get_token()\n do_func = func(*args, **kwargs)\n return do_func\n except AttributeError as a:\n raise AttributeError(\"{0}: Existing token not valid or empty\".format(a))\n\n return wrapper", "def authenticate(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n access_token = request.headers.get('token', '')\n if access_token.strip(' '):\n decoded = decode_token(access_token)\n if decoded['status']:\n return func(*args, **kwargs)\n abort(http_status_code=401, message='Invalid token.Please login')\n abort(http_status_code=401,\n message='Token is missing')\n return wrapper", "def token_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n \"\"\"Check if token is genuine\"\"\"\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if not token:\n return jsonify({\"message\":\"Token is missing!\"}), 401\n try:\n data = jwt.decode(token, app.config['SECRET_KEY'])\n current_user = User.query.filter_by(public_id=data['public_id']).first()\n except:\n return jsonify({\"message\":\"Token is invalid\"}), 401\n return f(current_user, *args, **kwargs)\n\n return decorated", "def token_required(func):\n @wraps(func)\n def decorator(*args,**kwargs):\n token = request.headers.get('x-access-token') or request.headers.get('X-Access-Token')\n\n if not token:\n abort(400,description=\"Token Missing\")\n \n try:\n data = jwt.decode(token,current_app.config['SECRET_KEY'],algorithms=[\"HS256\"])\n curr_user = Users.query.filter_by(public_key=data[\"public_key\"]).first()\n token = BlacklistToken.query.filter_by(token=token).first()\n if token:\n abort(401,description=\"Invalid Token\")\n except:\n abort(401,\"Invalid token\")\n return 
func(curr_user,*args,**kwargs)\n return decorator", "def jwt_required_extended(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n try:\n verify_jwt_in_request()\n except IndexError:\n return ErrorObject.create_response(\n ErrorObject, HTTPStatus.UNAUTHORIZED,\n 'No token provided in the format of \"Bearer <JWT>\"')\n token = get_jwt_identity()\n if token['is_user_token'] is False:\n from api.services.data_source_token import \\\n DataSourceTokenService\n _token_usage_counter_add(token['data_source_token']['id'])\n if not DataSourceTokenService.check_if_token_is_active(\n DataSourceTokenService, token['data_source_token']['id']):\n return ErrorObject.create_response(ErrorObject,\n HTTPStatus.FORBIDDEN,\n 'Token has been revoked')\n return fn(*args, **kwargs)\n\n return wrapper", "def decorator(*args, **kwargs):\n token = request.args.get('token') # taking token as an argument\n if not token:\n return jsonify({'message': \"Token is missing\"}), 403 # if token not provided in the url\n try:\n data = jwt.decode(token, app.config['SECRET_KEY']) # check if token decoded successfully\n user = functions.get_user(data)\n if user['Login'] == \"False\":\n return jsonify({'message': 'Token is expired'}), 403\n except:\n return jsonify({'message': 'Token is invalid'}), 403 # if token not decoded that means token is invalid\n return f(*args, **kwargs)", "def wrapper(*args, **kwargs):\n token = request.headers.get('token')\n\n try:\n jwt.decode(token, app.config['SECRET_KEY'])\n return func(*args, **kwargs)\n except jwt.DecodeError:\n message = 'Token is missing / invalid'\n except jwt.exceptions.ExpiredSignatureError:\n message = 'Token has expired'\n\n\n return Response(\n json.dumps({'error': message}),\n 401,\n mimetype='application/json'\n )", "def exception(function):\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n try:\n return function(*args, **kwargs)\n except requests.exceptions.HTTPError as e:\n log.error(e)\n except requests.exceptions.RequestException as e: \n log.error(e)\n return wrapper", "def require_authentication(f):\n def wrapper(*args, **kwargs):\n logger.info('Validating jwt')\n if request.method == 'POST':\n jwt_bearer = request.get_json()['jwt-bearer']\n logger.info(jwt_bearer)\n else:\n jwt_bearer = request.args['jwt-bearer']\n logger.info(jwt_bearer)\n if jwt_bearer:\n validate = requests.get(SERVICES['AUTHENTICATION']['VALIDATE'], params={'jwt': jwt_bearer}, headers={'Authorization':'Bearer ' + JWT}).json()\n if validate['ack'] == 'true':\n kwargs['service_name'] = validate['audience']\n return f(*args, **kwargs)\n return {'ack': 'false',\n 'msg': 'Authentication Requited.'}, 403\n return wrapper", "def token_required(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n #wrapper of current func\n\n #take csrf-token from cookies\n token = request.cookies.get('token')\n if not token:\n #returning login page\n flash(\"Authentication required\", category='danger')\n return redirect(url_for('login'))\n #decoding token\n try:\n uuid = jwt.decode(token, app.config['SECRET_KEY'], algorithms=[\"HS256\"])['user_id']\n except:\n #returning login page\n flash(\"Token timeout\", category='danger')\n return redirect(url_for('login'))\n #get current user\n user = User.query.filter_by(uuid=uuid).first()\n if not user:\n #returning login page\n flash(\"Profile error\", category='danger')\n return redirect(url_for('login'))\n return func(self, *args, **kwargs)\n\n return wrapper", "def wrapped(*args, **kwargs):\n validate_auth_header(request.headers)\n\n return 
func(*args, **kwargs)", "def decorated(*args, **kwargs):\n\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if token is None:\n return make_response(jsonify({\"message\": \"Please sign-up and login\"}), 401)\n\n try:\n data = jwt.decode(token, Config.SECRET)\n admin = data['is_admin']\n except:\n return make_response(jsonify({\"message\": \"kindly provide a valid token in the header\"}), 401)\n\n if not admin:\n return make_response(\n jsonify({\"message\": \"you are not authorized to perform this function as a non-admin user\"}), 401)\n\n return f(*args, **kwargs)", "def decorated(*args, **kwargs):\n if AUTH_HEADER not in request.headers:\n return 'Unauthorized', 401\n\n token = request.headers[AUTH_HEADER]\n try:\n if _udpate_session(token):\n return f(*args, **kwargs)\n else:\n return 'Unauthorized', 401\n except:\n return 'Server Error', 500", "def requires_auth(f):\n\n @wraps(f)\n def decorated(*args, **kwargs):\n try:\n token, secret = get_auth_data()\n jwt.decode(token, secret)\n return f(*args, **kwargs)\n except:\n abort(401)\n return decorated", "def check_authentication(function_to_decorate):\r\n @wraps(function_to_decorate)\r\n def decorated_function(*args, **kwargs):\r\n if not hasattr(g, \"my\"):\r\n abort(401)\r\n return function_to_decorate(*args, **kwargs)\r\n return decorated_function", "def check_headers(f):\n def wrapped_f(*args, **kwargs):\n if request.method in ('POST', 'PATCH'):\n if request.headers['Content-Type'] != 'application/vnd.api+json':\n error = json.dumps(jsonapi_errors([{'source': '',\n 'detail': \"Content-Type header must be application/vnd.api+json\",\n 'title': 'InvalidRequestHeader',\n 'status': 415}]))\n return make_response(error, 415, {'Content-Type': 'application/vnd.api+json'})\n if request.headers.get('Accept') and request.headers['Accept'] != 'application/vnd.api+json':\n error = json.dumps(jsonapi_errors([{'source': '',\n 'detail': \"Accept header must be application/vnd.api+json\",\n 'title': 'InvalidRequestHeader',\n 'status': 406}]))\n return make_response(error, 406, {'Content-Type': 'application/vnd.api+json'})\n return f(*args, **kwargs)\n return wrapped_f", "def jwt_or_local_only(fn):\r\n @wraps(fn)\r\n def wrapper(*args, **kwargs):\r\n try:\r\n verify_jwt_in_request()\r\n except (CSRFError, FreshTokenRequired, InvalidHeaderError, NoAuthorizationError,\r\n UserLoadError) as ex:\r\n if request.remote_addr != '127.0.0.1':\r\n raise ex\r\n return fn(*args, **kwargs)\r\n return wrapper", "def includes_user(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if 'Authorization' not in request.headers or \\\n not request.headers['Authorization'].startswith('Bearer '):\n abort(401, \"missing or invalid authorization token\")\n else:\n token = request.headers['Authorization'][len('Bearer '):]\n u = User.verify_auth_token(token)\n if u is None:\n abort(401, \"missing or invalid authorization token\")\n else:\n return f(u, *args, **kwargs)\n return decorated_function", "def login_required_for_token(f):\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if g.USER is None:\n return redirect(url_for(\"api_v1_login\", next=request.url))\n return f(*args, **kwargs)\n\n return decorated_function", "def decoratedCheckToken(*args, **kwargs):\n if \"token\" not in request.headers:\n raise InvalidUsage(\"Must pass a token!\")\n\n # Execute if the token matches\n logger.debug(\"Token: {0}\".format(request.headers[\"token\"]))\n if request.headers[\"token\"] == 
receiverParameters[\"apiToken\"]:\n return func(*args, **kwargs)\n\n # Note that it is invalid otherwise\n raise InvalidUsage(\"Invalid token!\")", "def jwt_protected(fn):\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n auth_header = request.headers.get('Authorization', '')\n token = auth_header.strip().replace('Bearer ', '')\n try:\n payload = jwt.decode(token, current_app.config['SECRET_KEY'], algorithms=['HS256'])\n entity_id = payload.get('sub')\n if AnimalCenter.query.get(entity_id) is None:\n raise CenterLookupError(f'Claimed center {entity_id} does not exist')\n g.entity_id = entity_id\n except (DecodeError, ExpiredSignatureError, CenterLookupError):\n abort(make_response(jsonify({'error': 'invalid auth credentials'}), 401))\n return fn(*args, **kwargs)\n return wrapper", "def check_csrf_token(func):\n def new_fn(self, req):\n if 'csrf_token' not in req.params:\n return exc.HTTPForbidden(\"You must provide a CSRF token\")\n\n csrf_token = req.params['csrf_token']\n if not security.valid_csrf_token(csrf_secret, csrf_token):\n return exc.HTTPForbidden(\"Invalid CSRF token\")\n\n return func(self, req)\n\n new_fn.exposed = True\n return new_fn", "def auth_guard(endpoint):\n\n @wraps(endpoint)\n def wrap(*args, **kwargs):\n try:\n # Gets user access token from header\n # Throws an exception if token expires\n auth = request.headers.get('Authorization')\n\n if auth is None:\n response = {\n \"error_message\": \"Access Token Required\"\n }\n return json.dumps(response), 500\n\n access_token = request.headers.get('Authorization').split(' ')[1]\n jwt.decode(access_token, os.getenv('JWT_SECRET'), algorithms=[\"HS256\"])\n\n return endpoint(*args, **kwargs)\n except jwt.ExpiredSignatureError:\n print('User access JWT has expired')\n return json.dumps({ 'error': 'Token Expired'}), 401\n\n return wrap", "def access_token_required(handler_method):\n\n def wrapper(self, *args, **kwargs):\n \"\"\" Verifies the existence and validity of an access token before calling the decorated\n handler\n\n Parameters:\n :param args: the arguments for the decorated function\n :param kwargs: the keyword arguments for the decorated function\n\n Returns:\n :return: the decorated function result if the access token was valid; otherwise it\n send an error response and returns None\n \"\"\"\n\n if self.request.method in ['GET', 'DELETE']:\n access_token = self.request.get('accessToken')\n else:\n try:\n access_token = loads(self.request.body).get('accessToken')\n except ValueError:\n access_token = None\n if access_token is None or len(access_token) is 0:\n self.write_error(401, 'No access token provided')\n return None\n try:\n application = get_application_key(access_token)\n except (TypeError, ValueError):\n self.write_error(401, 'Invalid access token')\n return None\n if application is not None:\n return handler_method(self, *args, **kwargs)\n else:\n self.write_error(401, 'Invalid access token')\n return None\n\n return wrapper", "def _token_header(token=None):\n if not token:\n return None\n\n message = '{token}:Ignored'.format(token=token)\n headers = {'Authorization': 'Basic {code}'.format(\n code=base64.b64encode(message))}\n return headers", "def requires_auth(logger, key, algorithms = 'HS256'):\n def decorator(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n try:\n # get JWT token:\n token = get_token()\n # authentication:\n payload = decode_jwt(token, key, algorithms)\n except JWTError as e:\n # add to log:\n logger.error(e.error[\"description\"])\n # abort:\n abort(e.status_code, 
description=e.error[\"description\"])\n return f(payload, *args, **kwargs)\n return decorated_function\n return decorator", "def check_authorized(f):\n @functools.wraps(f)\n def wrapper(self, addr, request):\n if not self.sessions[addr].get(\"authorized\"):\n return Header.ERROR, Error.FORBIDDEN_REQUEST\n else:\n return f(self, addr, request)\n\n return wrapper", "def driver_required(f):\n\n @wraps(f)\n def decorated(*args, **kwargs):\n \"\"\"validate token provided and ensures the user is an admin\"\"\"\n\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if token is None:\n return make_response(jsonify({\n \"message\" : \"Please sign-up and login\"}), 401)\n\n try:\n data = jwt.decode(token, Config.SECRET)\n driver = data['is_driver']\n except:\n return make_response(jsonify({\n \"message\" : \"kindly provide a valid token in the header\"}), 401)\n\n if not driver:\n return make_response(jsonify({\n \"message\" : \"you are not authorized to perform this function as a non-driver user\"}), 401)\n\n return f(*args, **kwargs)\n\n return decorated", "def function_call(f):\n def args_call(*args, **kw):\n handler = args[0]\n helper = RequestHelper(handler)\n if not users.get_current_user():\n helper.header(error_header, error_msg)\n helper.error(403)\n else:\n f(*args, **kw)\n return args_call", "def csrf_protection(fn):\n def protected(*args):\n if 'X-Requested-With' in request.headers:\n return fn(*args)\n else:\n return \"X-Requested-With header missing\", HTTPStatus.FORBIDDEN\n return protected", "def requires_contents(logger, key, algorithm = 'HS256'):\n def decorator(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n try:\n # get contents:\n contents = get_contents()\n # authentication:\n token = encode_jwt(contents, key, algorithm)\n except JWTError as e:\n # add to log:\n logger.error(e.error[\"description\"])\n # abort:\n abort(e.status_code, description=e.error[\"description\"])\n return f(token, *args, **kwargs)\n return decorated_function\n return decorator", "def log_headers(f):\n def wrapper(fself, *arguments, **keywords):\n import logging\n\n for header_key, header_value in fself.request.headers.items():\n logging.info(header_key + \": \" + header_value)\n\n # Call the underlying function with the parameter added\n return f(fself, *arguments, **keywords)\n\n return wrapper", "def invalid_auth_token_header():\n headers = '{\"Host\":\"$host\",\"User-Agent\":\"$user_agent\",\"Date\":\"DATE\",'\n headers += '\"Accept\": \"application/json\",\"Accept-Encoding\": \"gzip\",'\n headers += '\"X-Project-ID\": \"$project_id\",'\n headers += '\"X-Auth-Token\": \"InvalidToken\"}'\n headers = string.Template(headers)\n\n return headers.substitute(host=CFG.host,\n project_id=CFG.project_id,\n user_agent=CFG.user_agent)", "def decorated(*args, **kwargs):\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if not token:\n return jsonify({\"message\":\"Token is missing!\"}), 401\n try:\n data = jwt.decode(token, app.config['SECRET_KEY'])\n current_user = User.query.filter_by(public_id=data['public_id']).first()\n except:\n return jsonify({\"message\":\"Token is invalid\"}), 401\n return f(current_user, *args, **kwargs)", "def require_login_or_401(function):\n def wrap(request, *args, **kwargs):\n if request.user.is_anonymous:\n return Response({\"detail\": \"Must be logged in.\"}, status=401)\n return function(request, *args, **kwargs)\n return wrap", "def requires_auth(f):\n 
@wraps(f)\n def decorated(*args, **kwargs):\n token = get_token_auth_header()\n jsonurl = urlopen(\"https://\"+AUTH0_DOMAIN+\"/.well-known/jwks.json\")\n jwks = json.loads(jsonurl.read())\n unverified_header = jwt.get_unverified_header(token)\n rsa_key = {}\n for key in jwks[\"keys\"]:\n if key[\"kid\"] == unverified_header[\"kid\"]:\n rsa_key = {\n \"kty\": key[\"kty\"],\n \"kid\": key[\"kid\"],\n \"use\": key[\"use\"],\n \"n\": key[\"n\"],\n \"e\": key[\"e\"]\n }\n if rsa_key:\n try:\n payload = jwt.decode(\n token,\n rsa_key,\n algorithms=ALGORITHMS,\n audience=API_AUDIENCE,\n issuer=\"https://\"+AUTH0_DOMAIN+\"/\"\n )\n except jwt.ExpiredSignatureError:\n raise AuthError({\"code\": \"token_expired\",\n \"description\": \"token is expired\"}, 401)\n except jwt.JWTClaimsError:\n raise AuthError({\"code\": \"invalid_claims\",\n \"description\":\n \"incorrect claims,\"\n \"please check the audience and issuer\"}, 401)\n except Exception:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Unable to parse authentication\"\n \" token.\"}, 401)\n\n _request_ctx_stack.top.current_user = payload\n return f(*args, **kwargs)\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Unable to find appropriate key\"}, 401)\n return decorated", "def requires_auth(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n token = get_token_auth_header()\n jsonurl = urlopen(\"https://\"+AUTH0_DOMAIN+\"/.well-known/jwks.json\")\n jwks = json.loads(jsonurl.read())\n unverified_header = jwt.get_unverified_header(token)\n rsa_key = {}\n for key in jwks[\"keys\"]:\n if key[\"kid\"] == unverified_header[\"kid\"]:\n rsa_key = {\n \"kty\": key[\"kty\"],\n \"kid\": key[\"kid\"],\n \"use\": key[\"use\"],\n \"n\": key[\"n\"],\n \"e\": key[\"e\"]\n }\n if rsa_key:\n try:\n payload = jwt.decode(\n token,\n rsa_key,\n algorithms=ALGORITHMS,\n audience=API_AUDIENCE,\n issuer=\"https://\"+AUTH0_DOMAIN+\"/\"\n )\n except jwt.ExpiredSignatureError:\n raise AuthError({\"code\": \"token_expired\",\n \"description\": \"token is expired\"}, 401)\n except jwt.JWTClaimsError:\n raise AuthError({\"code\": \"invalid_claims\",\n \"description\":\n \"incorrect claims,\"\n \"please check the audience and issuer\"}, 401)\n except Exception:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Unable to parse authentication\"\n \" token.\"}, 401)\n\n _request_ctx_stack.top.current_user = payload\n return f(*args, **kwargs)\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Unable to find appropriate key\"}, 401)\n return decorated", "def function_call(f):\n def args_call(*args, **kw):\n handler = args[0]\n helper = RequestHelper(handler)\n if not users.is_current_user_admin():\n helper.header(error_header, error_msg)\n helper.error(403)\n else:\n f(*args, **kw)\n return args_call", "def requires_bearer_auth(self, f: Callable):\n\n @functools.wraps(f)\n def decorated(*args, **kwargs):\n # Try to load user info from request (failure will raise appropriate exception).\n user = self.get_user_from_bearer_token(request)\n # If handler function expects a `user` argument: pass the user object\n if 'user' in f.__code__.co_varnames:\n kwargs['user'] = user\n return f(*args, **kwargs)\n\n return decorated", "def check(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n verify_jwt_in_request()\n token = get_jwt_identity()\n if argument.lower() == 'machine':\n if token['is_user_token'] is False:\n from api.services.data_source_token import \\\n DataSourceTokenService\n 
_token_usage_counter_add(token['data_source_token']['id'])\n if DataSourceTokenService.check_if_token_is_active(\n DataSourceTokenService,\n token['data_source_token']['id']) is False:\n return ErrorObject.create_response(\n ErrorObject, HTTPStatus.FORBIDDEN,\n 'Token has been revoked')\n else:\n return fn(*args, **kwargs)\n else:\n return ErrorObject.create_response(\n ErrorObject, HTTPStatus.FORBIDDEN,\n 'Unable to access this resource with provided token')\n elif argument.lower() == 'user':\n if token['is_user_token'] is False:\n _token_usage_counter_add(token['data_source_token']['id'])\n return ErrorObject.create_response(\n ErrorObject, HTTPStatus.FORBIDDEN,\n 'Unable to access this resource with provided token')\n else:\n return fn(*args, **kwargs)\n else:\n raise ValueError('Unsupported argument provided')\n\n return wrapper", "def check_csrf(f):\n\n @wraps(f)\n @require_login\n def wrapper(*args, **kwds):\n if \"token\" not in session:\n raise PicoException(\n \"Internal server error\",\n data={\"debug\": \"CSRF token not found in session\"},\n )\n submitted_token = request.headers.get(\"X-CSRF-Token\", None)\n if submitted_token is None:\n raise PicoException(\"CSRF token not included in request\", 403)\n if session[\"token\"] != submitted_token:\n raise PicoException(\"CSRF token is not correct\", 403)\n return f(*args, **kwds)\n\n return wrapper", "def inject_header(f):\n def oncall(*args, **kwargs):\n rv = f(*args, **kwargs)\n if rv.status_code == 200:\n rv.headers['X-Pingback'] = url_for('services/pingback',\n _external=True)\n return rv\n oncall.__name__ = f.__name__\n oncall.__module__ = f.__module__\n oncall.__doc__ = f.__doc__\n return oncall", "def decorator_func(func):\r\n @functools.wraps(func)\r\n def with_status_check(obj, *args, **kwargs):\r\n if obj.status not in valid_start_statuses:\r\n exception_msg = (\r\n u\"Error calling {} {}: status is '{}', must be one of: {}\"\r\n ).format(func, obj, obj.status, valid_start_statuses)\r\n raise VerificationException(exception_msg)\r\n return func(obj, *args, **kwargs)\r\n\r\n return with_status_check", "def jwt_permission(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n auth_token = request.headers.environ.get('HTTP_AUTHORIZATION', '').split(' ')\n if len(auth_token) < 2:\n abort(403, \"Authentication fails\")\n\n JwtAuth.decode_auth_token(auth_token[1])\n return func(*args, **kwargs)\n return wrapper", "def test_tolerate_decorated_function_raise_if_exception_is_not_found():\n def test_function():\n raise AttributeError()\n fn = tolerate(exceptions=[KeyError])(test_function)\n fn()", "def service(decorated_function):\n @wraps(decorated_function)\n def wrapper(*args, **kwargs):\n if 'Authorization' not in request.headers:\n raise UnauthorizedException(\"Unauthorized\")\n principal_id = _authenticate_jwt(request.headers['Authorization'])\n if principal_id != '00000000-0000-4000-8000-000000000000':\n raise ForbiddenException(\"This endpoint may only be called internally\")\n return decorated_function(*args, **kwargs)\n return wrapper", "def login_required(role):\n def real_decorator(f):\n def decorated_function(*args, **kwargs):\n if not request.headers.get('Authorization'):\n response = jsonify(message='Missing authorization header')\n response.status_code = 401\n return response\n\n try:\n payload = parse_token(request)\n except DecodeError:\n response = jsonify(message='Token is invalid')\n response.status_code = 401\n return response\n except ExpiredSignature:\n response = jsonify(message='Token has expired')\n 
response.status_code = 401\n return response\n except ValueError as error:\n response = jsonify(message=str(error))\n response.status_code = 401\n return response\n\n # pass user id to flask request global\n g.user_id = payload['sub']\n\n # Check for admin roles\n if role == 'admin':\n # find User\n user = User.query.filter(User.id == g.user_id).first()\n if not user or user.role != 'admin':\n response = jsonify(message='You are not authorized')\n response.status_code = 401\n return response\n\n # user = User\n return f(*args, **kwargs)\n\n return decorated_function\n return real_decorator", "def login_required(func):\n @wraps(func)\n def wrapped(*args, **kwargs):\n if not session.get('logged_in', False):\n abort(401)\n return func(*args, **kwargs)\n return wrapped", "def checkcsrf(func):\n @functools.wraps(func)\n @sessiondecorator\n def wrapper(*args, session = None, **kw):\n if \"X-CSRF-TOKEN\" not in session.cookies:\n getcsrf(session)\n return func(*args,session = session, **kw)\n return wrapper", "def authorized(func):\n\n def wrapper(*args, **kwargs) -> web.Response:\n \"\"\"Wrap decorated method.\n\n Args:\n *args (tuple): Tuple with nameless arguments,\n **kwargs (dict): Dict with named arguments.\n\n Returns:\n Result of called wrapped method.\n\n Raises:\n HTTPUnauthorized: 401 HTTP error, if user session is expired or not found.\n\n \"\"\"\n\n pass\n\n pass", "def decorate(func, *args, **kws):\n self.func = func\n def do_authenticate():\n auth_header = self.request.headers.get('Authorization', '')\n if auth_header.split():\n scheme, code = auth_header.split()\n else:\n scheme = 'Basic'\n code = ''\n if scheme != 'Basic':\n raise ValueError('The authentication scheme is not BASIC')\n if b64decode(code):\n user, password = b64decode(code).split(':')\n else:\n user = password = ''\n if self.user == user and self.password == password:\n # the request already had valid authentication header.\n return self.func(*args, **kws)\n resp = self.response\n resp.set_status(401)\n self.render('Auth')\n resp.headers['WWW-Authenticate'] = 'Basic realm=\"%s\"' % self.realm\n\n return do_authenticate", "def check_chief(function_to_decorate):\r\n @wraps(function_to_decorate)\r\n def decorated_function(*args, **kwargs):\r\n \tif g.my['rank'] > 15:\r\n \t\tabort(401)\r\n \treturn function_to_decorate(*args, **kwargs)\r\n return decorated_function", "def _catch_error(f):\n @wraps(f) \n def wrap(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception as e:\n raise HTTPBadRequest(reason=e)\n return wrap", "def auth_required(f):\n\n @wraps(f)\n def _verify(*args, **kwargs):\n\n token = request.headers.get(\"Token\", \"\")\n api_key = request.headers.get(\"key\", \"\")\n\n invalid_msg = {\"error\": \"Ошибка доступа\", \"autheticated\": False}\n expired_msg = {\"error\": \"Истёкшая сессия\", \"autheticated\": False}\n\n session = Session()\n if token:\n try:\n data = jwt.decode(token, current_app.config[\"SECRET_KEY\"])\n user = session.query(User).filter_by(email=data[\"sub\"][\"email\"]).first()\n session.close()\n if not user:\n return jsonify({\"error\": \"User not found\"}), 404\n if user.banned:\n return jsonify({\"error\": \"Access denied\"}), 403\n return f(user, *args, **kwargs)\n except jwt.ExpiredSignatureError:\n return jsonify(expired_msg), 403\n except jwt.InvalidTokenError:\n return jsonify(invalid_msg), 403\n except Exception:\n traceback.print_exc()\n return jsonify({\"error\": \"Server error\"}), 500\n elif api_key:\n try:\n user = 
session.query(User).filter_by(api_key=api_key).first()\n session.close()\n if not user:\n return jsonify({\"error\": \"Wrong API key\"}), 400\n if user.banned:\n return jsonify({\"error\": \"Access denied\"}), 403\n return f(user, *args, **kwargs)\n except Exception:\n traceback.print_exc()\n return jsonify({\"error\": \"Server error\"}), 500\n\n return jsonify(invalid_msg), 403\n\n return _verify", "def _authenticated(func):\n @functools.wraps(func)\n def func_wrapper(self, *args, **kwargs):\n try:\n return func(self, *args, **kwargs)\n except exception.NotAuthorized:\n # Prevent recursion loop. After the self arg is the\n # resource_type arg from _issue_api_request(). If attempt to\n # login failed, we should just give up.\n if args[0] == 'login':\n raise\n\n # Token might've expired, get a new one, try again.\n self._login()\n return func(self, *args, **kwargs)\n return func_wrapper", "def self(decorated_function):\n @wraps(decorated_function)\n def wrapper(*args, **kwargs):\n if 'Authorization' not in request.headers:\n raise UnauthorizedException(\"Unauthorized\")\n principal_id = _authenticate_jwt(request.headers['Authorization'])\n if len(kwargs) == 0 or list(kwargs.values())[0] != principal_id:\n raise UnauthorizedException(\"You may only execute this action on yourself\")\n return decorated_function(*args, **kwargs)\n return wrapper", "def user_required(f):\n def decorator(*args, **kwargs):\n if \"user\" not in g:\n abort(401)\n return f(*args, **kwargs)\n return decorator", "def handle_stripe_exceptions(fn):\n @wraps(fn)\n def decorated_function(*args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except stripe.error.CardError:\n response: {\n 'error': 'Sorry your card was declined. Try again perhaps?'\n }\n return jsonify(response), 400\n except stripe.error.InvalidRequestError as e:\n return jsonify({'error': e}), 400\n except stripe.error.AuthenticationError:\n response = {\n 'error': 'Authentication with our payment gateway failed.'\n }\n return jsonify(response), 400\n except stripe.error.APIConnectionError:\n response = {\n 'error': 'Our payment gateway is experiencing connectivity issues, please try again.'\n }\n return jsonify(response), 400\n except stripe.error.StripeError:\n response = {\n 'error': 'Our payment gateway is having issues, please try again.'\n }\n return jsonify(response), 400\n\n return decorated_function", "def catch_assertions(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except AssertionError, e:\n logger.debug('Assertion failed: %s', e)\n abort(400)\n return decorated_function", "def login_required(f):\n @wraps(f)\n def login_decorator(*args, **kwargs):\n if not session.get('logged_in'):\n abort(401)\n else:\n return f(*args, **kwargs)\n return login_decorator", "def known_exceptions(func):\n def helper(*args, **kwargs):\n \"\"\"Actual Decorator for handling known exceptions\"\"\"\n try:\n return func(*args, **kwargs)\n except (requests.exceptions.RequestException,\n json.JSONDecodeError,\n simplejson.scanner.JSONDecodeError) as err:\n return handle_specific_exception(err)\n except TypeError as err:\n success = False\n return RestReturn(success=success, message=err.args[0])\n return helper", "def accepterror(func):\n\n @wraps(func)\n def return_error(*args, **kwargs):\n if request.headers.get(\"Accept\", '*/*') not in ['*/*', 'application/json', 'application/xml']:\n return not_accept()\n\n return func()\n\n return return_error", "def raise_on_request_error(\n func: Callable[Concatenate[_T, _P], 
Awaitable[None]]\n) -> Callable[Concatenate[_T, _P], Coroutine[Any, Any, None]]:\n\n async def decorator(self: _T, *args: _P.args, **kwargs: _P.kwargs) -> None:\n \"\"\"Decorate.\"\"\"\n try:\n await func(self, *args, **kwargs)\n except RainMachineError as err:\n raise HomeAssistantError(\n f\"Error while executing {func.__name__}: {err}\",\n ) from err\n\n return decorator", "def token_handler(self, f):\n\n @wraps(f)\n def decorated(*args, **kwargs):\n server = self.server\n uri, http_method, body, headers = extract_params(True)\n credentials = f(*args, **kwargs) or {}\n log.debug('Fetched extra credentials, %r.', credentials)\n ret = server.create_token_response(uri, http_method, body, headers, credentials)\n return create_response(*ret)\n\n return decorated", "def _catch_error(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception as e:\n raise HTTPBadRequest(reason=e)\n return wrap", "def from_header(key: str, allowed_values: Optional[Iterable] = None) -> Callable:\n\n def inner(r: Request):\n v = r.headers.get(key, \"\")\n\n # if allowed_values was supplied, return a blank string if\n # the value of the header does match any of the values.\n if allowed_values is not None and v not in allowed_values:\n return \"\"\n\n return v\n\n return inner", "def test_lti20_request_handler_bad_headers(self):\r\n self.setup_system_xmodule_mocks_for_lti20_request_test()\r\n self.xmodule.verify_lti_2_0_result_rest_headers = Mock(side_effect=LTIError())\r\n mock_request = self.get_signed_lti20_mock_request(self.GOOD_JSON_PUT)\r\n response = self.xmodule.lti_2_0_result_rest_handler(mock_request, \"user/abcd\")\r\n self.assertEqual(response.status_code, 401)", "def not_logged_in(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if 'idToken' in session:\n return redirect(url_for('index'))\n else:\n return f(*args, **kwargs)\n return decorated_function", "def errors_wrapped(func):\n\n async def wrapped(self, *args, **kwargs):\n try:\n return await func(self, *args, **kwargs)\n except AuthenticationRequiredError as ex:\n logger.warning(f\"Trying to use unauth access: {ex}\")\n add_message(self.request, \"LogIn to continue.\")\n redirect(self.request, \"sign_in\")\n\n except BaseApplicationError as ex:\n message = getattr(ex, \"message\", None) or str(ex)\n details = getattr(ex, \"details\", None)\n if details:\n message = f\"{message}: {details}\"\n\n add_message(self.request, message, kind=\"error\")\n raise web.HTTPFound(self.request.path)\n\n return wrapped", "def api_key_required(func):\n\t@wraps(func)\n\tdef decorated_view(*args, **kwargs):\n\t\treturn func(*args,**kwargs)\n\treturn decorated_view", "def auth_required(func):\n @wraps(func)\n def wrapper(request):\n if not request.user:\n return web.json_response({'status': 'error', 'message': 'auth required'}, status=401)\n return func(request)\n return wrapper", "def check_request(views_func):\n @wraps(views_func)\n def wrapper(*args, **kwargs):\n try:\n return views_func(*args, **kwargs)\n except (KeyError, ValueError) as ex:\n return HttpResponseBadRequest(str(ex))\n return wrapper", "def require_login(f):\n\n @wraps(f)\n def wrapper(*args, **kwds):\n if not api.user.is_logged_in():\n raise PicoException(\"You must be logged in\", 401)\n return f(*args, **kwds)\n\n return wrapper", "def catch_errors(f):\n\n @functools.wraps(f)\n def func(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except HorizonError as exception:\n print_failure(f\"{str.upper(f.__name__)} request failed to 
successfully execute. {exception.status_code if exception else None}\")\n if exception and exception.message:\n print_server_error_details(exception.message)\n raise\n\n return func", "def wrap_pecan_controller_exception(func):\n\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except exc.QinlingException as e:\n LOG.error('Error during API call: %s', six.text_type(e))\n return webob.Response(\n status=e.http_code,\n content_type='application/json',\n body=json.dumps(dict(faultstring=six.text_type(e))),\n charset='UTF-8'\n )\n\n return wrapped", "def user_required(func):\n def check_login(self, *args, **kwargs):\n if not get_current_user():\n raise endpoints.UnauthorizedException('Invalid token.')\n else:\n return func(self, *args, **kwargs)\n return check_login", "def _cache_headers_decorator(route_function, cache_headers):\n\n @functools.wraps(route_function)\n def decorated_function(*args, **kwargs):\n response = flask.make_response(\n route_function(*args, **kwargs)\n )\n\n if response.status_code == 200:\n # Only add caching headers to successful responses\n response.headers['Cache-Control'] = ', '.join(cache_headers)\n\n return response\n\n return decorated_function", "def validate_twilio_request(func):\n @wraps(func)\n def decorated_function(request, *args, **kwargs):\n # Create an instance of the RequestValidator class\n validator = RequestValidator(os.environ.get('TWILIO_AUTH_TOKEN'))\n\n # Validate the request using its URL, POST data,\n # and X-TWILIO-SIGNATURE header\n request_valid = validator.validate(\n request.build_absolute_uri(),\n request.POST,\n request.META.get('HTTP_X_TWILIO_SIGNATURE', ''))\n\n # Continue processing the request if it's valid, return a 403 error if\n # it's not\n if request_valid:\n return func(request, *args, **kwargs)\n else:\n return HttpResponseForbidden()\n return decorated_function", "def wrap_invalid_xcat_resp_data_error(function):\n\n @functools.wraps(function)\n def decorated_function(*arg, **kwargs):\n try:\n return function(*arg, **kwargs)\n except (ValueError, TypeError, IndexError, AttributeError,\n KeyError) as err:\n msg = _(\"Invalid xCAT response data: %s\") % str(err)\n raise ZVMException(msg)\n\n return decorated_function", "def decorated_function(*args, **kwargs):\n if 'idToken' in session:\n return f(*args, **kwargs)\n else:\n return redirect(url_for('login'))", "def xsrf_protected(f):\n @functools.wraps(f)\n def wrapper(self, *args, **kwargs):\n non_xsrf_protected_verbs = ['options', 'head', 'get']\n if (self.request.method.lower() in non_xsrf_protected_verbs or\n self._RequestContainsValidXsrfToken()):\n return f(self, *args, **kwargs)\n else:\n try:\n self.XsrfFail()\n except Exception, e:\n self.handle_exception(e, self.app.debug)\n finally:\n self.session_store.save_sessions(self.response)\n return wrapper", "def api_auth(func):\n @wraps(func)\n def _decorator(request, *args, **kwargs):\n authentication = APIAuthentication(request)\n if authentication.authenticate():\n return func(request, *args, **kwargs)\n raise Http404\n return _decorator", "def reauthorization_check(cls, func):\n \n def wrapper(*args, **kwargs):\n #if an error occurs when running function, assume spotipy timing out error and refresh token \n try: \n return func(*args, **kwargs)\n except:\n print(\"Spotipy token may be expired... 
Refreshing token...\")\n args[0]._create_user_object()\n return func(*args, **kwargs)\n\n return wrapper", "def wrap_wsme_controller_exception(func):\n\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except exc.QinlingException as e:\n pecan.response.translatable_error = e\n\n LOG.error('Error during API call: %s', six.text_type(e))\n raise wsme_exc.ClientSideError(\n msg=six.text_type(e),\n status_code=e.http_code\n )\n\n return wrapped", "def login_required(func):\n def wrapper(self, request, *args, **kwargs):\n if not request.user.is_authenticated():\n raise ApiLoginRequired\n return func(self, request, *args, **kwargs)\n\n wrapper.requires_login = True\n return wraps(func, wrapper)", "def auth_required(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n user = g.user\n if not user:\n return response(False, 401, message='Authorization required.')\n return f(types.SimpleNamespace(**user), *args, **kwargs)\n return wrapper", "def test_headers(self):\n token = 'abc123'\n requests.get(self.url, auth=BearerAuth(token))\n self.assertEqual(httpretty.last_request().headers['Authorization'], 'Bearer {}'.format(token))", "def init_headers(token):\n headers = {\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer ' + token\n }\n return headers", "def handle_errors(func):\n def wrapper(*args, **kwargs):\n try:\n response = func(*args, **kwargs)\n except Exception as e:\n response = jsonify({\"success\": False, \"message\": str(e)})\n return response\n wrapper.func_name = func.func_name\n return wrapper", "def authorized(fn):\n @wraps(fn)\n def _wrap(*args, **kwargs):\n if 'Authorization' not in request.headers:\n abort(401)\n return\n\n uow = inject.instance('UnitOfWorkManager')\n with uow.start() as tx:\n user = tx.users.get_by_token(request.headers['Authorization'])\n\n if user is None:\n # Unauthorized\n abort(401)\n return\n\n return fn(user=user, *args, **kwargs)\n return _wrap", "def client_required(f):\n @wraps(f)\n def client_decorator(*args, **kwargs):\n if session.get('logged_in') and session.get('type') == 'Client':\n return f(*args, **kwargs)\n else:\n abort(401)\n return client_decorator" ]
[ "0.8132438", "0.7699005", "0.7501329", "0.74680185", "0.7427506", "0.742177", "0.7420473", "0.7285155", "0.7139443", "0.7120114", "0.7101173", "0.70718706", "0.7041331", "0.7013722", "0.70107657", "0.70075893", "0.67916787", "0.6714881", "0.6710947", "0.6683059", "0.6663674", "0.66579056", "0.654992", "0.6540237", "0.65396965", "0.65309894", "0.64532954", "0.6429913", "0.6358883", "0.63387585", "0.6330037", "0.63261366", "0.6316507", "0.6243366", "0.6209355", "0.61933875", "0.61302465", "0.6108023", "0.60758144", "0.60669374", "0.6058622", "0.60364455", "0.6032543", "0.601666", "0.59673965", "0.59673965", "0.59577894", "0.5945372", "0.59269536", "0.5925683", "0.5925024", "0.5920926", "0.5918155", "0.5917335", "0.5913835", "0.5874879", "0.58598405", "0.5859712", "0.5855834", "0.58516514", "0.58447826", "0.5831078", "0.58249444", "0.5803188", "0.57969606", "0.57957435", "0.57925695", "0.5789321", "0.57877743", "0.5776762", "0.577018", "0.5766272", "0.57591975", "0.5753586", "0.5752806", "0.57346123", "0.5733397", "0.5729883", "0.5719875", "0.57183397", "0.57163805", "0.5708747", "0.57009584", "0.56905067", "0.5681167", "0.5678143", "0.5677473", "0.5676931", "0.566239", "0.565221", "0.56265837", "0.5626114", "0.5619538", "0.5617559", "0.56101096", "0.5591715", "0.5581811", "0.55755544", "0.5575209", "0.55713886" ]
0.7472541
3
Sigmoid backward (derivative) implementation
def sigmoid_backward(dA, Z):
    dsig = sigmoid(Z) * (1 - sigmoid(Z))
    return dA * dsig
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sigmoid_derivative(x):\n return x * (1-x)", "def sigmoid_derivative(x):\n return x * (1.0 - x)", "def derivative_sigmoid(x):\n return x * (1 - x)", "def derivative_sigmoid(x):\n return x * (1 - x)", "def sigmoid_derivative(x):\n\n return sigmoid(x) * (1 - sigmoid(x))", "def sigmoid_backward(dout, cache):\n dx, x = None, cache\n\n f = lambda x: 1/(1 + np.exp(-x)) # activation function (sigmoid)\n\n fun = f(x)\n\n dx = np.multiply(fun, (1-fun))\n dx = np.multiply(dx,dout)\n\n return dx", "def sigmoid_gradient(z):\n #derivative of sigmoid\n return z * (1 - z)", "def sigmoid_derivative(x):\n\n s = sigmoid(x)\n ds = s * (1 - s)\n\n return ds", "def sigmoid_backward(dA, internal_params):\n Z= internal_params\n dZ=np.multiply(sigmoid(Z)*(1-sigmoid(Z)),dA)\n # raise NotImplementedError\n return dZ", "def sigmoid_grad(x):\n sig = sigmoid(x)\n return np.multiply(sig, 1 - sig)", "def der_sigmoid(y):\n return y * (1 - y)", "def sigmoid_backward(dA, Z):\n\n s = 1 / (1 + np.exp(-Z))\n dZ = dA * s * (1 - s)\n\n assert (dZ.shape == Z.shape)\n\n return dZ", "def sigmoid_grad(z):\n return Sigmoid(z) * (1 - Sigmoid(z))", "def sigmoid_derivative(x):\r\n\r\n ### START CODE HERE ### (≈ 2 lines of code)\r\n s = 1.0 /(1 + 1/np.exp(x))\r\n ds = s*(1-s)\r\n ### END CODE HERE ###\r\n\r\n return ds", "def sigmoid_backward(dout, cache):\n dx = None\n y = cache\n ########################################################################\n # TODO: Implement the Sigmoid backward pass. #\n ########################################################################\n\n dx = dout * y * (1-y)\n\n ########################################################################\n # END OF YOUR CODE #\n ########################################################################\n return dx", "def sigmoid_backward(dA, cache):\n\n Z = cache\n s,_ = sigmoid(Z)\n dZ = dA * s * (1-s)\n return dZ", "def sigmoid_grad(self, X):\n var=self.sigmoid(X)\n return var*(1-var)", "def sigmoid_backward(a, z, g_z):\r\n exp_a = np.multiply(z, 1 - z)\r\n g_a = g_z * exp_a\r\n return g_a", "def grad_sigmoid(self):\r\n return self.sigmoid(self.x) * (1 - self.sigmoid(self.x))", "def sigmoid(z):\r\n \r\n return vSigmoid(z);", "def grad_sigmoid(self):\n grad = self.sigmoid(self.x) * (1 - self.sigmoid(self.x))\n return grad", "def sigmoid_backward(dA, cache):\n Z = cache\n s = 1.0 / (1.0 + np.exp(-Z))\n dZ = dA * s * (1 - s)\n return dZ", "def diff_sigmoid(z):\r\n diff_z = np.multiply(z, (1.0 - z))\r\n return diff_z\r\n pass", "def grad_sigmoid(self):\n return self.sigmoid(self.x)*(self.sigmoid(-self.x))\n raise NotImplementedError(\"Sigmoid gradient not implemented\")", "def sigmoid(z):\n return 1 / (1 + e ** -z)", "def __sigmoid(z, derivative=False):\n if derivative:\n return z * (1 - z)\n else:\n return 1 / (1 + np.exp(-z))", "def sigmoid(x):\n return 1 / (1 - (power(e,-x)))", "def sigmoid_backward(dA, cache):\n\n Z = cache\n\n s = 1 / (1 + np.exp(-Z))\n dZ = dA * s * (1 - s)\n\n assert (dZ.shape == Z.shape)\n\n return dZ", "def sigmoid_derivative(z):\n s = 1./(1. + np.exp(-z))\n ds = s * (1 - s)\n return ds", "def delta(z, a, y):\n\t\treturn np.subtract(a, y) * sigmoid_derivative(z)", "def hard_sigmoid_derivative(u):\r\n if u > 1:\r\n ro = 0\r\n elif u < 0:\r\n ro = 0\r\n else:\r\n ro = 1\r\n return ro", "def sigmoid_backward(self, dUpper, cache):\n out = cache\n #############################################################################\n # TODO: Implement the backward pass for the sigmoid function. 
#\n #############################################################################\n # Since cache is sigmoid function\n dsigmoid = dUpper * out * (1 - out)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dsigmoid", "def derivative_activation(z):\n return activation(z) * (1 - activation(z))", "def sigmoid(z): \n return 1/(1 + np.e**(-z))", "def delta(z, a, y):\n\t\treturn np.subtract(a, y) * sigmoid_derivative(z) ######## A MODIFIER", "def sigmoid(x, der=False):\n return sigmoid(x)*(1-sigmoid(x)) if der else 1 / (1 + np.exp(-x))", "def deriv_sigmoid(self,z):\n return np.exp(-z) / ( (1 + np.exp(-z)) ** 2 )", "def sigmoid(x):\n return 1 / (1 + exp(-x))", "def sigmoid(x):\n return 1 / (1 + exp(-x))", "def sigmoid(z):\n \n return 1 / (1 + np.exp(-z))#your code here", "def affine_sigmoid_backward(dout, cache):\n\n # fc_cache contains w (weights), x(input) , b(bias)\n # sigmoid cache contains the combination wx+b\n fc_cache, sigmoid_cache = cache\n da = sigmoid_backward(dout, sigmoid_cache)\n dx, dw, db = affine_backward(da, fc_cache)\n return dx, dw, db", "def gradient(cls, x):\n y = Sigmoid.apply(x)\n return np.multiply(y, 1 - y)", "def sigmoid(x):\n return 1 / (1 + (e**(-x))) #changed the '-' to a '+' because it didnt work otherwise\n #return 1 / (1 + math.exp(-x))", "def sigmoid(z):\n return 1 / (1 + np.exp(-1 * z))", "def sigmoid(x):\r\n return 1 / (1 + np.exp(-x))", "def sigmoid(x):\n return 1/(1+np.exp(-1*x))", "def sigmoid(x):\n return 1 / (1 + math.exp(-x))", "def sigmoid(x):\r\n #pred_x = (np.exp(x) - np.exp(-x)) / (np.exp(x) + np.exp(-x))\r\n pred_x = 1.0 / (1.0 + np.exp(-x))\r\n return pred_x\r\n pass", "def sigmoid(z):\n return 1 / (1 + np.exp(-z))", "def sigmoid(x):\n return 1 / (1 + np.exp(-x))", "def sigmoid(x):\n return 1 / (1 + np.exp(-x))", "def sigmoid(x):\n return 1 / (1 + np.exp(-x))", "def sigmoid(x):\n return 1 / (1 + np.exp(-x))", "def sigmoid(x):\n return 1 / (1 + np.exp(-x))", "def sigmoid(x):\n return 1 / (1 + np.exp(-x))", "def sigmoid(z):\n g = (1 + np.exp(-z))**-1\n return g", "def sigmoid(x):\n\treturn 1 / (1 + m.exp(-x))", "def sigmoid(z):\n return 1/(1+np.exp(-z))", "def sigmoid(x):\n return 1 / (1 * np.exp(-x))", "def sigmoid(t):\n return np.exp(t)/(1+np.exp(t))", "def sigmoid_forward(x):\n\n out = 1/(1+np.exp(-x))\n\n cache = x\n return out, cache", "def forward(X,W,b,V,d):\n H = sigmoid(X, W, b)\n Y = softmax(H, V, d)\n return H, Y", "def derivative(a, y, z):\n return (a - y) * Sigmoid.derivative(z)", "def sigmoid(z):\n return 1 / (1 + (np.exp(-z)))", "def sigmoid(x):\n return 1 / (1 + math.exp(-x))", "def backward(self, delta):\n if self.activation_type == \"sigmoid\":\n grad = self.grad_sigmoid()\n\n elif self.activation_type == \"tanh\":\n grad = self.grad_tanh()\n\n elif self.activation_type == \"ReLU\":\n grad = self.grad_ReLU()\n\n return grad * delta", "def sigmoid(x):\n\n return 1 / (1 + math.exp(-x))", "def sigmoid(x):\n\n return 1 / (1 + math.exp(-x))", "def sigmoid(x):\r\n\r\n return 1 / (1 + np.exp(-x))", "def sigmoid(x):\n return 1.0/(1.0+exp(-x))", "def sigmoid(t):\n return 1 / (1 + np.exp(-t))", "def sigmoid(X):\n return 1 / (1 + np.exp(-X))", "def sigmoid(X):\n return 1 / (1 + np.exp(-X))", "def hard_sigmoid(x):\r\n slope = 0.2\r\n shift = 0.5\r\n x = (x * slope) + shift\r\n x = tensor.clip(x, 0, 1)\r\n return x", "def sigmoid(x):\n return 1/(1 + math.exp(-x))", "def sigmoid(x):\n return 1. / (1. 
+ np.exp(-x))", "def sigmoid(z):\n return 1.0 / (1 + np.exp(-z))", "def sigmoid(X,W,b):\n preActivation = np.dot(X, W) + b\n return (1.0)/(1.0 + np.exp(-preActivation))", "def sigmoid(x):\n return 1.0/(1 + np.exp(-x))", "def sigmoid(X):\n g = 1/(1 + np.exp(-X))\n return g", "def sigmoid(a, d = False):\n if d:\n return a*(1-a)\n else:\n return 1/(1+np.exp(-a))", "def sigmoid(z):\n\treturn 1.0/(1.0+np.exp(-z))", "def f1(phi, phi_o, d):\n return 1 - sigmoid_decay(phi, phi_o, d)", "def sigmoid(x):\n pos_mask = (x >= 0)\n neg_mask = (x < 0)\n z = np.zeros_like(x)\n z[pos_mask] = np.exp(-x[pos_mask])\n z[neg_mask] = np.exp(x[neg_mask])\n top = np.ones_like(x)\n top[neg_mask] = z[neg_mask]\n return top / (1 + z)", "def sigmoid_back_propagate(da, cache):\n z = cache\n s = 1 / (1 + np.exp(-z))\n dz = da * s * (1 - s)\n assert (dz.shape == z.shape)\n assert (da.shape == z.shape)\n return dz", "def sigmoid(z):\n return 1/(1 + numpy.exp(z))", "def sigmoid(x,shift=0,mult=1):\n return 1 / (1 + math.exp(-(x+shift)*mult))", "def sigmoid(z):\n # print \"sigmoid input:\", z\n return 1.0 / (1.0 + math.exp(- z))\n # return math.tanh(z)", "def sigmoid(x):\n return 1.0 / (1.0 + np.exp(-x))", "def test_sigmoid_activation(self):\n self.assertEqual([0.5, 0.5], list(\n af.Sigmoid().output(np.array([0, 0]))))\n self.assertEqual([0.25, 0.25], list(\n af.Sigmoid().derivative(np.array([0, 0]))))", "def calculate_gradient(y, tx, w): \n return tx.T@(sigmoid(tx@w)-y)", "def sigmoid(z):\n\n ### START CODE HERE ### (≈ 1 line of code)\n s = 1 / (1 + np.exp(-z))\n ### END CODE HERE ###\n\n return s", "def backward_hidden_activation(self, Y, d):\n # y = tanh(x) ==> dy/dx = (1 - tanh(x)^2) = (1 - y^2)\n return d * (1 - Y ** 2)", "def sigmoid(x, exponent):\n \n return 1/(1+np.exp(-exponent*x))-0.5", "def sigmoid(inX):\n if inX < 0:\n return 1 - 1 / (1 + exp(inX))\n else:\n return 1 / (1 + exp(-inX))", "def sigmoid_function(z):\n\n return 1 / (1 + math.exp(-z))", "def sigmoid_forward(x):\n out = None\n ########################################################################\n # TODO: Implement the Sigmoid forward pass. #\n ########################################################################\n\n out = 1 / (1 + np.exp(-x))\n\n ########################################################################\n # END OF YOUR CODE #\n ########################################################################\n cache = out\n return out, cache", "def test_sigmoid_derivative_returns_correct_value(self):\n self.assertAlmostEqual(sigmoid_derivative(0), 0.25)\n self.assertAlmostEqual(sigmoid_derivative(1), 0.19661193324)", "def sigmoid(z):\n g = 1/(1 + np.exp(-z))\n return g", "def sigmoid(z):\n\n s = 1/(1+ np.exp(-z));\n return s;" ]
[ "0.85681444", "0.8473839", "0.831426", "0.831426", "0.8309925", "0.8075164", "0.80000556", "0.79696095", "0.79516095", "0.7931595", "0.7929404", "0.7801728", "0.7743298", "0.7714867", "0.7661006", "0.76579946", "0.7626087", "0.76228863", "0.7605027", "0.753502", "0.75197905", "0.74975383", "0.74847794", "0.7412464", "0.7398602", "0.7383305", "0.73784643", "0.73582405", "0.73499435", "0.73368925", "0.7317935", "0.7303306", "0.7296032", "0.7223162", "0.71860677", "0.7171013", "0.71693605", "0.7150883", "0.7150883", "0.71397066", "0.71054906", "0.70804816", "0.7061446", "0.7025216", "0.70250005", "0.6998838", "0.699853", "0.69971305", "0.69918644", "0.69848007", "0.69848007", "0.69848007", "0.69848007", "0.69848007", "0.69848007", "0.698051", "0.6979803", "0.6973266", "0.69664365", "0.6963236", "0.69560134", "0.6941649", "0.6938902", "0.6938535", "0.6938534", "0.69321334", "0.69316995", "0.69316995", "0.69282645", "0.6926937", "0.692133", "0.69200027", "0.69200027", "0.6918531", "0.6897719", "0.6894455", "0.68941826", "0.6889945", "0.68895483", "0.688949", "0.6885088", "0.6883841", "0.6878172", "0.6875264", "0.6871154", "0.68706053", "0.6864219", "0.68602663", "0.685863", "0.68573946", "0.68514746", "0.68502253", "0.68483156", "0.6848075", "0.68460256", "0.6845371", "0.68394387", "0.6837133", "0.6834048", "0.68303835" ]
0.82721937
5
RELU backward (derivative) implementation
def relu_backward(dA, Z):
    dZ = np.array(dA, copy=True)
    dZ[Z <= 0] = 0
    return dZ
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def relu_backward(dout, cache):\n #raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n x=cache\n dout[x<=0]=0\n dx=dout\n return dx", "def relu_backward(dout, cache):\n dx, x = None, cache\n dx = dout\n dout[x <= 0] = 0.0\n return dx", "def relu_backward(dA, Z):\n\n dZ = np.array(dA, copy=True) # just converting dz to a correct object.\n\n # When z <= 0, you should set dz to 0 as well.\n dZ[Z <= 0] = 0\n\n assert (dZ.shape == Z.shape)\n\n return dZ", "def backwards(delta,params,name='',activation_deriv=sigmoid_deriv):\n # everything you may need for this layer\n W = params['W' + name]\n b = params['b' + name]\n X, pre_act, post_act = params['cache_' + name]\n # your code here\n # do the derivative through activation first\n # then compute the derivative W,b, and X\n \n delta_pre = activation_deriv(post_act) * delta\n # (in_dim, out_dim) = (in_dim, examples) @ (examples, out_dim)\n grad_W = X.transpose() @ delta_pre\n grad_b = np.sum(delta_pre, axis=0, keepdims=True) # (1, out_dim)\n # (examples, in_dim) = (examples, out_dim) @ (out_dim, in_dim)\n grad_X = delta_pre @ W.transpose()\n\n # store the gradients\n params['grad_W' + name] = grad_W\n params['grad_b' + name] = grad_b\n return grad_X", "def relu_backward(dout, cache):\n dx, x = None, cache\n ###########################################################################\n # TODO: Implement the ReLU backward pass. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n dx = np.where(x<=0, 0, 1) * dout\n \n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def relu_backward(dout, x):\n ############################################################################\n # TODO: Implement the ReLU backward pass. 
#\n ############################################################################\n ############################################################################\n # START OF YOUR CODE #\n ############################################################################\n judge = x>0\n dx = dout*judge\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n return dx", "def linear_backward(dZ, cache):\n pass", "def relu_backward(dA, cache):\n\n Z = cache\n dZ = np.array(dA, copy=True) # just converting dz to a correct object.\n\n # When z <= 0, you should set dz to 0 as well.\n dZ[Z <= 0] = 0\n\n assert (dZ.shape == Z.shape)\n\n return dZ", "def backward(self):\n gradient = blah\n return gradient", "def backward(self):\n gradient = blah\n return gradient", "def backward_pass(self, delta):\r\n self.d_x = np.dot(delta, self.w.T)\r\n self.d_b = np.matmul(np.ones((1, delta.shape[0])), delta)\r\n self.d_w = np.dot(self.x.T, delta)\r\n return self.d_x", "def backward(self, delta):\n if self.activation_type == \"sigmoid\":\n grad = self.grad_sigmoid()\n\n elif self.activation_type == \"tanh\":\n grad = self.grad_tanh()\n\n elif self.activation_type == \"ReLU\":\n grad = self.grad_ReLU()\n\n return grad * delta", "def relu_backward(dout, cache):\r\n x = cache\r\n dx = dout * (x > 0)\r\n return dx", "def relu_backward(dout, cache):\n dx, x = None, cache\n ###########################################################################\n # TODO: Implement the ReLU backward pass. #\n ###########################################################################\n #print(dout)\n dx = np.empty_like(dout)\n np.copyto(dx, dout)\n dx[x < 0] = 0\n #print(dx)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def relu_backward(dout, cache):\n dx, x = None, cache\n ###########################################################################\n # TODO: Implement the ReLU backward pass. #\n ###########################################################################\n dx = dout.copy()\n dx[x<=0] = 0\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def rbackwardsolve(A, b, d):\n\n n = len(b)\n if np.iscomplexobj(A) or np.iscomplexobj(b):\n A = A.astype('complex128')\n b = b.astype('complex128')\n x = b.copy()\n x[n-1] = b[n-1] / A[n-1, n-1]\n\n for k in range(n-2, -1, -1):\n uk = min(n-1, k+d)\n x[k] = (b[k] - np.dot(A[k, k+1:uk+1], x[k+1:uk+1])) / A[k, k]\n\n return x", "def relu_backward(dA, internal_params):\n\n Z = internal_params\n dZ = np.array(dA, copy=True)\n\n dZ[Z <= 0] = 0\n\n return dZ", "def relu_backward(dA, cache):\n Z = cache\n dZ = np.array(dA, copy=True)\n dZ[Z <= 0] = 0\n return dZ", "def relu_backward(dA, cache):\n\n Z = cache\n dZ = np.array(dA, copy=True)\n dZ[Z <= 0] = 0\n return dZ", "def _backward(self, w=None):\n grad = self.w # Should be I * self.w . 
We keep a vector for simplicity\n\n # Left multiply input `w` with normalizer gradient\n return w * grad if w is not None else grad", "def backward(ctx, grad_output):\n loss, reg, u, lbda = ctx.saved_tensors\n\n device = u.device\n\n # do clever computations\n eps = 1e-10\n grad, = torch.autograd.grad(loss, u, only_inputs=True,\n retain_graph=True)\n x = (u - eps * grad).data\n lbda = lbda.data\n\n prox_x = check_tensor(\n np.array([prox_tv.tv1_1d(xx, eps * lbda) for xx in x]),\n device=device,\n )\n grad_u = (u - prox_x) / eps\n grad_lbda = reg.clone()\n return (torch.ones(0), grad_u, grad_lbda)", "def _backward(self):\n if self.units[0].value > 0:\n self.units[0].gradient += 1 * self.utop.gradient\n else:\n self.units[0].gradient += 0 * self.utop.gradient", "def leakrelu_backward(dA, Z):\n\n # When z <= 0, dz = 0.01\n derivative = np.ones(Z.shape)\n derivative[Z < 0] = 0.01\n\n dZ = dA * derivative\n\n assert (dZ.shape == Z.shape)\n\n return dZ", "def backward(self, upstream_grad):\n\n # derivative of Cost w.r.t W\n self.dW = np.dot(upstream_grad, self.A_prev.T)\n\n # derivative of Cost w.r.t b, sum across rows\n self.db = np.sum(upstream_grad, axis=1, keepdims=True)\n\n # derivative of Cost w.r.t A_prev\n self.dA_prev = np.dot(self.params['W'].T, upstream_grad)", "def relu_backward(dout, cache):\n dx, x = None, cache\n ###########################################################################\n # TODO: Implement the ReLU backward pass. #\n ###########################################################################\n dx = dout * (x > 0)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def relu_backward(dout, cache):\n dx, x = None, cache\n ###########################################################################\n # TODO: Implement the ReLU backward pass. #\n ###########################################################################\n dx = dout * (x > 0)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def _backward(self, w):\n # Gradient sign depends on input label (0/1)\n if w is not None:\n return w[0] * -self.w + w[1] * self.w\n else:\n raise ValueError(\"w cannot be set as None.\")", "def rnn_step_backward(dnext_h, cache):\n dx, dprev_h, dWx, dWh, db = None, None, None, None, None\n ##############################################################################\n # TODO: Implement the backward pass for a single step of a vanilla RNN. #\n # #\n # HINT: For the tanh function, you can compute the local derivative in terms #\n # of the output value from tanh. 
#\n ##############################################################################\n\n x, next_h, prev_h, Wx, Wh, b = cache\n # this is because in vanilla RNN h = tanh(z) and derivative of next_h = tanh(z) = 1-z*z;\n dz = (1-next_h*next_h)*dnext_h\n # THIS ERROR IS SPREAD AMONG THE\n # np.dot(x, Wx) + np.dot(prev_h, Wh) + b)\n dx = np.dot(dz,Wx.T)\n dprev_h = np.dot(dz,Wh.T)\n db = np.sum(dz,axis=0)\n dWx = np.dot(x.T,dz)\n dWh = np.dot(prev_h.T,dz)\n #d(tanh) = 1- tanh*tanh\n\n\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n return dx, dprev_h, dWx, dWh, db", "def backward_pass(self, delta):\n\n a = config['learning_rate']\n y = config['momentum_gamma']\n m = config['momentum']\n l = config['L2_penalty']\n\n # print(\"shape of delta incoming: \", delta.shape, \"shape of x: \", self.x.shape)\n self.d_x = delta.T @ self.x\n # print(\"SHAPE OF GRADIENT: \", self.d_x.shape)\n\n # gradient momentum\n self.w_inc = (a * self.d_x.T) + (y * self.d_v) - l * self.w\n \n # saving \n if m:\n self.d_v = self.w_inc\n else:\n self.d_v = np.zeros(self.w.shape)\n\n # backprop for bias weights\n x_0 = np.ones([len(delta), 1])\n\n self.d_b = delta.T @ x_0\n\n # print(\"shape of BIAS GRAD: \", self.d_b.shape)\n\n self.d_w = delta @ self.w.T\n # print(\"shape of w.T: \", self.w.T.shape, \"shape of RETURN delta: \", self.d_w.shape)\n #print(self.w.shape)\n return self.d_w", "def backward(self, d_output=None):\n if d_output is None:\n d_output = 1.0\n backpropagate(VariableWithDeriv(self, d_output))", "def backward_pass(self, grad):\n pass", "def backward_D(self):\n self.loss_D.backward()", "def backward(self, gradient):\n #TODO\n pass", "def backward(self, gradient):\n #TODO\n pass", "def backward(self, upstream_grad):\n # Multiplies upstream gradient with local gradient to get the derivative\n # of Cost\n self.dZ = upstream_grad * self.A*(1-self.A)", "def backward_differences(f, h, x):\n\treturn (f(x) - f(x - h)) / h", "def _poputil_recompute_backward(op, grads):\n return grads", "def fc_backward(dout, cache):\n x, w, b = cache\n dx, dw, db = None, None, None\n ###########################################################################\n # TODO: Implement the affine backward pass. #\n ########################################################################### \n N = x.shape[0]\n x2d = x.reshape(N, -1)\n \n dx = dout.dot(w.T)\n dx = dx.reshape(x.shape)\n dw = x2d.T.dot(dout)\n db = dout.sum(axis=0) #add from top to down\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx, dw, db", "def linear_backward_calculation(dZ, internal_params):\n\n A_prev, W, b = internal_params\n nb = A_prev.shape[1]\n\n ### START CODE HERE ### (≈ 3 lines of code)\n dW =np.multiply((np.dot(dZ, A_prev.T)),1/nb)\n db = np.multiply ((np.sum(dZ, axis=1, keepdims=True),1/nb))\n dA_prev = np.dot(W.T, dZ)\n # raise NotImplementedError\n return dA_prev,dW,db", "def _core_calc_degrad(self,bd,Ld) :\n\t\tdegrad = np.dot(Ld,bd) # Do matrix multiplication \n\t\tdegrad = np.exp(degrad) # Exponentiate to convert log to real\n\t\treturn degrad", "def relu_backward(dout, cache):\n dx, x = None, cache\n ###########################################################################\n # TODO: Implement the ReLU backward pass. 
#\n ###########################################################################\n dx = dout * ((x > 0).astype(int))\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def backward(self, next_layer_weights, next_layer_delta):\n delta = np.dot(next_layer_weights.T, next_layer_delta)\n delta = delta * self.mask * self.activation_derivative(self.z)\n self.delta = delta\n return delta", "def backward(self,x, y):\n # TODO\n self.delta[self.L-1]=self.a[self.L-1]-y\n le=len(self.delta)\n for i in range(le-2,-1,-1):\n cx= self.w[i+1].T@self.delta[i+1]\n self.delta[i]=self.phi_d(self.z[i])*cx\n for i in range(1,self.L):\n self.dw[i]=np.asmatrix(self.delta[i]).T@np.asmatrix(self.a[i-1])\n self.db[i]=self.delta[i]", "def backward(ctx, grad_output_var):\n xmin = 0\n xmax = 1\n grad_output = grad_output_var.data\n gamma_mu,kappa,uTx,x = ctx.saved_tensors\n n = kappa.size()[0]\n nx = grad_output.size()[2]\n u = 1/nx**2*torch.linspace(1,nx,nx)\n norm_u = torch.norm(u)**2\n torch_u = u.view(1,1,-1)+torch.zeros(n,1,nx)#broadcast\n denom = (xmin-kappa)*(xmax-kappa)\\\n -(kappa-uTx)*(xmin+xmax-2*kappa)\\\n -2*gamma_mu*norm_u \n #\n idx = (denom.abs()>1e-7)\n ind = (denom.abs()>1e-7)+ torch.zeros(n,1,nx)#broadcasting\n ind = ind>0\n denom[~idx] = denom[~idx]+1\n grad_input_gamma_mu = (2*kappa-(xmin+xmax))/denom*torch_u\n coeff = (xmax-kappa)*(xmin-kappa)/denom - 1\n grad_input_u = torch.eye(nx) \\\n +coeff*torch.matmul(torch_u.view(1,1,-1,1),u.view(1,-1))/norm_u\n # if denom is very small, it means that gamma_mu is very small and u is very close to one of the bounds,\n # there is a discontinuity when gamma_mu tends to zero, if 0<u<1 the derivative wrt x is approximately equal to \n # 1 and the derivative wrt gamma_mu is approximated by 10^3 times the error 2kappa-xmin-xmax\n grad_input_gamma_mu[~ind] = 0*grad_input_gamma_mu[~ind]+1e3*(2*kappa[~idx]-(xmin+xmax))\n grad_input_u[~ind] = 0*grad_input_u[~ind]+1\n \n grad_input_gamma_mu = grad_input_gamma_mu*grad_output#.sum(1).sum(1).unsqueeze(1).unsqueeze(2)\n grad_input_u = grad_input_u*grad_output\n \n # safety check for numerical instabilities\n if (grad_input_gamma_mu!=grad_input_gamma_mu).any():\n print('there is a nan in grad_input_gamma_mu')\n if (x!=x).any():\n print('there is a nan in x')\n sys.exit()\n if (grad_input_u!=grad_input_u).any():\n print('there is a nan in grad_input_u')\n sys.exit()\n \n grad_input_gamma_mu = Variable(grad_input_gamma_mu,requires_grad=True)\n grad_input_u = Variable(grad_input_u,requires_grad=True)\n \n return grad_input_gamma_mu, grad_input_u, None", "def rnn_step_backward(self, grad_next, cache):\n\n th, h_prev, x = cache\n dz = grad_next * (1 - th**2)\n dh_prev = np.dot(dz, self.W.T)\n self.dW += np.dot(h_prev.T, dz) / grad_next.shape[0]\n self.dU += np.dot(x.T, dz) / grad_next.shape[0]\n self.db += np.sum(dz, axis=0) / grad_next.shape[0]\n\n return dh_prev", "def relu_backward(self, dUpper, cache):\n x = cache\n #############################################################################\n # TODO: Implement the ReLU backward pass. 
#\n #############################################################################\n x = np.array(x , copy=True)\n x[x <= 0] = 0\n x[x > 0] = 1\n drelu = dUpper * x\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return drelu", "def conv_relu_backward(dout, cache):\n conv_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dx, dw, db = conv_backward_fast(da, conv_cache)\n return dx, dw, db", "def conv_relu_backward(dout, cache):\n conv_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dx, dw, db = conv_backward_fast(da, conv_cache)\n return dx, dw, db", "def __backward(self, dA, cache, derivative_activate_fn):\n A_prev, W, b, Z, D = cache\n\n m = A_prev.shape[1]\n\n # Mask\n dA = np.multiply(dA, D) / self.keep_prob\n\n dZ = dA * derivative_activate_fn(Z)\n dW = (1.0 / m) * np.dot(dZ, A_prev.T)\n db = (1.0 / m) * np.sum(dZ, axis=1, keepdims=True)\n dA_prev = np.dot(W.T, dZ)\n\n\n assert (dW.shape == W.shape)\n assert (db.shape == b.shape)\n assert (dA_prev.shape == A_prev.shape)\n\n return dA_prev, dW, db", "def conv_relu_backward_naive(dout, cache):\n\tconv_cache, relu_cache = cache\n\tda = relu_backward(dout, relu_cache)\n\tdx, dw, db = conv_backward_naive(da, conv_cache)\n\treturn dx, dw, db", "def backward(self, gradient):\n raise NotImplementedError()", "def forward(h, n, u, v, f, dt, dx, dy, du, dv, dn, beta=0, eps=0, gamma=0, mu=0.3, nu=0, dudt_x=dudt, dvdt_x=dvdt, dndt_x=dndt, grav=True, cori=True, advx=True, advy=True, attn=True): # forward euler and forward/backward timestep\n beta = np.float32(beta)\n mu = np.float32(mu)\n \n du1, du0 = du[:2]\n dv1, dv0 = dv[:2]\n dn0 = dn[0]\n \n dndt_x(h, n, u, v, dx, dy, dn0) # calculate dndt and put it into dn0\n \n n1 = n + ( dn0 )*dt\n \n dudt_x(h, n, f, u, v, dx, dy, du0, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n dvdt_x(h, n, f, u, v, dx, dy, dv0, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n dudt_x(h, n1, f, u, v, dx, dy, du1, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n dvdt_x(h, n1, f, u, v, dx, dy, dv1, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n \n u1 = u + ( beta*du1 + (one-beta)*du0 )*dt\n v1 = v + ( beta*dv1 + (one-beta)*dv0 )*dt\n \n n, u, v = n1, u1, v1\n \n du = [du1, du0, du0, du0]\n dv = [dv1, dv0, dv0, dv0]\n dn = [dn0, dn0, dn0]\n return n1, u1, v1, du, dv, dn", "def backward(self, delta_W_next):\n delta = delta_W_next * self._act.a_prime(self._z)\n delta_W = np.dot(delta, self._W.T)\n grad_w = np.dot(self._X.T, delta)\n grad_b = np.array(([np.sum(delta, axis=0)]))\n return grad_w, grad_b, delta_W", "def FiniteDiff(u, dx, d):\n \n n = u.size\n ux = np.zeros(n, dtype=u.dtype)\n \n if d == 1:\n for i in range(1,n-1):\n ux[i] = (u[i+1]-u[i-1]) / (2*dx)\n \n ux[0] = (-3.0/2*u[0] + 2*u[1] - u[2]/2) / dx\n ux[n-1] = (3.0/2*u[n-1] - 2*u[n-2] + u[n-3]/2) / dx\n return ux\n \n if d == 2:\n for i in range(1,n-1):\n ux[i] = (u[i+1]-2*u[i]+u[i-1]) / dx**2\n \n ux[0] = (2*u[0] - 5*u[1] + 4*u[2] - u[3]) / dx**2\n ux[n-1] = (2*u[n-1] - 5*u[n-2] + 4*u[n-3] - u[n-4]) / dx**2\n return ux\n \n if d == 3:\n for i in range(2,n-2):\n ux[i] = (u[i+2]/2-u[i+1]+u[i-1]-u[i-2]/2) / dx**3\n \n ux[0] = (-2.5*u[0]+9*u[1]-12*u[2]+7*u[3]-1.5*u[4]) / dx**3\n ux[1] = (-2.5*u[1]+9*u[2]-12*u[3]+7*u[4]-1.5*u[5]) / dx**3\n ux[n-1] = (2.5*u[n-1]-9*u[n-2]+12*u[n-3]-7*u[n-4]+1.5*u[n-5]) / dx**3\n 
ux[n-2] = (2.5*u[n-2]-9*u[n-3]+12*u[n-4]-7*u[n-5]+1.5*u[n-6]) / dx**3\n return ux\n \n if d > 3:\n return FiniteDiff(FiniteDiff(u,dx,3), dx, d-3)", "def backward(self, d_out):\n # TODO: Implement backward pass\n # Compute both gradient with respect to input\n # and gradients with respect to W and B\n # Add gradients of W and B to their `grad` attribute\n\n # It should be pretty similar to linear classifier from\n # the previous assignment\n \n dW = np.dot(self.X.T, d_out);\n dB = np.dot(np.ones((1, d_out.shape[0])), d_out);\n \n d_input = np.dot(d_out, self.W.value.T);\n #print(\"self.X = \", self.X);\n #print(\"self.W.grad.T = \", self.W.grad.T);\n #print(\"dW.T = \", dW.T);\n \n self.W.grad += dW;\n self.B.grad += dB;\n \n return d_input;", "def backward(self):\n if self.d_out_d_in is None:\n raise Exception(\"Haven't computed the loss!\")\n return self.d_out_d_in", "def backward(self, residuals):\n in_channel, out_channel, kernel_size, a = self.weights.shape\n dw = np.zeros_like(self.weights) \n \n for i in range(in_channel):\n for o in range(out_channel):\n dw[i, o] += inv_conv2(self.in_val[:,:,i], \n residuals[:,:,o], \n self.stride)\n\n self.db += residuals.sum(axis=1).sum(axis=0)\n self.dw += dw \n gradient_x = np.zeros_like(self.in_val)\n \n for i in range(in_channel):\n for o in range(out_channel):\n gradient_x[:,:,i] += conv_delta(residuals[:,:,o] \n , self.weights[i][o]\n , self.stride\n , self.in_val.shape[0])\n \n return gradient_x", "def backward(self, delta):\n self.d_x = np.dot(delta, np.transpose(self.w))\n self.d_w = np.dot(np.transpose(self.x), delta)/self.x.shape[0]\n self.d_b = np.mean(delta, axis=0).reshape((1, delta.shape[1]))\n return self.d_x\n\n raise NotImplementedError(\"Backprop for Layer not implemented.\")", "def fc_backward(dout, cache):\n x, w, b = cache\n dx, dw, db = None, None, None\n ###########################################################################\n # TODO: Implement the affine backward pass. 
#\n ###########################################################################\n dx = np.dot(dout, w.T)\n dw = np.dot(x.T, dout)\n db = np.average(dout, axis = 0)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx, dw, db", "def _double_backward_trick(ys, xs, v):\n # The value of ys_grad is not important, it can be any random value in\n # theory, but it's required to set stop_gradient=False.\n ys_grad = _zeros_like_with_grad(ys)\n xs_grad = _grad(ys, xs, ys_grad)\n return _grad(xs_grad, ys_grad, v)", "def test_correct_backward_order1(self):\r\n coeffs, shifts = finite_diff_coeffs(1, 1, \"backward\")\r\n assert np.allclose(coeffs, [1, -1])\r\n assert np.allclose(shifts, [0, -1])", "def backward_D(self):\n base_function._unfreeze(self.net_D)\n #print(self.input_P2.shape, self.img_gen.shape)\n self.loss_dis_img_gen = self.backward_D_basic(self.net_D, self.input_P2, self.img_gen)", "def test_subtracting_constant():\n a = fwd.Variable()\n assert equals((a-1).derivative_at(a, {a: 0.0}), 1.0)\n assert equals((1-a).derivative_at(a, {a: 0.0}), -1.0)", "def linear_backward(dZ, cache):\n A_prev, W, b = cache\n m = A_prev.shape[1]\n\n ### START CODE HERE ### (≈ 3 lines of code)\n dW = (np.dot(dZ, A_prev.T))/m\n db = (np.sum(dZ, axis=1, keepdims=True))/m\n dA_prev = np.dot(W.T, dZ)\n ### END CODE HERE ###\n \n assert (dA_prev.shape == A_prev.shape)\n assert (dW.shape == W.shape)\n assert (db.shape == b.shape)\n \n return dA_prev, dW, db", "def rbacksolve(A, b, d):\n n = len(b)\n b[n - 1] /= A[n - 1,n - 1]\n for k in range(n-2,-1,-1):\n uk = array([n, k + d + 1]).min()\n b[k] = b[k] - dot(A[k,(k+1):uk], b[(k+1):uk])\n b[k] /= A[k,k]", "def linear_backward(dZ, cache):\n A_prev, W, b = cache\n m = A_prev.shape[1]\n\n dW = np.dot(dZ,A_prev.T) / m\n db = np.sum(dZ,axis = 1,keepdims = True) / m\n dA_prev = np.dot(W.T,dZ)\n\n return dA_prev,dW,db", "def backward(self, x_out, x_target):\r\n return 2*(x_out - x_target)", "def backward(ctx, grad_output):\n diff, = ctx.saved_tensors\n grad_input = grad_output.clone()\n grad_input = grad_input + diff\n return grad_input", "def backward(self, upstream_grad):\n # couple upstream gradient with local gradient, the result will be sent back to the Linear layer\n self.dZ = upstream_grad * self.A*(1-self.A)", "def backward(self, grad_output):\n raise NotImplementedError", "def affine_relu_backward(dout, cache):\n fc_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dx, dw, db = affine_backward(da, fc_cache)\n return dx, dw, db", "def affine_relu_backward(dout, cache):\n fc_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dx, dw, db = affine_backward(da, fc_cache)\n return dx, dw, db", "def affine_relu_backward(dout, cache):\n fc_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dx, dw, db = affine_backward(da, fc_cache)\n return dx, dw, db", "def affine_relu_backward(dout, cache):\n fc_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dx, dw, db = affine_backward(da, fc_cache)\n return dx, dw, db", "def rnn_backward(self, dh, cache):\n self.init_backprop()\n assert dh.shape[1:] == (self.sequence_length, self.hidden_size)\n dh = dh.transpose(1, 0, 2) # Switching to time major\n upstream_grad = np.zeros_like(dh[-1])\n for dh_item, cache_item in reversed(list(zip(dh, cache))):\n upstream_grad = self.rnn_step_backward(dh_item + upstream_grad, cache_item)\n\n return 
self.dU, self.dW, self.db", "def layer_backward(d_output, cache):\n\n # Unpack cache values\n x, w, z, output = cache\n\n # Compute derivatives (gradients)\n d_x, d_w = None, None\n\n return d_x, d_w", "def backward(self, d_out):\n # TODO: Implement backward pass\n # Compute both gradient with respect to input\n # and gradients with respect to W and B\n # Add gradients of W and B to their `grad` attribute\n\n # It should be pretty similar to linear classifier from\n # the previous assignment\n self.W.grad += np.dot(self.X.T, d_out)\n self.B.grad += np.sum(d_out, axis=0)[np.newaxis, :]\n return np.dot(d_out, self.W.value.T)", "def backward(ctx, dy):\n y = ctx.y\n if ctx.eagerly_discard_variables:\n del ctx.y\n for i in range(len(ctx.reversible_blocks) - 1, -1, -1):\n y, dy = ctx.reversible_blocks[i].backward_pass(y, dy, not ctx.eagerly_discard_variables)\n if ctx.eagerly_discard_variables:\n del ctx.reversible_blocks\n return dy, None, None", "def fc_backward(dout, cache):\n x, w, b = cache\n dx, dw, db = None, None, None\n ###########################################################################\n # TODO: Implement the affine backward pass. #\n ###########################################################################\n Xshape = x.shape\n x_flat = np.reshape(x,(x.shape[0],-1))\n dx = np.dot(dout,w.T)\n dw = x_flat.T.dot(dout)#np.matmul(x[...,None],dout[:,None,:]).sum(axis=0)\n db = np.sum(dout,axis=0)\n dx = np.reshape(dx,Xshape)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx, dw, db", "def rforwardsolve(A, b, d):\n n = len(b)\n b[0] /= A[0, 0]\n for k in range(1,n):\n lk = array([0,k-d]).max()\n b[k] = b[k] - dot(A[k, lk:k],b[lk:k])\n b[k] /= A[k, k]", "def linear_backward(dZ, cache):\n A_prev, W, b = cache\n m = A_prev.shape[1]\n\n dW = 1./m * np.dot(dZ,A_prev.T)\n db = 1./m * np.sum(dZ, axis = 1, keepdims = True)\n dA_prev = np.dot(W.T,dZ)\n \n assert (dA_prev.shape == A_prev.shape)\n assert (dW.shape == W.shape)\n assert (db.shape == b.shape)\n \n return dA_prev, dW, db", "def backward(self):\n assert self.cache is not None, \"Cannot backprop without forward first.\"\n prob, y = self.cache\n\n dX = prob - y\n if self.reduction == \"mean\":\n m, _ = prob.shape\n dX /= m\n\n # clear cache\n self.cache = None\n\n return dX", "def backward(self, weights):\n\n return self.lambd * weights", "def backward_gradient(\n self, input: np.ndarray, head_gradients: Dict[str, np.ndarray]\n ) -> np.ndarray:\n raise NotImplementedError", "def grad_ReLU(self):\n y = self.x\n y[y<=0] = 0\n y[y>0] = 1\n return y\n raise NotImplementedError(\"ReLU gradient not implemented\")", "def backward(self, z):\n return self.forward(z) * (1 - self.forward(z))", "def rnn_backward(dh, cache):\n dx, dh_prev, dWx, dWh, db = None, None, None, None, None\n ##############################################################################\n # TODO: Implement the backward pass for a vanilla RNN running an entire #\n # sequence of data. You should use the rnn_step_backward function that you #\n # defined above. 
#\n ##############################################################################\n \"\"\"\n x, next_h, prev_h, Wx, Wh, b = cache\n dz = (1-next_h*next_h)*dnext_h\n # THIS ERROR IS SPREAD AMONG THE\n # np.dot(x, Wx) + np.dot(prev_h, Wh) + b)\n dx = np.dot(dz,Wx.T)\n dprev_h = np.dot(dz,Wh.T)\n db = np.sum(dz,axis=0)\n dWx = np.dot(x.T,dz)\n dWh = np.dot(prev_h.T,dz)\n #d(tanh) = 1- tanh*tanh\n \"\"\"\n #pdb.set_trace()\n # dh is not result of forward prop\n # but\n N,T,H = dh.shape\n tmp_x, tmp_next_h, tmp_prev_h, tmp_Wx, tmp_Wh, tmp_b = cache[T-1]\n D = tmp_x.shape[1]\n\n\n dx = np.zeros((N,T,D))\n dh_prev = np.zeros((N,H))\n dWx = np.zeros((D,H))\n dWh = np.zeros((H,H))\n db = np.zeros((H))\n\n for i in reversed(list(range(0,T))):\n # current gradient at timestep is the upstream gradient (provided as input)\n # this may be coming from the Y as in the min_char_rnn.py (see line 59)\n # + downstream gradient provided by rnn_step_backward.\n dh_curr = dh[:,i,:] + dh_prev\n dx_, dh_prev, dWx_, dWh_, db_ = rnn_step_backward(dh_curr, cache[i])\n dWx += dWx_\n dWh += dWh_\n db += db_\n dx[:,i,:]=dx_\n\n\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n return dx, dh_prev, dWx, dWh, db", "def exterior_der(self):\n from utilities import format_unop_txt, format_unop_latex\n if self._exterior_derivative is None:\n vmodule = self._vmodule # shortcut\n rname = format_unop_txt('d', self._name)\n rlname = format_unop_latex(r'\\mathrm{d}', self._latex_name)\n resu = vmodule.alternating_form(self._tensor_rank+1, name=rname, \n latex_name=rlname)\n for dom, rst in self._restrictions.iteritems():\n resu._restrictions[dom] = rst.exterior_der()\n self._exterior_derivative = resu\n return self._exterior_derivative", "def backward(self, top, propagate_down, bottom):\n for ib in range(2):\n if not propagate_down[ib]:\n continue\n ndim = bottom[0].data.shape\n count = ndim[0] * ndim[2] * ndim[3]\n if not self.count:\n bottom[ib].diff[ ... ] = np.zeros_like( bottom[0].data )\n continue\n if top[0].data < 1.\n bottom[ib].diff[ ... ] = np.abs( bottom[0].data - bottom[1].data )\n bottom[ib].diff[ ... ] *= ( 1 - 1.0*self.iter/self.maxiter )\n else:\n bottom[ib].diff[ ... ] = np.ones_like( bottom[ib].data )\n inop = bottom[0].data < bottom[1].data\n bottom[ib].diff[ inop ] *= -1\n \n # ingore false label and repair\n ignore = bottom[1].data <= 0.\n count -= np.sum(ignore)\n bottom[ib].diff[ignore] = 0.\n #normlist\n bottom[ib].diff[...] 
/= count", "def backward(ctx, de, dv):\n e, v, S = ctx.saved_tensors\n n, k = v.shape\n A = S.reshape(n, n)\n\n print('e=', e)\n vt = v.transpose(-2, -1)\n print('vt=', vt)\n print('de=', de)\n print('dv=', dv)\n\n if dv is None:\n A_bar = T.mm(v, T.mm(T.diag(de), vt))\n else:\n vtdv = T.mm(vt, dv)\n print('vtdv=', vtdv)\n F = T.ones_like(vtdv) * e\n F = (F - F.transpose(-2, -1)) ** -1\n F.diagonal().fill_(0)\n\n print('F=',F)\n\n A_bar = T.mm(v, T.mm(T.diag(de) + F * vtdv, vt))\n\n\n for i in range(k):\n break\n for j in range(k):\n if i < j:\n A_bar[i,j] *= 2\n elif i>j:\n A_bar[i,j] *= 0\n A_bar = (A_bar + A_bar.transpose(-2, -1))/2\n print('A_bar=', A_bar) \n return A_bar\n S_bar = A_bar.flatten()\n return S_bar", "def backward(self, grad, index):\n pass", "def fc_backward(dout, cache):\n x, w, b = cache\n dx, dw, db = None, None, None\n ###########################################################################\n # TODO: Implement the affine backward pass. #\n ###########################################################################\n N, d_out = dout.shape\n dx = np.matmul(dout, np.transpose(w))\n dw = np.matmul(np.transpose(x), dout)\n db = np.matmul(np.ones((1,dout.shape[0])), dout).reshape((-1,))\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx, dw, db", "def genfb(h, n, u, v, f, dt, dx, dy, du,dv,dn, beta=0.281105, eps=0.013, gamma=0.0880, mu=0.3, nu=0, dudt_x=dudt, dvdt_x=dvdt, dndt_x=dndt, grav=True, cori=True, advx=True, advy=True, attn=True): # generalized forward backward feedback timestep\n \n beta = np.float32(beta)\n eps = np.float32(eps)\n gamma = np.float32(gamma)\n mu = np.float32(mu)\n \n \n dn_m1,dn_m2,dn_m0 = dn # unpack\n dndt_x(h, n, u, v, dx, dy, dn_m0)\n \n# test_out = dn_m0.copy()\n# dndt(h, n, u, v, dx, dy, test_out)\n \n# test_dif = dn_m0-test_out\n# if np.max(np.abs(test_dif[1:-1,1:-1] )) >1E-5 :\n# test_dif[1:-1,5][np.abs(test_dif[1:-1,5] ) <1E-5]=0.0\n# print (\"dn diff 2\")\n# print (test_dif[:,5])\n \n #dn_m0[:]=test_out \n\n # must do the following before the u and v !\n n1 = n + ((p32+beta)* dn_m0 - (p5+beta+beta)* dn_m1+ (beta)* dn_m2)*dt\n #del dn_m2\n du_m0,du_m1,du_m2,du_p1 = du # unpack\n dudt_x(h, n1, f, u, v, dx, dy, du_p1, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n\n dv_m0,dv_m1,dv_m2,dv_p1 = dv # unpack \n dvdt_x(h, n1, f, u, v, dx, dy, dv_p1, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n \n# test_out = du_p1.copy()\n# dudt(h, n1, f, u, v, dx, dy, test_out)\n \n# test_dif = du_p1-test_out\n# if np.max(np.abs(test_dif[1:-1,5] )) >1E-5 :\n# test_dif[1:-1,5][np.abs(test_dif[1:-1,5] ) <1E-5]=0.0\n# print (\"du diff\")\n# print (test_dif[:,5])\n \n# #du_p1[:] = test_out\n\n# test_out = dv_p1.copy()\n# dvdt(h, n1, f, u, v, dx, dy, test_out)\n \n# test_dif = dv_p1-test_out\n# if np.max(np.abs(test_dif[1:-1,5] )) >1E-5 :\n# test_dif[1:-1,5][np.max(test_dif[1:-1,5] ) <1E-5]=0.0\n# print (\"dv diff\")\n# print (test_dif[:,5])\n \n #dv_p1[:] = test_out\n \n u1 = u+ ((p5+gamma+eps+eps)*du_p1 +(p5-gamma-gamma-eps-eps-eps)*du_m0 +gamma*du_m1+eps*du_m2)*dt\n # del du_m2\n v1 = v+ ((p5+gamma+eps+eps)*dv_p1 +(p5-gamma-gamma-eps-eps-eps)*dv_m0 +gamma*dv_m1+eps*dv_m2)*dt\n # del dv_m2\n\n\n \n \n dv = [ dv_p1,dv_m0,dv_m1,dv_m2 ]\n du = [ du_p1,du_m0,du_m1,du_m2 ]\n dn = [ dn_m0,dn_m1,dn_m2 ]\n# n[:,:], u[:,:], v[:,:], = n1, u1, v1\n return n1, u1, v1, du,dv,dn", "def 
conv_relu_backward(dout, cache):\n conv_cache, relu_cache = cache\n da = layers.relu_backward(dout, relu_cache)\n dx, dw, db = layers.conv_backward_fast(da, conv_cache)\n return dx, dw, db", "def derivative ( self ):\n return self.__derivative", "def backward(self, out_grad, input):\n raise NotImplementedError", "def SLE_DL(t, y):\n DyFun = SLEfun(y,C)\n Dygrand = tf.gradients(y, t)[0]\n return Dygrand - DyFun", "def directional_deriv(f, x, v, s=_DEFAULT_STEP):\n v = np.asarray(v)\n v = v / la.norm(v)\n return (f(x + s*v) - f(x)) / s", "def backward(self, d_out):\n # TODO: Implement backward pass\n # Compute both gradient with respect to input\n # and gradients with respect to W and B\n # Add gradients of W and B to their `grad` attribute\n\n # It should be pretty similar to linear classifier from\n # the previous assignment\n\n d_input = np.dot(d_out, self.W.value.T)\n self.W.grad = np.dot(self.X.T, d_out)\n self.B.grad = np.sum(d_out, axis=0, keepdims=True)\n\n return d_input", "def linear_backward(dZ: np.array, cache: Tuple) -> Tuple:\n A_prev, W, b = cache\n m = A_prev.shape[1] # number of examples\n\n dW = (np.dot(dZ, A_prev.T))/m\n db = (np.sum(dZ, axis=1, keepdims=True))/m\n dA_prev = np.dot(W.T, dZ)\n\n return dA_prev, dW, db" ]
[ "0.72977805", "0.715568", "0.7135363", "0.7078485", "0.6929383", "0.69284886", "0.6913763", "0.69110596", "0.69005686", "0.69005686", "0.6865896", "0.6833532", "0.682018", "0.6802795", "0.6802623", "0.6784269", "0.6782692", "0.67817545", "0.6776672", "0.6749691", "0.6739863", "0.6734109", "0.67146444", "0.66830087", "0.6654345", "0.6654345", "0.6646906", "0.66263556", "0.6612255", "0.66015893", "0.6577423", "0.6566949", "0.6562081", "0.6562081", "0.65414727", "0.6512904", "0.6509582", "0.6502363", "0.64866334", "0.64717007", "0.6456684", "0.6440265", "0.6440093", "0.6436286", "0.64315706", "0.6428848", "0.6424656", "0.6424656", "0.64236844", "0.6421168", "0.64044046", "0.64041257", "0.64040583", "0.6399648", "0.6396681", "0.6395365", "0.638468", "0.6366557", "0.6366203", "0.63626343", "0.6362061", "0.63514215", "0.63496536", "0.63348275", "0.6326577", "0.63257414", "0.6318417", "0.6309486", "0.6305414", "0.6300035", "0.62942386", "0.62942386", "0.62942386", "0.62942386", "0.6292874", "0.6289012", "0.6275543", "0.6274451", "0.6266348", "0.6264982", "0.6257768", "0.624401", "0.62417054", "0.623619", "0.6228144", "0.6227334", "0.6218639", "0.62161964", "0.62136966", "0.6211709", "0.61997277", "0.61850035", "0.6179115", "0.61765504", "0.6172492", "0.6168659", "0.61685216", "0.61652696", "0.6161821", "0.61603296" ]
0.69511014
4
Given a pool name, returns a storage driver.
def _init_driver(self, pool_id, pool_conf=None): if pool_id is not None: pool = self._pools_ctrl.get(pool_id, detailed=True) else: pool = pool_conf conf = utils.dynamic_conf(pool['uri'], pool['options'], conf=self._conf) storage = utils.load_storage_driver(conf, self._cache, control_driver=self.control) return pipeline.DataDriver(conf, storage, self.control)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_driver(self, pool_id, pool_conf=None):\n\n try:\n return self._drivers[pool_id]\n except KeyError:\n # NOTE(cpp-cabrera): cache storage driver connection\n self._drivers[pool_id] = self._init_driver(pool_id, pool_conf)\n\n return self._drivers[pool_id]", "def _get_pool(name=None, session=None):\n if session is None:\n session = _get_session()\n pools = session.xenapi.pool.get_all()\n for pool in pools:\n pool_record = session.xenapi.pool.get_record(pool)\n if name in pool_record.get(\"name_label\"):\n return pool\n return None", "def _get_pool_by_name(self, pool_name):\n pool_manager = PoolManager(organization_name=self._organization_name,\n project_name=self._project_name, creds=self._creds)\n pools = pool_manager.list_pools()\n return next((pool for pool in pools.value if pool.name == pool_name), None)", "def storage_backend_get_by_name(context, name, inactive=False):\n return _find_storage_backend(context, dict(name = name), True, None, inactive=inactive)", "def create_pool(self, device, tier, poolname):\n print \"Adding pool %s...\" % poolname\n pool = device.findRemoteStoragePool(StoragePoolPredicates.name(poolname))\n pool.setTier(tier)\n pool.save()\n return pool", "def get_pool(name):\n if name not in _CONNECTIONS:\n add_pool(name)\n return _CONNECTIONS[name]", "def _get_driver(self, driver_name):\n driver = lb_const.SERVICE_TYPE + driver_name\n return self.drivers[driver]", "def get_storage(self, name):\r\n if name not in self._storages:\r\n for suffix, engine in self.STORAGE_MAP.iteritems():\r\n if name.endswith(suffix):\r\n self._storages[name] = engine(self.get_filepath(name))\r\n break\r\n\r\n if name in self._storages:\r\n return self._storages[name]\r\n else:\r\n raise KeyError('{} does not have a valid suffix'.format(name))", "def get_backend(name):\n return _DEFAULT_PROVIDER.get_backend(name)", "def get_storage_backend(self):\n return self.client.info()['Driver']", "def storage_pool_get(context, storage_pool_id):\n return _storage_pool_get(context, storage_pool_id)", "def get_driver(driver_name):\n try:\n o = drivers[driver_name]\n if type(o) == str:\n exec 'd = %s()' % o\n else:\n d = o()\n return d\n except KeyError:\n raise ValueError('Unknown driver name: \"{0}\"'.format(driver_name))", "def _get_pool_path( self, pool_name ):\n\t\ttry:\n\t\t\treturn self.storage_pools[pool_name].path\n\t\texcept KeyError:\n\t\t\treturn ''", "def get_storage_provider(uri):\n for provider in ProviderFactory.get_storage_providers():\n try:\n supports = provider.supports_storage(uri) # type: ignore[union-attr]\n except BaseException as e:\n communication.warn(f\"Couldn't test provider {provider}: {e}\")\n else:\n if supports:\n return provider(uri=uri) # type: ignore[call-arg]\n\n raise errors.DatasetProviderNotFound(uri=uri)", "def get_default_pool():\n return 'tank'", "def get_storage(storage_dsn):\n storage_scheme = dsnparse.parse(storage_dsn).scheme\n storage_cls = STORAGE_REGISTRY.get(storage_scheme)\n if not storage_cls:\n logging.error(\"Can't find storage for given dsn.\")\n sys.exit(-1)\n return storage_cls(dsn=storage_dsn)", "def get_driver(drv):\n return GenericDriver.get_driver(drv)", "def _get_storage_backend(fq_classname):\n LOG.debug('Running _get_storage_backend with fq_classname [%s]'\n % fq_classname)\n\n if not fq_classname:\n return None\n\n (modname, clname) = fq_classname.rsplit('.', 1)\n # A test import of the backend storage class should have been undertaken\n # at app startup in django_drf_filepond.apps.ready so any failure\n # importing the backend 
should have been picked up then.\n mod = importlib.import_module(modname)\n storage_backend = getattr(mod, clname)()\n LOG.info('Storage backend instance [%s] created...' % fq_classname)\n\n return storage_backend", "def get_driver(driver_name):\n if driver_name == 'sqlite3':\n import sqlite3 as db_driver\n elif driver_name == 'cx_Oracle':\n import cx_Oracle as db_driver\n elif driver_name == 'pyodbc':\n import pyodbc as db_driver\n elif driver_name == 'pypyodbc':\n import pypyodbc as db_driver\n elif driver_name == 'psycopg2':\n import psycopg2 as db_driver\n elif driver_name == 'PyMySql':\n import PyMySql as db_driver\n elif driver_name == 'pymssql':\n import pymssql as db_driver\n else:\n # TODO: pick a better exception type and message\n raise ImportError\n return db_driver", "def get_pool(self, pool_name=None, pool_id=None):\n\n id_or_name = pool_id if pool_id else pool_name\n errormsg = \"Failed to get the pool {0} with error {1}\"\n\n try:\n obj_pool = self.unity_conn.get_pool(name=pool_name, _id=pool_id)\n\n if pool_id and obj_pool.existed:\n LOG.info(\"Successfully got the pool object %s\",\n obj_pool)\n return obj_pool\n if pool_name:\n LOG.info(\"Successfully got pool %s\", obj_pool)\n return obj_pool\n else:\n msg = \"Failed to get the pool with {0}\".format(\n id_or_name)\n LOG.error(msg)\n self.module.fail_json(msg=msg)\n\n except Exception as e:\n msg = errormsg.format(id_or_name, str(e))\n LOG.error(msg)\n self.module.fail_json(msg=msg)", "def get_pool(self, name, dc, cluster):\n cluster_obj = self.get_cluster(cluster, dc)\n for rp in cluster_obj.resourcePool.resourcePool:\n if rp.name == name:\n return rp", "def lookup(self, queue, project=None):\n\n try:\n pool_id = self._pool_id(queue, project)\n except errors.QueueNotMapped as ex:\n LOG.debug(ex)\n\n return self.get_default_pool(use_listing=False)\n\n return self.get_driver(pool_id)", "def get_backend_by_name(cls_str):\n # type: (str) -> Backend\n try:\n return globals()[cls_str]()\n except KeyError:\n raise InvalidBackendClass('Invalid backend class name: {cls}'.format(cls=cls_str))", "def find_module(self, name):\n if name in self.pool:\n return self.pool[name]\n else:\n return None", "def get_by_url(self, url, pool_name=None):\n\t\tif not pool_name:\n\t\t\treturn self.pool[url]\n\t\treturn getattr(self, pool_name)[url]", "def get_vm_in_pool_by_name(self, name, dc, cluster, pool):\n vms = self.get_all_vms_in_pool(pool, dc, cluster)\n for vm in vms:\n if vm.name == name:\n return vm", "def open_storage(data_source_name, db_type=\"dbm\", mode=None):\n try:\n klass, supports_mode = _storage_types[db_type]\n except KeyError:\n raise NoSuchClassifierError(db_type)\n try:\n if supports_mode and mode is not None:\n return klass(data_source_name, mode)\n else:\n return klass(data_source_name)\n except dbmstorage.error, e:\n if str(e) == \"No dbm modules available!\":\n print >> sys.stderr, \"\\nYou do not have a dbm module available \" \\\n \"to use. 
You need to either use a pickle (see the FAQ)\" \\\n \", use Python 2.3 (or above), or install a dbm module \" \\\n \"such as bsddb (see http://sf.net/projects/pybsddb).\"\n sys.exit()", "def get_pool(self):\n try:\n return self._pool\n except AttributeError:\n db_url = getattr(settings, self.name)\n self._pool = PostgresConnectionPool.for_url(db_url)\n return self._pool", "def _get_driver_from_dsn(self, dsn):\n\n return dsn.split(':')[0]", "def ParseStoragePool(resources, storage_pool, project, location):\n collection = 'compute.storagePools'\n params = {'project': project, 'zone': location}\n storage_pool_ref = resources.Parse(\n storage_pool, collection=collection, params=params\n )\n return storage_pool_ref", "def _create_volume_pool(self, pool_name):\n osd_map = self._rados_command('osd dump', {})\n\n existing_id = self._get_pool_id(osd_map, pool_name)\n if existing_id is not None:\n log.info(\"Pool {0} already exists\".format(pool_name))\n return existing_id\n\n osd_count = len(osd_map['osds'])\n\n # We can't query the actual cluster config remotely, but since this is\n # just a heuristic we'll assume that the ceph.conf we have locally reflects\n # that in use in the rest of the cluster.\n pg_warn_max_per_osd = int(self.rados.conf_get('mon_max_pg_per_osd'))\n\n other_pgs = 0\n for pool in osd_map['pools']:\n if not pool['pool_name'].startswith(self.POOL_PREFIX):\n other_pgs += pool['pg_num']\n\n # A basic heuristic for picking pg_num: work out the max number of\n # PGs we can have without tripping a warning, then subtract the number\n # of PGs already created by non-manila pools, then divide by ten. That'll\n # give you a reasonable result on a system where you have \"a few\" manila\n # shares.\n pg_num = ((pg_warn_max_per_osd * osd_count) - other_pgs) // 10\n # TODO Alternatively, respect an override set by the user.\n\n self._rados_command(\n 'osd pool create',\n {\n 'pool': pool_name,\n 'pg_num': int(pg_num),\n }\n )\n\n osd_map = self._rados_command('osd dump', {})\n pool_id = self._get_pool_id(osd_map, pool_name)\n\n if pool_id is None:\n # If the pool isn't there, that's either a ceph bug, or it's some outside influence\n # removing it right after we created it.\n log.error(\"OSD map doesn't contain expected pool '{0}':\\n{1}\".format(\n pool_name, json.dumps(osd_map, indent=2)\n ))\n raise RuntimeError(\"Pool '{0}' not present in map after creation\".format(pool_name))\n else:\n return pool_id", "def backend_pool_type(self) -> Optional[pulumi.Input[Union[str, 'BackendPoolType']]]:\n return pulumi.get(self, \"backend_pool_type\")", "def get_pool():\n app = get_app()\n return app['pool']", "def get_device_pool(arn=None):\n pass", "def get_driver(self, shard_id):\n\n try:\n return self._drivers[shard_id]\n except KeyError:\n # NOTE(cpp-cabrera): cache storage driver connection\n self._drivers[shard_id] = self._init_driver(shard_id)\n\n return self._drivers[shard_id]", "def by_name(cls, name):\n if name in cls._registry:\n result = cls._registry[name]\n else:\n result = cls._registry[name] = cls(bind=Session._datastores.get(name))\n return result", "def _instantiate_backend_from_name(name, options):\r\n # Parse backend name\r\n\r\n try:\r\n parts = name.split('.')\r\n module_name = '.'.join(parts[:-1])\r\n class_name = parts[-1]\r\n except IndexError:\r\n raise ValueError('Invalid event track backend %s' % name)\r\n\r\n # Get and verify the backend class\r\n\r\n try:\r\n module = import_module(module_name)\r\n cls = getattr(module, class_name)\r\n if not inspect.isclass(cls) or 
not issubclass(cls, BaseBackend):\r\n raise TypeError\r\n except (ValueError, AttributeError, TypeError, ImportError):\r\n raise ValueError('Cannot find event track backend %s' % name)\r\n\r\n backend = cls(**options)\r\n\r\n return backend", "def _determine_storage_repo(session, resource_pool, vm_):\n storage_repo = \"\"\n if \"storage_repo\" in vm_.keys():\n storage_repo = _get_sr(vm_[\"storage_repo\"], session)\n else:\n storage_repo = None\n if resource_pool:\n default_sr = session.xenapi.pool.get_default_SR(resource_pool)\n sr_record = session.xenapi.SR.get_record(default_sr)\n log.debug(\"storage repository: %s\", sr_record[\"name_label\"])\n storage_repo = default_sr\n else:\n storage_repo = None\n log.debug(\"storage repository: %s\", storage_repo)\n return storage_repo", "def driver_from_file(input_file):\n file_ext = os.path.splitext(input_file)[1].split(\".\")[1]\n try:\n driver = _file_ext_to_driver()[file_ext]\n except KeyError:\n raise errors.MapcheteDriverError(\n \"no driver could be found for file extension %s\" % file_ext)\n if len(driver) == 1:\n return driver[0]\n else:\n raise errors.MapcheteDriverError(\n \"error determining read driver from file %s\" % input_file)", "def newPool(name: str, superPool, types: [], cls):\n try:\n if name == \"colorholder\":\n superPool = P0(len(types), cls)\n return superPool\n elif name == \"abstractnode\":\n superPool = P1(len(types), cls)\n return superPool\n elif name == \"node\":\n superPool = P2(len(types), superPool, cls)\n return superPool\n \n elif name == \"subnode\":\n superPool = P3(len(types), superPool, cls)\n return superPool\n \n else:\n if superPool is None:\n superPool = BasePool(len(types), name, StoragePool.noKnownFields, StoragePool.noAutoFields, cls)\n else:\n superPool = superPool.makeSubPool(len(types), name, cls)\n return superPool\n finally:\n types.append(superPool)", "def _create_pool_vm(args):\n # check storage pool name unicity\n conn = libvirt.open(None)\n _sps = list()\n if conn:\n _sps = [sp for sp in conn.listAllStoragePools() if sp.name() == args.name]\n conn.close()\n else:\n print('Cannot contact hypervisor', file=sys.stderr)\n return 1\n\n if len(_sps) != 0:\n print(\"Storage pool with name [%s] already exists\" % args.name, file=sys.stderr)\n return 1\n\n if args.disk and args.netfshost:\n print(\"--disk and --host option are exclusive\", file=sys.stderr)\n return 1\n\n if not args.disk and not args.netfshost:\n print(\"Either --disk or --host must be specified.\", file=sys.stderr)\n return 1\n\n if args.netfshost and not args.path:\n print(\"Must specify the remote resource path with the --path option\", file=sys.stderr)\n return 1\n\n _pool_name = args.name\n if args.disk:\n return oci_utils.kvm.virt.create_fs_pool(args.disk, _pool_name)\n if args.netfshost:\n return oci_utils.kvm.virt.create_netfs_pool(args.netfshost, args.path, _pool_name)", "def get_default_pool(con):\n try:\n return con.floating_ip_pool_read(fq_name=conf.get('default_pool', 'UNEXPECTED_VALUE'))\n except NoIdError:\n log.debug('Unable to find pool.')\n return None", "def get_backend(self, name):\n if name == DATABASE_TYPE_MYSQL:\n ret = 2\n elif name == DATABASE_TYPE_POSTGRESQL:\n ret = 3\n elif name == DATABASE_TYPE_SQLITE:\n ret = 4\n # sqlcoder: this assignment fixes unicode problems for me with sqlite (windows, cp1252)\n # feel free to remove or improve this if you understand the problems\n # better than me (not hard!)\n Charset.not_needed1, Charset.not_needed2, Charset.not_needed3 = True, True, True\n else:\n raise 
ValueError('Unsupported database backend: %s' % self.supported_databases[name].db_server)\n\n return ret", "def load_backend(name, options=None):\n if name is None:\n assert options is None\n return get_default()\n if options is None:\n options = {}\n if name not in _backends:\n raise UnknownBackend(name)\n try:\n res = _backends[name]()(**options)\n except Exception as e:\n raise LoadingError(name) from e\n return res", "def get_plugin_loader(name):\n try:\n mgr = stevedore.DriverManager(namespace=PLUGIN_NAMESPACE,\n invoke_on_load=True,\n name=name)\n except RuntimeError:\n raise exceptions.NoMatchingPlugin(name)\n\n return mgr.driver", "def for_provider(provider_name):\n try:\n cls = _instance.providers_cls[provider_name]\n return StorageBuilder(provider_name, cls)\n except KeyError:\n raise ValueError('No provider implementation registered for name: %s' % provider_name)", "def dataset_by_name(name):\n return _datasets[name.lower()]", "def get_database(conn, name):\n\n if conn.hasDatabase(name) is False:\n return conn.createDatabase(name)\n\n return conn[name]", "def create_device_pool(pool_name, project_arn):\n\n new_device_pool = device_farm.create_device_pool(\n projectArn=project_arn,\n name=pool_name,\n description='it is edX device pool',\n maxDevices=1,\n rules=[\n {\n \"attribute\": \"PLATFORM\",\n \"operator\": \"EQUALS\",\n \"value\": '\"ANDROID\"'\n },\n {\n \"attribute\": \"OS_VERSION\",\n \"operator\": \"GREATER_THAN_OR_EQUALS\",\n \"value\": '\"9\"'\n },\n {\n \"attribute\": \"MANUFACTURER\",\n \"operator\": \"EQUALS\",\n \"value\": '\"Google\"'\n },\n {\n \"attribute\": \"AVAILABILITY\",\n \"operator\": \"EQUALS\",\n \"value\": '\"HIGHLY_AVAILABLE\"'\n },\n {\n \"attribute\": \"FLEET_TYPE\",\n \"operator\": \"EQUALS\",\n \"value\": '\"PUBLIC\"'\n }\n ]\n )\n if new_device_pool is not None:\n new_pool_name = new_device_pool['devicePool']['name']\n new_pool_arn = new_device_pool['devicePool']['arn']\n print('{} is created successfully'.format(pool_name))\n return new_pool_arn\n else:\n print('Problem creating {} device pool'.format(project_name))", "def driver(self):\n return self.rpc.call(MsfRpcMethod.DbDriver, [{}])['driver']", "def get_pool_id(pool_name, host=None):\n cmd = utils.XMS_CLI_HEADER + \"-f json pool list\"\n print cmd\n ret = utils.execute_cmd_in_host(cmd, host)\n if ret[2] != 0 or isinstance(ret[0], dict):\n print \"[Error] Failed to get pool info. 
Error message: [{err}]\".format(err=ret[1])\n return -1\n try:\n pool_info = json.loads(ret[0])\n pools = pool_info[\"pools\"]\n for p in pools:\n if pool_name == p[\"name\"]:\n return p[\"id\"]\n except Exception as e:\n print \"[Error] error message is: \" + e.message\n return -1", "def get_global_storage(self, name: str) -> Any:\n return self.global_storage[name]", "async def _get_work_pool_id_from_name(\n self, session: AsyncSession, work_pool_name: str\n ) -> UUID:\n work_pool = await models.workers.read_work_pool_by_name(\n session=session,\n work_pool_name=work_pool_name,\n )\n if not work_pool:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f'Worker pool \"{work_pool_name}\" not found.',\n )\n\n return work_pool.id", "def get_storage(path=None, options=None):\n path = path or settings.STORAGE\n option = options or {}\n options = options or settings.STORAGE_OPTIONS\n if not path:\n raise ImproperlyConfigured('You must specify a storage class using '\n 'DBBACKUP_STORAGE settings.')\n storage_module = import_module(path)\n return storage_module.Storage(**options)", "def _determine_resource_pool(session, vm_):\n resource_pool = \"\"\n if \"resource_pool\" in vm_.keys():\n resource_pool = _get_pool(vm_[\"resource_pool\"], session)\n else:\n pool = session.xenapi.pool.get_all()\n if not pool:\n resource_pool = None\n else:\n first_pool = session.xenapi.pool.get_all()[0]\n resource_pool = first_pool\n pool_record = session.xenapi.pool.get_record(resource_pool)\n log.debug(\"resource pool: %s\", pool_record[\"name_label\"])\n return resource_pool", "def driver_load(self, name):\r\n return AbstractServiceManager.service_load(self, name)", "def backend_name(self) -> str:\n return self._db_data.backend", "def pool(self):\n return self._properties.get('pool')", "def get_dbapi_module(name):\n return import_module(name)", "def pool_create(self, pool_name):\n self.core.api.os.shell.cmd('{0} add apppool /name:\"{1}\"'.format(\n self.APP_CMD, pool_name\n ))", "def get_db(db_name):\n from pymongo import MongoClient\n client = MongoClient('localhost:27017')\n db = client[db_name]\n return db", "def test_volume_service_pool(self):\n self.assertEqual(\n ChangeStateScript()._deployer._volume_service._pool,\n StoragePool(reactor, b\"flocker\", FilePath(b\"/flocker\")))", "def _get_pools():\n conn = libvirt.open(None)\n try:\n _spsfs = list()\n _spsnetfs = list()\n if conn:\n # file system pool\n _spsfs = conn.listAllStoragePools(flags=128)\n # nfs pool\n _spsnetfs = conn.listAllStoragePools(flags=256)\n else:\n _logger.error('Failed to contact hypervisor')\n raise ValueError('Failed to contact hypervisor.')\n except libvirt.libvirtError as e:\n _logger.error('Failed to collect vm pool data: %s', str(e))\n raise ValueError('Failed to collect vm pool data.') from e\n finally:\n conn.close()\n return _spsfs, _spsnetfs", "def _load_driver(backend, **kargs):\n bk_module = importlib.import_module('backend', __package__)\n driver_cls = getattr(bk_module, str.capitalize(backend) + 'Backend')\n return driver_cls(**kargs)", "def generate_from_pool(pool_path):\n rules = registry_level2_only()\n pool = combine_pools(t_path(pool_path))\n asns = generate(pool, rules)\n return asns", "def get_db(db_name):\n client = MongoClient('localhost:27017')\n db = client[db_name]\n return db", "def storage_backend_get_by_id(context, id, inactive=False):\n return _find_storage_backend(context, dict(id = id), True, None, inactive=inactive)", "def fusion_api_get_storage_pools(self, uri=None, param='', 
api=None, headers=None):\n return self.pool.get(uri=uri, api=api, headers=headers, param=param)", "def module_name(self):\n return \"Storage\"", "def get_store(store_name: str):\n return store_handler.get_store(store_name)", "def get_driver(url='neo4j', neo4j_auth='neo4j/neo4j'):\n from neo4j import GraphDatabase\n\n auth_parts = neo4j_auth.split('/')\n if len(auth_parts) == 2:\n driver = GraphDatabase.driver('bolt://' + url + ':7687',\n auth=(auth_parts[0], auth_parts[1]))\n else:\n driver = GraphDatabase.driver('bolt://' + url + ':7687')\n\n return driver", "def storage_pool_create(context, values):\n if not values.get('id'):\n values['id'] = uuidutils.generate_uuid()\n\n storage_pool_ref = models.StoragePool()\n storage_pool_ref.update(values)\n\n session = get_session()\n with session.begin():\n session.add(storage_pool_ref)\n\n return _storage_pool_get(context,\n storage_pool_ref['id'],\n session=session)", "def NodePoolName(name: str) -> str:\n # GKE (or k8s?) requires nodepools use alphanumerics and hyphens\n # AKS requires full alphanumeric\n # PKB likes to use underscores strip them out.\n return name.replace('_', '')", "def getStorageObject(implementation, the_element):\n module=__import__(implementation)\n for i in implementation.split(\".\")[1:]:\n module = getattr(module, i)\n if module:\n cls=None\n for key in module.__dict__.keys():\n import inspect\n if inspect.isclass(getattr(module, key)) and inspect.getclasstree([getattr(module, key)], True)[0][0] == Storage:\n cls=getattr(module, key)\n break\n if cls:\n try:\n inst=object.__new__(cls)\n Storage.log.debug(\"class is %s\" %(cls))\n inst.__init__(element=the_element)\n connname=inst.getConnectionName()\n if not StorageConnections.has_key(connname):\n Storage.log.debug(\"Creating new storage connection %s %s\" %(connname, StorageConnections.keys()))\n StorageConnections[connname]=inst\n return inst\n else:\n Storage.log.debug(\"Returning already established storage connection %s\" %(connname))\n return StorageConnections[connname]\n except:\n import traceback\n traceback.print_exc()\n raise IncompatibleObjectException(cls, Storage)\n else:\n raise IncompatibleObjectException(getattr(module, key), Storage)\n else:\n raise ModuleNotFoundException(implementation)", "def getFileSpaceByName(fs_name):\n result = None\n session = Queries.createSession()\n try:\n result = session.execute(sqlalchemy.select([FileSpace]).where(FileSpace.storage_name == fs_name)).fetchone()\n except sqlalchemy.exc.ArgumentError:\n print 'SQLAlchemy ERROR: Invalid or conflicting function argument is supplied'\n except sqlalchemy.exc.CompileError:\n print 'SQLAlchemy ERROR: Error occurs during SQL compilation'\n finally:\n session.close()\n return result", "def get_data_provider_by_name(name, train_params):\n if name == 'UCF101':\n return DataProvider(**train_params)\n if name == 'MERL':\n return DataProvider(**train_params)\n if name == 'KTH':\n return DataProvider(**train_params)\n else:\n print(\"Sorry, data provider for `%s` dataset \"\n \"was not implemented yet\" % name)\n exit()", "def get_cls(dataset_name):\n return find_dataset_using_name(dataset_name)", "def get_pool_info(_ns, pool, human_friendly):\n size = size2str(pool.TotalManagedSpace, human_friendly)\n return (pool.InstanceID,\n pool.ElementName,\n pool.ElementName,\n size,\n \"volume group (LVM)\")", "def get_storage(self, schema, storage, path, params=None):\n return self.storages[storage](schema, path, params)", "def setup_device_pool(project_arn, device_pool_name):\n\n 
target_device_pool_arn = ''\n is_device_pool_exists = False\n for device_pool in device_farm.list_device_pools(arn=project_arn)[\n 'devicePools']:\n pool_name = device_pool['name']\n if pool_name == device_pool_name:\n print('{} already exists'.format(pool_name))\n target_device_pool_arn = device_pool['arn']\n is_device_pool_exists = True\n break\n else:\n is_device_pool_exists = False\n\n if not is_device_pool_exists:\n target_device_pool_arn = create_device_pool(\n device_pool_name, project_arn)\n\n return target_device_pool_arn\n\n raise KeyError('Problem finding device pool %r' % device_pool_name)", "def get_backend_class(backend):\n # NOTE(sirp): avoiding circular import\n from glance.store.http import HTTPBackend\n from glance.store.s3 import S3Backend\n from glance.store.swift import SwiftBackend\n from glance.store.filesystem import FilesystemBackend\n\n BACKENDS = {\n \"file\": FilesystemBackend,\n \"http\": HTTPBackend,\n \"https\": HTTPBackend,\n \"swift\": SwiftBackend,\n \"s3\": S3Backend}\n\n try:\n return BACKENDS[backend]\n except KeyError:\n raise UnsupportedBackend(\"No backend found for '%s'\" % backend)", "def _is_file_pool(self, pool_name):\n\t\ttry:\n\t\t\treturn self.storage_pools[pool_name].type in ('dir', 'fs', 'netfs')\n\t\texcept KeyError:\n\t\t\treturn False", "def _get_backend_module(name):\n if name == \"numpy\":\n import numpy as np\n\n return np\n if name == \"numpy.ma\":\n import numpy as np\n\n return np.ma\n if name == \"torch\":\n import torch\n\n return torch\n if name == \"jax\":\n import jax\n import jax.numpy as jnp\n\n _JAX_KEY = jax.random.PRNGKey(0)\n return jnp\n if name == \"tensorflow\":\n import tensorflow as tf\n\n return tf", "def _getDriver(self):\n if not hasattr(self, '_driver'):\n with self._getDatasetLock:\n if not self.dataset or not self.dataset.GetDriver():\n self._driver = None\n else:\n self._driver = self.dataset.GetDriver().ShortName\n return self._driver", "def new_driver(name=\"chrome\"):\n if not name in DRIVERS:\n raise Exception(\"No driver support for '%s'\" % name)\n return DRIVERS[name]()", "def show_pool(self, pool, **_params):\r\n return self.get(self.pool_path % (pool), params=_params)", "def guid_fast_impl(pool):\n with open(f'/proc/spl/kstat/zfs/{pool}/guid') as f:\n return f.read().strip()", "def get_driver(self, **kwargs) -> Driver:\n from squirrel.framework.plugins.plugin_manager import squirrel_plugin_manager\n\n plugins: list[list[type[Driver]]] = squirrel_plugin_manager.hook.squirrel_drivers()\n for plugin in plugins:\n for driver_cls in plugin:\n if driver_cls.name == self.driver_name:\n # Problem: If users provide \"storage_options\" in the `kwargs` and the `self.driver_kwargs`\n # already defines \"storage_options\", then vanilla dict merging\n # (i.e., {**self.driver_kwargs, **kwargs}) will overwrite the \"storage_options\" in\n # `self.driver_kwargs` entirely. 
This is undesired, since important information like\n # bucket configurations (e.g., \"requester_pays\") may be stored in the `self.driver_kwargs`\n # \"storage_options\", which users don't want to provide again using `kwargs`.\n # Solution: The below mechanism merges the \"storage_options\" in `kwargs` with the existing\n # \"storage_options\" in `self.driver_kwargs` (while the newly passed \"storage_options\"\n # in `kwargs` take precendence).\n kwargs[\"storage_options\"] = {\n **self.driver_kwargs.get(\"storage_options\", {}),\n **kwargs.get(\"storage_options\", {}),\n }\n return driver_cls(catalog=self._catalog, **{**self.driver_kwargs, **kwargs})\n\n raise ValueError(f\"driver {self.driver_name} not found\")", "def get_engine(self, db_name):\n pass", "def get_database(self, instance, name):\n return instance.get_database(name)", "def get_storage_engine(settings=None):\n if not settings:\n settings = global_settings\n\n return _setup_engine(settings.STORAGE[\"engine\"], settings.STORAGE[\"params\"])", "def _available_space( self, pool_name ):\n\t\ttry:\n\t\t\treturn self.storage_pools[pool_name].available\n\t\texcept KeyError:\n\t\t\treturn -1", "def pool_selected( self, object ):\n\t\tud.debug( ud.ADMIN, ud.INFO, 'UVMM.DW.ps(node_uri=%s)' % self.node_uri)\n\t\tpool_name = object.options.get('pool-name')\n\t\tif not pool_name:\n\t\t\tpool_name = object.options['pool-name'] = 'default'\n\t\tdrive_type = object.options['drive-type']\n\t\ttry:\n\t\t\tif drive_type == 'cdrom':\n\t\t\t\tvols = self.uvmm.storage_pool_volumes(self.node_uri, pool_name, 'cdrom')\n\t\t\telse:\n\t\t\t\tvols = self.uvmm.storage_pool_volumes(self.node_uri, pool_name, 'disk' )\n\t\texcept uvmmd.UvmmError, e:\n\t\t\tvols = ()\n\t\tud.debug(ud.ADMIN, ud.INFO, 'UVMM.DW.ps: volumes=%s' % map(str, vols))\n\t\tchoices = []\n\t\tfor vol in vols:\n\t\t\tbasename = os.path.basename( vol.source )\n\t\t\tif '.' in basename:\n\t\t\t\tsuffix = basename[ basename.rfind( '.' ) + 1 : ]\n\t\t\t\tif suffix in ( 'xml', 'snapshot' ):\n\t\t\t\t\tcontinue\n\t\t\tchoices.append( basename )\n\t\tchoices.sort()\n\t\tself.image_syntax.update_choices( choices )\n\n\t\t# recreate pool button\n\t\tbtn = self._create_pool_select_button( object.options )\n\t\tself[DriveWizard.PAGE_OLD].options[0] = btn\n\t\tself[DriveWizard.PAGE_NEW].options[0] = btn\n\t\t# recreate driver-type button\n\t\titems = [self[DriveWizard.PAGE_NEW].options[2].id(), self[DriveWizard.PAGE_NEW].options[3].id()]\n\t\tbtn = self._create_type_select_button(object.options, items)\n\t\tself[DriveWizard.PAGE_NEW].options[1] = btn\n\n\t\tif drive_type == 'disk':\n\t\t\tself[DriveWizard.PAGE_OLD].hint = None\n\t\telif drive_type in ( 'cdrom', 'floppy' ):\n\t\t\tif self.image_syntax._choices:\n\t\t\t\tmsg = _( \"If the required image is not found it might be added by copying the file into the storage pool, e.g. to /var/lib/libvirt/images/ which is the directory of the storage pool <i>local directory</i>. After that go to the previous page and return to this one. The image should now be listed.\" )\n\t\t\telse:\n\t\t\t\tmsg = _( \"The list of available images is empty! To add an image the file needs to be copied into the storage pool, e.g. to /var/lib/libvirt/images/ which is the directory of the storage pool <i>local directory</i>. After that go to the previous page and return to this one. 
The image should now be listed.\" )\n\t\t\tself[DriveWizard.PAGE_OLD].hint = msg\n\t\t\tself[DriveWizard.PAGE_OLD].description = ''\n\t\telse:\n\t\t\traise ValueError('Invalid drive-type \"%s\"' % drive_type)\n\n\t\treturn self.type_selected(object)", "def driver_from_extension(file_extension: str) -> str:\n file_extension = file_extension.lstrip(\".\")\n all_drivers_extensions = {}\n for v in drivers:\n driver = v.load()\n try:\n driver_extensions = driver.METADATA.get(\"file_extensions\", []).copy()\n all_drivers_extensions[driver.METADATA[\"driver_name\"]] = driver_extensions\n if driver_extensions and file_extension in driver_extensions:\n return driver.METADATA[\"driver_name\"]\n except AttributeError: # pragma: no cover\n pass\n else:\n raise ValueError(\n f\"driver name for file extension {file_extension} could not be found: {all_drivers_extensions}\"\n )", "def _get_db(self, db_name: str) -> shelve.DbfilenameShelf:\n db_path = os.path.join(self.cache_folder, db_name)\n db = shelve.open(db_path)\n logging.info(f'Opened cache file {db_path!r}')\n return db", "def driver_name(self):\n return self._driver_name", "def get_db(db_label):\n defaults = get_defaults()\n db_name = defaults[db_label]\n m = re.match('(\\w+)://.*?/([\\w.]+)', db_name)\n if m is None:\n logger.error(\"Poorly formed db name: %s\" % db_name)\n return\n sqltype = m.groups()[0]\n return DatabaseManager(db_name, sqltype=sqltype, label=db_label)", "def remove_pool(ctx, pool_name):\n \n entryFound = False\n table = \"NAT_POOL\"\n key = pool_name\n\n if len(pool_name) > 32:\n ctx.fail(\"Invalid pool name. Maximum allowed pool name is 32 characters !!\")\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n data = config_db.get_entry(table, key)\n if not data:\n click.echo(\"Trying to delete pool, which is not present.\")\n entryFound = True\n\n binding_dict = config_db.get_table('NAT_BINDINGS')\n if binding_dict and entryFound == False: \n for binding_name, binding_values in binding_dict.items():\n if binding_values['nat_pool'] == pool_name:\n click.echo(\"Pool is not removed, as it is mapped to Binding {}, remove the pool binding first !!\".format(binding_name))\n entryFound = True\n break\n\n if entryFound == False:\n config_db.set_entry(table, key, None)", "def connect_to_database(name, settings):\n if name == 'mongodb':\n import models.mongodb\n database = models.mongodb.Database(settings)\n if database.check_connection():\n return database\n else:\n name = 'memory'\n\n if name == 'memory':\n import models.memory\n database = models.memory.Database(settings)\n return database\n\n raise ValueError('Unknown database.')", "def get_storage(local_path=None, redis_index=None):\n from config import STORAGE\n if STORAGE[\"Method\"] == \"local\":\n return LocalStorage(path=local_path or STORAGE.get(\"LocalPath\"))\n elif STORAGE[\"Method\"] == \"redis\":\n return RedisStorage(\n index=redis_index or STORAGE.get(\"RedisIndex\"),\n redis_url=STORAGE.get(\"RedisURL\")\n )\n else:\n raise ValueError(\"Invalid storage method\")" ]
[ "0.70222354", "0.6526476", "0.6414902", "0.63886523", "0.6257161", "0.6163656", "0.6162145", "0.61483705", "0.61444455", "0.61236185", "0.5998005", "0.59904814", "0.5983413", "0.58813095", "0.58759", "0.58657277", "0.58505595", "0.58058536", "0.5719337", "0.56997657", "0.56792563", "0.5666139", "0.56340885", "0.5611636", "0.55644166", "0.555749", "0.5508825", "0.5497287", "0.5496863", "0.547289", "0.5471842", "0.5391433", "0.5359493", "0.5345725", "0.5338422", "0.53207314", "0.5320055", "0.53155357", "0.5294707", "0.5294642", "0.52810365", "0.52801204", "0.52649564", "0.5235591", "0.5229767", "0.52285594", "0.51844317", "0.5164509", "0.5156784", "0.5155248", "0.5151746", "0.51439506", "0.5140495", "0.510892", "0.50758827", "0.5068999", "0.5061453", "0.505992", "0.50401", "0.5033541", "0.50200003", "0.5016477", "0.5012898", "0.50099266", "0.5004988", "0.49982432", "0.49973035", "0.4996877", "0.49907854", "0.49828818", "0.49792805", "0.49759588", "0.49559948", "0.49482378", "0.49468186", "0.4932647", "0.49258724", "0.49207345", "0.4918449", "0.49175435", "0.49116543", "0.4910319", "0.49003938", "0.48794284", "0.48766413", "0.48568958", "0.48491547", "0.4848468", "0.48484373", "0.48393098", "0.48369232", "0.48323995", "0.482976", "0.48282355", "0.4828069", "0.4827514", "0.4819293", "0.48182985", "0.48140666", "0.48140514" ]
0.548844
29
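The record above (the `_init_driver` document plus the cached `get_driver` snippet among its negatives) follows a lazy pool-to-driver cache pattern: the driver for a pool is built once from the pool record and reused on later lookups. The sketch below is an illustrative stand-in only — `Catalog`, the dict-valued "driver", and the assumed `pools_ctrl.get(pool_id, detailed=True)` interface are simplifications for the example, not the project's real API.

```python
class Catalog:
    """Illustrative sketch only -- names are simplified, not the real API."""

    def __init__(self, pools_ctrl):
        self._pools_ctrl = pools_ctrl   # assumed to expose .get(pool_id, detailed=True)
        self._drivers = {}              # pool_id -> cached driver instance

    def _init_driver(self, pool_id):
        # Expensive path: read the pool record and build a storage driver.
        pool = self._pools_ctrl.get(pool_id, detailed=True)
        return {'uri': pool['uri'], 'options': pool['options']}  # stand-in for a real driver

    def get_driver(self, pool_id):
        # Build once, then reuse the cached connection on later lookups,
        # mirroring the get_driver()/_init_driver() pair in the record above.
        if pool_id not in self._drivers:
            self._drivers[pool_id] = self._init_driver(pool_id)
        return self._drivers[pool_id]
```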
Get the ID for the pool assigned to the given queue.
def _pool_id(self, queue, project=None): return self._catalogue_ctrl.get(project, queue)['pool']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def _get_work_pool_queue_id_from_name(\n self, session: AsyncSession, work_pool_name: str, work_pool_queue_name: str\n ) -> UUID:\n work_pool_queue = await models.workers.read_work_pool_queue_by_name(\n session=session,\n work_pool_name=work_pool_name,\n work_pool_queue_name=work_pool_queue_name,\n )\n if not work_pool_queue:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Worker queue '{work_pool_name}/{work_pool_queue_name}' not found.\",\n )\n\n return work_pool_queue.id", "def lookup(self, queue, project=None):\n\n try:\n pool_id = self._pool_id(queue, project)\n except errors.QueueNotMapped as ex:\n LOG.debug(ex)\n\n return self.get_default_pool(use_listing=False)\n\n return self.get_driver(pool_id)", "def QueueId(self):\n\t\treturn self._get_attribute('queueId')", "def get_id(self, name, tenant=None):\n queue = self._get(name, tenant, fields=[\"_id\"])\n return queue.get(\"_id\")", "def pool_id ( self ):\n return self._pool_id", "def get_pool_id(pool_name, host=None):\n cmd = utils.XMS_CLI_HEADER + \"-f json pool list\"\n print cmd\n ret = utils.execute_cmd_in_host(cmd, host)\n if ret[2] != 0 or isinstance(ret[0], dict):\n print \"[Error] Failed to get pool info. Error message: [{err}]\".format(err=ret[1])\n return -1\n try:\n pool_info = json.loads(ret[0])\n pools = pool_info[\"pools\"]\n for p in pools:\n if pool_name == p[\"name\"]:\n return p[\"id\"]\n except Exception as e:\n print \"[Error] error message is: \" + e.message\n return -1", "def get_pid(self, pid):\n for p in self._queue:\n if p.id == pid:\n return p\n else: return 0", "def get_worker_id_queue():\n global _WORKER_ID_QUEUE\n if _WORKER_ID_QUEUE is None:\n _WORKER_ID_QUEUE = multiprocessing.Queue()\n return _WORKER_ID_QUEUE", "def _get_id(self) -> int:\n if len(self._id_pool) == 0:\n raise ArchonError(\"No ids reamining in the pool!\")\n return self._id_pool.pop()", "def get_queue_number(self):\n outstring = self.dut.send_expect(\"stop\", \"testpmd> \")\n time.sleep(2)\n result_scanner = r\"Forward Stats for RX Port= %s/Queue=\\s?([0-9]+)\" % self.dut_ports[0]\n scanner = re.compile(result_scanner, re.DOTALL)\n m = scanner.search(outstring)\n queue_id = m.group(1)\n print \"queue is %s\" % queue_id\n self.dut.send_expect(\"start\", \"testpmd> \")\n return queue_id", "def resource_pool_id(self) -> str:\n return pulumi.get(self, \"resource_pool_id\")", "def identity_pool_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"identity_pool_id\")", "def identity_pool_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"identity_pool_id\")", "def service_bus_queue_endpoint_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"service_bus_queue_endpoint_id\")", "def elastic_pool_id(self) -> Optional[str]:\n return pulumi.get(self, \"elastic_pool_id\")", "def _shard_id(self, queue, project=None):\n cache_key = _shard_cache_key(queue, project)\n shard_id = self._cache.get(cache_key)\n\n if shard_id is None:\n shard_id = self._catalogue_ctrl.get(project, queue)['shard']\n\n if not self._cache.set(cache_key, shard_id, _SHARD_CACHE_TTL):\n LOG.warn('Failed to cache shard ID')\n\n return shard_id", "def identity_pool_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"identity_pool_id\")", "def service_bus_queue_endpoint_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_bus_queue_endpoint_id\")", "def service_bus_queue_endpoint_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, 
\"service_bus_queue_endpoint_id\")", "def get_queue(self, task_name):\n for name, queue in self.queues.items():\n if task_name in queue:\n return name\n return self.default_queue", "async def _get_default_work_pool_queue_id_from_work_pool_name(\n self, session: AsyncSession, work_pool_name: str\n ):\n work_pool = await models.workers.read_work_pool_by_name(\n session=session,\n work_pool_name=work_pool_name,\n )\n if not work_pool:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f'Worker pool \"{work_pool_name}\" not found.',\n )\n\n return work_pool.default_queue_id", "async def _get_work_pool_id_from_name(\n self, session: AsyncSession, work_pool_name: str\n ) -> UUID:\n work_pool = await models.workers.read_work_pool_by_name(\n session=session,\n work_pool_name=work_pool_name,\n )\n if not work_pool:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f'Worker pool \"{work_pool_name}\" not found.',\n )\n\n return work_pool.id", "def get_queue_name(self):\n return self._graph_executor.get_queue_name()", "def __getNewIPpoolID(self):\n return db_main.getHandle().seqNextVal(\"ippool_id_seq\")", "def source_ipam_pool_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"source_ipam_pool_id\")", "def pop(self, pid):\n for p in self._queue:\n if p.id == pid:\n return self._queue.pop(self._queue.index(p)).id\n return 0", "def get_rabbit_queue():\n\n return \"metrics_queue\"", "def lookup(self, queue, project=None):\n\n try:\n shard_id = self._shard_id(queue, project)\n except errors.QueueNotMapped as ex:\n LOG.debug(ex)\n\n # NOTE(kgriffs): Return `None`, rather than letting the\n # exception bubble up, so that the higher layer doesn't\n # have to duplicate the try..except..log code all over\n # the place.\n return None\n\n return self.get_driver(shard_id)", "def get_queue_num(self, qos_id, queue_id):\n\n q_num = None\n queues = self.qos_dict[qos_id][\"ovsdb:qos-entries\"][0][\"queue-list\"]\n\n # Go through all queues\n for queue in queues:\n cur_queue_id = queue[\"queue-ref\"].split(\"'\")[-2]\n # If we have a match, get the q_num and break\n if cur_queue_id == queue_id:\n q_num = queue[\"queue-number\"]\n break\n\n # queue_id is not found in the qos\n if q_num is None:\n #print(json.dumps(self.qos_dict[qos_id], indent=3))\n raise KeyError\n\n return q_num", "def get_id(self):\n\n self.redis.setnx('job_id', '-1')\n return self.redis.incr('job_id')", "def task_id(self) -> str:\n return self.get_from_redis(\"task_id\")", "def queue_name(is_parallel):\n return QUEUE_NAMES[int(bool(is_parallel))]", "def get_queue(queue_name=\"\"):\n print(get_qstat_arg(queue_name))\n q = subprocess.Popen(\n _get_qstat_arg(queue_name), stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, stdin=subprocess.PIPE\n )\n o, e = q.communicate()\n\n return o", "def source_ipam_pool_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"source_ipam_pool_id\")", "def source_ipam_pool_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"source_ipam_pool_id\")", "def lookup(self, queue, project=None):\n\n # TODO(kgriffs): SHARDING - Raise an exception if the queue\n # does not have a mapping (it does not exist).\n\n # TODO(kgriffs): SHARDING - Get ID from the catalog backend\n shard_id = '[insert_id]'\n try:\n shard = self._shards[shard_id]\n except KeyError:\n self._shards[shard_id] = shard = self._init_shard(shard_id)\n\n return shard", "def queue_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"queue_name\")", 
"def magma_queue_get_device(queue):\n\n return _libmagma.magma_queue_get_device(queue)", "def _get_cache_identifier():\n return '{}:{}'.format(os.getpid(), threading.get_ident())", "def id(self):\n return self.job_proto.id", "async def get_queue(self, ctx: commands.Context) -> Optional[QueueManager]:\n\n return self.queue[ctx.guild.id]", "def getQueueDetails(self, queue_name, project_id=\"\"):\n if project_id == \"\":\n project_id = self.project_id\n url = \"%sprojects/%s/queues/%s?oauth=%s\" % (self.url, project_id,\n queue_name, self.token)\n body = self.__get(url)\n queue = json.loads(body)\n return queue", "def _retrieve_job_id(job_name, res_id):\n active_jobs = celery_inspector.active()\n job_id = _retrieve_task_id(job_name, res_id, active_jobs)\n if not job_id:\n reserved_jobs = celery_inspector.reserved()\n job_id = _retrieve_task_id(job_name, res_id, reserved_jobs)\n if not job_id:\n scheduled_jobs = celery_inspector.scheduled()\n job_id = _retrieve_task_id(job_name, res_id, scheduled_jobs)\n return job_id", "def task_id(self):\n return self._mpis.task_id", "def get_queue_settings(qid):\r\n db = get_db()\r\n rows = query_db(GET_QUEUE_SETTINGS_BY_ID, (qid,))\r\n if (not rows) or (len(rows) == 0):\r\n raise sqlite3.Error('The queue does not exist.')\r\n return rows[0]", "def AllocId(self, pool='default'):\n\n if self.__free_ids:\n idrange = self.__free_ids.pop()\n result = idrange.start\n if idrange.start < idrange.stop:\n self.__free_ids.append(self.IdRange(idrange.start+1, idrange.stop))\n else:\n result = self.__idcounter\n self.__idcounter += 1\n allocated_ranges = self.__idpools.get(pool)\n if allocated_ranges is None:\n allocated_ranges = []\n self.__idpools[pool] = allocated_ranges\n for index, idrange in enumerate(allocated_ranges):\n if result == idrange.start-1:\n idrange = self.IdRange(result, idrange.stop)\n allocated_ranges[index] = idrange\n break\n elif result == idrange.stop+1:\n idrange = self.IdRange(idrange.start, result)\n allocated_ranges[index] = idrange\n break\n else:\n allocated_ranges.append(self.IdRange(result, result))\n return result", "def register(self, queue, project=None, flavor=None):\n\n # NOTE(gengchc): if exist, get queue's pool.flavor:\n # if queue's pool.flavor is different, first delete it and add it.\n # Otherwise, if the flavor in the meteredata of the queue is\n # modified, the catalog will be inconsistent.\n if self._catalogue_ctrl.exists(project, queue):\n catalogue = self._catalogue_ctrl.get(project, queue)\n oldpoolids = catalogue['pool']\n oldpool = self._pools_ctrl.get(oldpoolids)\n oldflavor = oldpool['flavor']\n msgtmpl = _(u'register queue to pool: old flavor: %(oldflavor)s '\n ', new flavor: %(flavor)s')\n LOG.info(msgtmpl,\n {'oldflavor': oldflavor, 'flavor': flavor})\n if oldpool['flavor'] != flavor:\n self._catalogue_ctrl.delete(project, queue)\n\n if not self._catalogue_ctrl.exists(project, queue):\n if flavor is not None:\n flavor = self._flavor_ctrl.get(flavor, project=project)\n pools = self._pools_ctrl.get_pools_by_flavor(\n flavor=flavor,\n detailed=True)\n pool = select.weighted(pools)\n pool = pool and pool['name'] or None\n msgtmpl = _(u'register queue to pool: new flavor:%(flavor)s')\n LOG.info(msgtmpl,\n {'flavor': flavor.get('name', None)})\n else:\n # NOTE(flaper87): Get pools assigned to the default\n # group `None`. 
We should consider adding a `default_group`\n # option in the future.\n pools = self._pools_ctrl.get_pools_by_flavor(detailed=True)\n pool = select.weighted(pools)\n pool = pool and pool['name'] or None\n\n if not pool:\n # NOTE(flaper87): We used to raise NoPoolFound in this\n # case but we've decided to support automatic pool\n # creation. Note that we're now returning and the queue\n # is not being registered in the catalogue. This is done\n # on purpose since no pool exists and the \"dummy\" pool\n # doesn't exist in the storage\n if self.lookup(queue, project) is not None:\n return\n raise errors.NoPoolFound()\n msgtmpl = _(u'register queue to pool: new flavor: None')\n LOG.info(msgtmpl)\n\n msgtmpl = _(u'register queue: project:%(project)s'\n ' queue:%(queue)s pool:%(pool)s')\n LOG.info(msgtmpl,\n {'project': project,\n 'queue': queue,\n 'pool': pool})\n self._catalogue_ctrl.insert(project, queue, pool)", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def get_queue_items(self, queue_name):\n proc = start_proc([\"/usr/bin/sudo\", \"rabbitmqctl\", \"list_queues\"],\n shell=False)\n for line in iter(proc.stdout.readline, \"\"):\n print(\"LIST QUEUES:\" + line)\n m = re.search(r\"%s\\s+([0-9]+)\" % queue_name, line)\n if m:\n return int(m.group(1))\n return None", "def search_queue_number(self, Q_strip):\n if Q_strip is self.PF_Q_strip:\n out = self.dut.send_expect(\"cat config/common_base\", \"]# \", 10)\n pattern = \"(%s=)(\\d*)\" % Q_strip\n else :\n out = self.dut.send_expect(\"cat drivers/net/i40e/i40e_ethdev.c\", \"]# \", 10)\n pattern = \"#define %s\\s*(\\d*)\" % Q_strip\n s = re.compile(pattern)\n res = s.search(out)\n if res is None:\n print utils.RED('Search no queue number.')\n return None\n else:\n if Q_strip is self.VF_Q_strip:\n queue = res.group(1)\n else :\n queue = res.group(2)\n return int(queue)", "def _get_id(self):\n return self.id", "def get_queue(self):\n return self.queue", "def get_queue(self):\n return self.queue", "def id(self) -> ContainerID:\n _args: list[Arg] = []\n _ctx = self._select(\"id\", _args)\n return _ctx.execute_sync(ContainerID)", "def get_pool ( self ):\n if self._poolstack:\n return self._poolstack[-1]\n else:\n return self.get_new_pool ( force=True )", "def pop_queue(self, queue=None):\n if not queue:\n return False\n \n cur = self.conn.cursor()\n cur.execute(\"LOCK TABLE \" + queue + \" IN ACCESS EXCLUSIVE MODE;\")\n\n cur.execute(\"SELECT id FROM \" + queue + \" LIMIT 1;\")\n row = cur.fetchone()\n self.conn.commit()\n \n if row:\n cur.execute(\"DELETE FROM \" + queue + \" WHERE id='\"+str(row[0])+\"';\")\n return row[0]\n else:\n return False", "def get_id(self):\n return self.iid", "def get_queue_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.queue_controller", "def main_url(self) -> pulumi.Output[str]:\n return self.main_queue.id", "def 
_get_queue_type(self):\n return self.__queue_type", "def _get_queue_type(self):\n return self.__queue_type", "def _get_queue_type(self):\n return self.__queue_type", "def _get_queue_type(self):\n return self.__queue_type", "def _get_queue_type(self):\n return self.__queue_type", "def _get_queue_type(self):\n return self.__queue_type", "def get_current_id(self):\n\n id = self.ids[-1]\n\n if id is None:\n raise KeyError()\n\n return id", "def getid(self):\n return self.__id", "def get_queue(self):\n if self.queue is not None:\n return self.queue\n state = self.get_state()\n self.queue = state.get_queue()\n # print(\"IQ\", self.queue)\n return self.queue", "def get_id(self):\n if hasattr(self, \"_thread_id\"):\n return self._thread_id\n for id, thread in threading._active.items():\n if thread is self:\n return id", "def get_id(self):\n if hasattr(self, \"_thread_id\"):\n return self._thread_id\n for id, thread in threading._active.items():\n if thread is self:\n return id", "def QueueConfigPortNumber(self):\n\t\treturn self._get_attribute('queueConfigPortNumber')", "def get_pool():\n app = get_app()\n return app['pool']", "def current_worker_pool():\n try:\n return worker_thread_data.pool\n except AttributeError:\n return None", "def get_id(self):\n return self._id", "def get_id(self):\n return self._id", "def get_id(self):\n return self._id", "def get_id(self):\n return self._id", "def get_id(self):\n return self._id", "def get_id(self):\n return self._id", "def get_id(self):\n return self.__id", "def get_id(self):\n return self.__id", "def _get_job_id(self):\n return uuid.uuid4().hex", "def pool(self):\n return self._properties.get('pool')", "def _get_pool_path( self, pool_name ):\n\t\ttry:\n\t\t\treturn self.storage_pools[pool_name].path\n\t\texcept KeyError:\n\t\t\treturn ''", "def get_id(self):\n for id, thread in threading._active.items(): \n if thread is self: \n return id", "def get_queue(self, window_info, original_kwargs):\n if callable(self.queue):\n return str(self.queue(self, window_info, original_kwargs))\n return str(self.queue) or settings.CELERY_DEFAULT_QUEUE" ]
[ "0.7236343", "0.7009467", "0.6953268", "0.6914265", "0.6697232", "0.66893286", "0.6669339", "0.6644398", "0.65078753", "0.6422109", "0.63314754", "0.63168746", "0.63056153", "0.62965095", "0.6249178", "0.6227463", "0.62212545", "0.61551255", "0.61551255", "0.6059136", "0.60493004", "0.5949522", "0.59359086", "0.5933539", "0.5902253", "0.5891454", "0.58842653", "0.5871895", "0.5837569", "0.58102417", "0.5797481", "0.5749949", "0.5739031", "0.57279515", "0.57279515", "0.56608796", "0.56554854", "0.56530356", "0.55235964", "0.55068743", "0.54945475", "0.5467458", "0.54645586", "0.5463487", "0.54573286", "0.5444123", "0.54434997", "0.54397583", "0.54397583", "0.54397583", "0.54397583", "0.54397583", "0.54397583", "0.54397583", "0.54397583", "0.54397583", "0.54397583", "0.54397583", "0.54397583", "0.54397583", "0.54397583", "0.54397583", "0.5434865", "0.5418718", "0.5415388", "0.53889793", "0.53889793", "0.5378229", "0.5365838", "0.53648645", "0.5345631", "0.53367805", "0.5329483", "0.53267", "0.53267", "0.53267", "0.53267", "0.53267", "0.53267", "0.5323061", "0.5317948", "0.53172094", "0.5316139", "0.5316139", "0.5315452", "0.5306482", "0.5303448", "0.52852607", "0.52852607", "0.52852607", "0.52852607", "0.52852607", "0.52852607", "0.5275218", "0.5275218", "0.52705204", "0.5268838", "0.5266474", "0.52641165", "0.5256418" ]
0.8461307
0
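The `_pool_id` record above consults a catalogue controller that is not shown in the row itself. The toy mapping below is an assumed stand-in that illustrates the (project, queue) → pool lookup it performs; the class and method names are hypothetical, chosen only to match the calls visible in the snippet.

```python
class InMemoryCatalogue:
    """Toy stand-in for the catalogue controller queried by _pool_id() above."""

    def __init__(self):
        self._entries = {}   # (project, queue) -> {'pool': pool_name}

    def insert(self, project, queue, pool):
        self._entries[(project, queue)] = {'pool': pool}

    def exists(self, project, queue):
        return (project, queue) in self._entries

    def get(self, project, queue):
        try:
            return self._entries[(project, queue)]
        except KeyError:
            # The real controller raises a queue-not-mapped error here.
            raise LookupError('queue %r is not mapped to a pool' % queue)

# With such a controller, _pool_id(queue, project) reduces to:
#     catalogue.get(project, queue)['pool']
```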
Register a new queue in the pool catalog. This method should be called whenever a new queue is being created, and will create an entry in the pool catalog for the given queue. After using this method to register the queue in the catalog, the caller should call `lookup()` to get a reference to a storage driver which will allow interacting with the queue's assigned backend pool.
def register(self, queue, project=None, flavor=None): # NOTE(gengchc): if exist, get queue's pool.flavor: # if queue's pool.flavor is different, first delete it and add it. # Otherwise, if the flavor in the meteredata of the queue is # modified, the catalog will be inconsistent. if self._catalogue_ctrl.exists(project, queue): catalogue = self._catalogue_ctrl.get(project, queue) oldpoolids = catalogue['pool'] oldpool = self._pools_ctrl.get(oldpoolids) oldflavor = oldpool['flavor'] msgtmpl = _(u'register queue to pool: old flavor: %(oldflavor)s ' ', new flavor: %(flavor)s') LOG.info(msgtmpl, {'oldflavor': oldflavor, 'flavor': flavor}) if oldpool['flavor'] != flavor: self._catalogue_ctrl.delete(project, queue) if not self._catalogue_ctrl.exists(project, queue): if flavor is not None: flavor = self._flavor_ctrl.get(flavor, project=project) pools = self._pools_ctrl.get_pools_by_flavor( flavor=flavor, detailed=True) pool = select.weighted(pools) pool = pool and pool['name'] or None msgtmpl = _(u'register queue to pool: new flavor:%(flavor)s') LOG.info(msgtmpl, {'flavor': flavor.get('name', None)}) else: # NOTE(flaper87): Get pools assigned to the default # group `None`. We should consider adding a `default_group` # option in the future. pools = self._pools_ctrl.get_pools_by_flavor(detailed=True) pool = select.weighted(pools) pool = pool and pool['name'] or None if not pool: # NOTE(flaper87): We used to raise NoPoolFound in this # case but we've decided to support automatic pool # creation. Note that we're now returning and the queue # is not being registered in the catalogue. This is done # on purpose since no pool exists and the "dummy" pool # doesn't exist in the storage if self.lookup(queue, project) is not None: return raise errors.NoPoolFound() msgtmpl = _(u'register queue to pool: new flavor: None') LOG.info(msgtmpl) msgtmpl = _(u'register queue: project:%(project)s' ' queue:%(queue)s pool:%(pool)s') LOG.info(msgtmpl, {'project': project, 'queue': queue, 'pool': pool}) self._catalogue_ctrl.insert(project, queue, pool)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register(self, queue, project=None):\n # NOTE(cpp-cabrera): only register a queue if the entry\n # doesn't exist\n if not self._catalogue_ctrl.exists(project, queue):\n # NOTE(cpp-cabrera): limit=0 implies unlimited - select from\n # all shards\n shard = select.weighted(self._shards_ctrl.list(limit=0))\n\n if not shard:\n raise errors.NoShardFound()\n\n self._catalogue_ctrl.insert(project, queue, shard['name'])", "def add_queue(self, queue):\n\n queue_id = queue[\"ovsdb:queues\"][0][\"queue-id\"]\n self.queue_dict[queue_id] = queue", "def register(self, queue, project=None):\n\n # TODO(kgriffs): SHARDING - Implement this!\n pass", "def register_queue(self, queue) -> None:\r\n if queue is None:\r\n raise ValueError('queue is None')\r\n if not hasattr(queue, 'empty'):\r\n raise ValueError(f'queue {queue} is missing empty member')\r\n if not hasattr(queue, 'get_nowait'):\r\n raise ValueError(f'queue {queue} is missing get_nowait member')\r\n self.receive_queues.append(queue)", "def add_queue(self, queue):\n with self.mutex:\n self.queues.append(queue)", "def declare_queue(self, c, queue_name):\n return self._management_req(\n c, 'PUT', ['queues', urlquote(c.vhost, safe=''), queue_name],\n {\"auto_delete\": False, \"durable\": False, \"arguments\": {}})", "def create_queue(self):\n queue_name = self.generate_name()\n try:\n queue = self.sqs.create_queue(QueueName=queue_name)\n except Exception as e:\n raise RuntimeError('SQS could create queue: %s' % e)\n self.queue_name, self.queue = queue_name, queue", "def add_queue(self, queue_name, alt_exchange_name=None, passive=False, durable=False, arguments = None):\n amqp_session = self.__broker.getAmqpSession()\n if arguments == None:\n arguments = {}\n if alt_exchange_name:\n amqp_session.queue_declare(queue_name, alternate_exchange=alt_exchange_name, passive=passive,\n durable=durable, arguments=arguments)\n else:\n amqp_session.queue_declare(queue_name, passive=passive, durable=durable, arguments=arguments)", "def _queue_create(self, **kwargs):\n name = self.generate_random_name()\n return self.clients(\"zaqar\").queue(name, **kwargs)", "def lookup(self, queue, project=None):\n\n try:\n pool_id = self._pool_id(queue, project)\n except errors.QueueNotMapped as ex:\n LOG.debug(ex)\n\n return self.get_default_pool(use_listing=False)\n\n return self.get_driver(pool_id)", "def _create_queue(self):\n # Instantiate\n queue = pbs.queue(verbose=not self.quiet)\n\n if self.q == 'ember':\n # Submitting to Utah ember cluster\n ppn = 12\n cpus = ppn if self.cpus is None else min(self.cpus, ppn)\n walltime = self.walltime if int(self.walltime.split(':')[0]) < 72 else '72:00:00'\n queue.create(label=self.label, nodes=self.nodes, qos=self.qos, umask=self.umask,\n walltime=walltime, ppn=ppn, cpus=cpus, partition='ember', alloc='sdss')\n elif self.q is not None:\n # All other self.q values expected for Portsmouth cluster,\n # sciama. In this case, the number of nodes is queue\n # dependent, and qos is not set\n if self.q == 'sciama1.q':\n ppn = 12\n elif self.q == 'sciama3.q':\n ppn = 20\n else:\n ppn = 16\n cpus = ppn if self.cpus is None else min(self.cpus, ppn)\n queue.create(label=self.label, nodes=self.nodes, umask=self.umask,\n walltime=self.walltime, queue=self.q, ppn=ppn, cpus=cpus)\n else:\n # self.q can be None when submitting to both the Portsmouth\n # and Utah clusters. In this case, the default queue\n # destination and ppn is correct. 
qos is also set, but this\n # should only be used when submitting to Utah.\n ppn = 16\n cpus = ppn if self.cpus is None else min(self.cpus, ppn)\n queue.create(label=self.label, nodes=self.nodes, qos=self.qos, umask=self.umask,\n walltime=self.walltime, ppn=ppn, cpus=cpus)\n\n return queue", "def declare_queue(self, queue_name):\n if queue_name not in self.queues:\n self.emit_before(\"declare_queue\", queue_name)\n self.queues.add(queue_name)\n self.emit_after(\"declare_queue\", queue_name)\n\n delayed_name = dq_name(queue_name)\n self.delay_queues.add(delayed_name)\n self.emit_after(\"declare_delay_queue\", delayed_name)", "def queue(self, name):\n # First create a queue\n queue = self.inbound_channel.declare_queue(name)\n\n # Create the registry for the queue\n registry = Registry(self, queue)\n\n # Prepare consuming queue with registry\n self.inbound_channel.consume(queue=queue, callback=registry)\n\n # Then, return the Registry object.\n return registry", "def queue_maker(queue, bucket_name):\n scraper = key_scraper.KaleidoscopeKeyScraper(\n bucket_name=bucket_name,\n queue=queue,\n )\n scraper.add_keys_to_queue()\n\n return None", "def setup_queue(self, channel, queue_name):\n logger.info('Declaring queue %s', queue_name)\n channel.queue_declare(queue = queue_name, \n durable = True,\n auto_delete = False)", "def add(ctx, wf_name, wf_version, wf_owner):\n jess_url = ctx.obj.get('JT_CONFIG').get('jess_server')\n if wf_owner is None:\n wf_owner = ctx.obj.get('JT_CONFIG').get('jt_account')\n\n url = \"%s/queues/owner/%s/workflow/%s/ver/%s\" % (jess_url, wf_owner, wf_name, wf_version)\n\n r = requests.post(url)\n if r.status_code != 200:\n click.echo('Queue creation for: %s failed: %s' % (wf_owner, r.text))\n else:\n click.echo(\"Queue registration succeeded, details as below\")\n click.echo(r.text)", "async def create_work_pool_queue(\n work_pool_queue: schemas.actions.WorkPoolQueueCreate,\n work_pool_name: str = Path(..., description=\"The work pool name\"),\n worker_lookups: WorkerLookups = Depends(WorkerLookups),\n db: OrionDBInterface = Depends(provide_database_interface),\n) -> schemas.core.WorkPoolQueue:\n\n try:\n async with db.session_context(begin_transaction=True) as session:\n work_pool_id = await worker_lookups._get_work_pool_id_from_name(\n session=session,\n work_pool_name=work_pool_name,\n )\n\n model = await models.workers.create_work_pool_queue(\n session=session,\n work_pool_id=work_pool_id,\n work_pool_queue=work_pool_queue,\n db=db,\n )\n except sa.exc.IntegrityError:\n raise HTTPException(\n status_code=status.HTTP_409_CONFLICT,\n detail=\"A worker with this name already exists.\",\n )\n\n return model", "def _insert_request(self, queue: Queue, catalog: ComponentCatalogMetadata, action: str):\n # Ensure referenced runtime is available\n if not PipelineProcessorRegistry.instance().is_valid_runtime_type(catalog.runtime_type.name):\n return\n\n if self.is_server_process:\n queue.put((catalog, action))\n else:\n manifest: Dict[str, str] = self._load_manifest()\n manifest[catalog.name] = action\n self.update_manifest(manifest=manifest)", "def post_qos_queue_create(self, resource_dict):\n pass", "def queue(self, sid):\r\n return queues.Queue(self, sid)", "def add_queue(queues, host=\"localhost\", port=6379, http_host=\"localhost\",\n http_port=DEFAULT_HTTP_PORT, workers=1,\n unix_domain_socket=None):\n if http_host.startswith('/'):\n # This is an unix socket\n new_http_host = UnixResolver.register_unixsocket(http_host)\n else:\n new_http_host = http_host\n if 
isinstance(queues, six.string_types):\n Queues.add(Queue([queues], host=host, port=port,\n http_host=new_http_host,\n http_port=http_port, workers=workers,\n unix_domain_socket=unix_domain_socket))\n else:\n Queues.add(Queue(queues, host=host, port=port, http_host=new_http_host,\n http_port=http_port, workers=workers,\n unix_domain_socket=unix_domain_socket))", "def declare_queue(self):\n\n self._channel.queue_declare(queue=self._queue_name, durable=True)\n print(\"Queue declared....\")", "def new_queue() -> Queue:\n return multiprocessing.Queue()", "def add_a_queue(self, size):\n \tself.queues.append(ContextModalityQueue(size))", "def lookup(self, queue, project=None):\n\n # TODO(kgriffs): SHARDING - Raise an exception if the queue\n # does not have a mapping (it does not exist).\n\n # TODO(kgriffs): SHARDING - Get ID from the catalog backend\n shard_id = '[insert_id]'\n try:\n shard = self._shards[shard_id]\n except KeyError:\n self._shards[shard_id] = shard = self._init_shard(shard_id)\n\n return shard", "def setup_queue(self):\n self.logger.info('declaring queue %s', self.queue)\n if self.otq:\n self._channel.queue_declare(self.on_queue_declareok, self.queue, auto_delete=True)\n else:\n self._channel.queue_declare(self.on_queue_declareok, self.queue)", "def get_rabbit_queue():\n\n return \"metrics_queue\"", "def __init__(__self__, *,\n endpoint_type: pulumi.Input[str],\n queue_name: Optional[pulumi.Input[str]] = None,\n resource_id: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"endpoint_type\", 'StorageQueue')\n if queue_name is not None:\n pulumi.set(__self__, \"queue_name\", queue_name)\n if resource_id is not None:\n pulumi.set(__self__, \"resource_id\", resource_id)", "def queue(self, queue):\n allowed_values = [\"BLANK\", \"high\", \"low\", \"cancel\"] # noqa: E501\n if queue not in allowed_values:\n raise ValueError(\n \"Invalid value for `queue` ({0}), must be one of {1}\" # noqa: E501\n .format(queue, allowed_values)\n )\n\n self._queue = queue", "async def declare(self) -> 'Queue':\n # we are relying to this in other functions\n self._channel = await self._backend.channel()\n self.log.debug(\"Channel acquired CHANNEL%i\",\n self._channel.channel_number)\n\n if self.exchange:\n await self.declare_exchange()\n\n if self.name is not None:\n await self.declare_queue()\n\n if self.exchange:\n await self.bind_queue()\n\n return self", "async def update_work_pool_queue(\n work_pool_queue: schemas.actions.WorkPoolQueueUpdate,\n work_pool_name: str = Path(..., description=\"The work pool name\"),\n work_pool_queue_name: str = Path(\n ..., description=\"The work pool queue name\", alias=\"name\"\n ),\n worker_lookups: WorkerLookups = Depends(WorkerLookups),\n db: OrionDBInterface = Depends(provide_database_interface),\n):\n\n async with db.session_context(begin_transaction=True) as session:\n work_pool_queue_id = await worker_lookups._get_work_pool_queue_id_from_name(\n work_pool_name=work_pool_name,\n work_pool_queue_name=work_pool_queue_name,\n session=session,\n )\n\n await models.workers.update_work_pool_queue(\n session=session,\n work_pool_queue_id=work_pool_queue_id,\n work_pool_queue=work_pool_queue,\n db=db,\n )", "def publish(self, queue, message):\n\n # Instead of passing a queue to the constructor, the publish checks if\n # the target queue exists. 
If not, it declares the target queue\n if not self.queue:\n self.channel.queue_declare(queue=queue)\n self.queue = queue\n\n self.channel.basic_publish(\n exchange='', routing_key=queue, body=message)", "def get(queue_name: str, **kwargs) -> Queue:\n return Queue(queue_name, **kwargs)", "def jsonJobInfo_queuePut(self, **kwargs):\n\n str_queue = 'startQueue'\n for k,v in kwargs.items():\n if k == 'queue': str_queue = v\n\n if str_queue == 'startQueue': queue = self.queueStart\n if str_queue == 'endQueue': queue = self.queueEnd\n\n # self.dp.qprint(self.shell.d_job)\n\n queue.put(self.shell.d_job.copy())", "def setup_queue(self, method_frame):\n logger.info('Declaring queue %s', self.queue_name)\n # self._channel.queue_declare(self.on_queue_declareok, queue_name)\n\n self._channel.queue_declare(self.on_queue_declareok, exclusive=False, durable=True, queue=self.queue_name)", "def bind_queue_to_exchange(self, channel, exchange, queue_name, routing_key):\n logger.info('Binding %s to %s with %s',\n exchange, queue_name, routing_key)\n channel.queue_bind(queue=queue_name,\n exchange=exchange,\n routing_key=routing_key)", "def pre_qos_queue_create(self, resource_dict):\n pass", "def _set_queue(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=ReferenceType(referenced_path='../../../../../../../../queues/queue/name', caller=self._path() + ['queue'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=ReferenceType(referenced_path='../../../../../../../../queues/queue/name', caller=self._path() + ['queue'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__queue = t\n if hasattr(self, '_set'):\n self._set()", "def _set_queue(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=ReferenceType(referenced_path='../../../../../../../../queues/queue/name', caller=self._path() + ['queue'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=ReferenceType(referenced_path='../../../../../../../../queues/queue/name', caller=self._path() + ['queue'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', 
is_config=False)\"\"\",\n })\n\n self.__queue = t\n if hasattr(self, '_set'):\n self._set()", "def _set_queue(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=ReferenceType(referenced_path='../../../../../../../../queues/queue/name', caller=self._path() + ['queue'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=ReferenceType(referenced_path='../../../../../../../../queues/queue/name', caller=self._path() + ['queue'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__queue = t\n if hasattr(self, '_set'):\n self._set()", "def _set_queue(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=ReferenceType(referenced_path='../../../../../../../../queues/queue/name', caller=self._path() + ['queue'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=ReferenceType(referenced_path='../../../../../../../../queues/queue/name', caller=self._path() + ['queue'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\"\"\",\n })\n\n self.__queue = t\n if hasattr(self, '_set'):\n self._set()", "def _set_queue(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=ReferenceType(referenced_path='../../../../../../../../queues/queue/name', caller=self._path() + ['queue'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=ReferenceType(referenced_path='../../../../../../../../queues/queue/name', caller=self._path() + ['queue'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, 
register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\"\"\",\n })\n\n self.__queue = t\n if hasattr(self, '_set'):\n self._set()", "def _set_queue(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=ReferenceType(referenced_path='../../../../../../../../queues/queue/name', caller=self._path() + ['queue'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=ReferenceType(referenced_path='../../../../../../../../queues/queue/name', caller=self._path() + ['queue'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\"\"\",\n })\n\n self.__queue = t\n if hasattr(self, '_set'):\n self._set()", "def _listen_queue(self, queue, callback):\n # Listen buy/sell orders from external system\n self._logger.info(f\"Declaring rabbit queue {queue}\")\n self._consumer_rabbit_channel.queue_declare(queue=queue, durable=True, auto_delete=True)\n self._logger.info(f\"Declaring callback to rabbit queue: {queue}, callback: {callback}\")\n self._consumer_rabbit_channel.basic_consume(queue, callback,\n consumer_tag=queue)", "def declare_queue(self, topic):\n #from trove.rpc.impl_kombu import Connection\n from trove.openstack.common.rpc import create_connection\n with create_connection() as conn:\n consumer = conn.declare_topic_consumer(topic=topic)", "def create_queue(queue_name: str,\n durable: bool = True,\n auto_delete: bool = False,\n priorities: int = 0,\n extra_properties: Optional[dict] = None,\n server_url: Optional[str] = None):\n method_arguments: dict = {\n 'type': 'queue',\n 'name': queue_name,\n 'properties': {\n 'durable': durable,\n 'auto-delete': auto_delete,\n 'qpid.priorities': priorities\n }\n }\n\n if extra_properties:\n method_arguments['properties'].update(extra_properties)\n\n rpc = RemoteProcedure(handle_QMF2_exception,\n 'qmf.default.direct', server_url)\n create_queue_message = create_QMF2_method_invoke(\n get_broker_id(server_url),\n 'create', method_arguments)\n rpc.call(create_queue_message, timedelta(seconds=5))", "def load_queue(self, queue=None):\n if not queue:\n return False\n elif queue == \"ready_queue\":\n table = \"tangerine\"\n condition = \" WHERE state='ready';\"\n elif queue == \"job_queue\":\n table = \"jobs\"\n condition = \"\"\n else:\n table = \"tangerine\"\n condition = \"\"\n \n cur = self.conn.cursor()\n cur.execute(\"LOCK TABLE \" + queue + \" IN ACCESS EXCLUSIVE MODE;\")\n \n cur.execute(\"SELECT COUNT(id) FROM \" + queue + \";\")\n\n # if the queue still has tasks return nothing\n if cur.fetchone()[0]:\n self.conn.rollback()\n else:\n cur.execute(\"SELECT COUNT(id) FROM \" + table + condition + \";\")\n\n # If the task table is empty return nothing\n if not cur.fetchone()[0]:\n self.conn.commit()\n return\n \n cur.execute(\"SELECT id FROM \" + 
table + condition + \";\")\n ids = (\"(\" + str(id[0]) + \")\" for id in cur.fetchall())\n cur.execute(\"INSERT INTO \" + queue + \" VALUES \" + \", \".join(ids) + \";\")\n self.conn.commit()", "def create_queue(self, queue_name, visibility_timeout=None):\r\n params = {'QueueName': queue_name}\r\n if visibility_timeout:\r\n params['DefaultVisibilityTimeout'] = '%d' % (visibility_timeout,)\r\n return self.get_object('CreateQueue', params, Queue)", "def __init__(self, queue_name, task_class, namespace='redisqueue'):\n\n self.__db = None\n self.connected = False\n self.name = queue_name\n self.namespace = namespace\n self.task_class = task_class\n self.logger = logging.getLogger(self.__class__.__name__)\n self._key = '%s:%s' % (namespace, queue_name)\n self._lock_key = '%s:%s:lock' % (namespace, queue_name)\n\n self.logger.debug(\n \"Initializing Queue [name: {queue_name}, namespace: {namespace}]\".\n format(queue_name=queue_name, namespace=namespace))", "def _set_queue_type(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue_type must be of a type compatible with identityref\"\"\",\n 'defined-type': \"openconfig-qos:identityref\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=True)\"\"\",\n })\n\n self.__queue_type = t\n if hasattr(self, '_set'):\n self._set()", "def _set_queue_type(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = 
YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue_type must be of a type compatible with identityref\"\"\",\n 'defined-type': \"openconfig-qos:identityref\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=True)\"\"\",\n })\n\n self.__queue_type = t\n if hasattr(self, '_set'):\n self._set()", "def _set_queue_type(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': 
\"\"\"queue_type must be of a type compatible with identityref\"\"\",\n 'defined-type': \"openconfig-qos:identityref\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=True)\"\"\",\n })\n\n self.__queue_type = t\n if hasattr(self, '_set'):\n self._set()", "def _set_queue_type(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue_type must be of a type compatible with identityref\"\"\",\n 'defined-type': \"openconfig-qos:identityref\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', 
defining_module='openconfig-qos', yang_type='identityref', is_config=False)\"\"\",\n })\n\n self.__queue_type = t\n if hasattr(self, '_set'):\n self._set()", "def _set_queue_type(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue_type must be of a type compatible with identityref\"\"\",\n 'defined-type': \"openconfig-qos:identityref\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=False)\"\"\",\n })\n\n self.__queue_type = t\n if hasattr(self, '_set'):\n self._set()", "def _set_queue_type(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, 
path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue_type must be of a type compatible with identityref\"\"\",\n 'defined-type': \"openconfig-qos:identityref\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:DROP_TAIL': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'oc-qos-types:WRED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}, u'RED': {'@namespace': u'http://openconfig.net/yang/qos-types', '@module': u'openconfig-qos-types'}},), is_leaf=True, yang_name=\"queue-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='identityref', is_config=False)\"\"\",\n })\n\n self.__queue_type = t\n if hasattr(self, '_set'):\n self._set()", "def handle_create(self):\n subscription = self.client().subscription(\n self.properties[self.QUEUE_NAME],\n subscriber=self.properties[self.SUBSCRIBER],\n ttl=self.properties[self.TTL],\n options=self.properties[self.OPTIONS]\n )\n self.resource_id_set(subscription.id)", "def declare(self):\n self.channel.queue_declare(queue='files_to_database')", "def store_queue_for_restart(queue):\n if TEST_MODE:\n return queue.__dict__\n if not queue.currentM:\n logger.error('Message was not found in queue for restart daemon.')\n return None\n return {\n 'conn_region': queue.conn.region.name,\n 'queue_name': queue.q.name,\n 'body': queue.currentM.get_body(),\n 'attributes': queue.currentM.attributes,\n 'md5_message_attributes': queue.currentM.md5_message_attributes,\n 'message_attributes': queue.currentM.message_attributes,\n 'receipt_handle': queue.currentM.receipt_handle,\n 'id': queue.currentM.id,\n 'md5': queue.currentM.md5\n }", "def shopify_create_product_queue(self, instance, created_by='import'):\n #Added created_by field which is used to identify the queue is created from which process import or webhook : Dipak Gogiya\n product_queue_vals = {\n 'shopify_instance_id':instance and instance.id or False,\n 'state':'draft',\n 'created_by': created_by\n }\n product_queue_data_id = self.create(product_queue_vals)\n\n return product_queue_data_id", "def _set_queue(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"name\",yc_queue_openconfig_qos__qos_queues_queue, yang_name=\"queue\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': 
\"\"\"queue must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"name\",yc_queue_openconfig_qos__qos_queues_queue, yang_name=\"queue\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__queue = t\n if hasattr(self, '_set'):\n self._set()", "def query_queue(self, queue_name, alt_exchange_name=None):\n return self._query(queue_name, \"queue\", \"org.apache.qpid.broker\", alt_exchange_name)", "def store(self, queue: Queue):\n\n validate = RegisterValidator(self.request).register()\n if validate.check():\n validate.check_exists()\n\n if not validate.check():\n self.request.session.flash('validation', json.dumps(validate.errors()))\n return self.request.redirect_to('register')\n\n # register the user\n password = bcrypt_password(self.request.input('password'))\n\n auth.AUTH['model'].create(\n name=self.request.input('username'),\n password=password,\n email=self.request.input('email'),\n )\n\n # login the user\n # redirect to the homepage\n if Auth(self.request).login(self.request.input(auth.AUTH['model'].__auth__), self.request.input('password')):\n queue.push(WelcomeEmailJob, args=[self.request.input('email')])\n return self.request.redirect('/home')\n\n return self.request.redirect('/register')", "def _set_queue(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"name\",yc_queue_openconfig_qos_interfaces__qos_queues_queue, yang_name=\"queue\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"name\",yc_queue_openconfig_qos_interfaces__qos_queues_queue, yang_name=\"queue\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__queue = t\n if hasattr(self, '_set'):\n self._set()", "def support_queue(self, queue_id):\r\n return support_queues.SupportQueue(self, queue_id)", "def create_qos_queue(self, body=None):\r\n return self.post(self.qos_queues_path, body=body)", "def enter_queue(self, name=None):\r\n if(name):\r\n self.log.debug(\"ENTERING queue: (%s)\" % (name))\r\n self._queues[name].acquire()\r\n self.log.debug(\"SUCCESS ENTERING queue: (%s)\" % (name))", "def create_queue(self, queue_name='', exclusive=True, queue_size=10,\n message_ttl=60000, overflow_behaviour='drop-head',\n expires=600000):\n args 
= {\n 'x-max-length': queue_size,\n 'x-overflow': overflow_behaviour,\n 'x-message-ttl': message_ttl,\n 'x-expires': expires\n }\n\n result = self._channel.queue_declare(\n exclusive=exclusive,\n queue=queue_name,\n durable=False,\n auto_delete=True,\n arguments=args)\n queue_name = result.method.queue\n self.logger.debug('Created queue [{}] [size={}, ttl={}]'.format(\n queue_name, queue_size, message_ttl))\n return queue_name", "def __init__(self, queue_id):\n self.queue_id = queue_id\n self.action_type = 'set_queue'", "def queue_exists(self, queue_name):\n # resp = self._channel.queue_declare(queue_name, passive=True,\n # callback=self._queue_exists_clb)\n try:\n resp = self._channel.queue_declare(queue_name, passive=True)\n except pika.exceptions.ChannelClosedByBroker as exc:\n self.connect()\n if exc.reply_code == 404: # Not Found\n return False\n else:\n self.logger.warning('Queue exists <{}>'.format(queue_name))\n return True", "def support_queue(self, queue_id):\n return support_queues.SupportQueue(self, queue_id)", "def test_create_qos_queue(self):\r\n resource = 'qos_queue'\r\n cmd = qos.CreateQoSQueue(\r\n test_cli20.MyApp(sys.stdout), None)\r\n myid = 'myid'\r\n name = 'my_queue'\r\n default = False\r\n args = ['--default', default, name]\r\n position_names = ['name', 'default']\r\n position_values = [name, default]\r\n self._test_create_resource(resource, cmd, name, myid, args,\r\n position_names, position_values)", "def add_to_queue(self, word):\n self.q.put(word)\n print(\"word \\'{}\\' added in clients queue\".format(word))", "def lookup(self, queue, project=None):\n\n try:\n shard_id = self._shard_id(queue, project)\n except errors.QueueNotMapped as ex:\n LOG.debug(ex)\n\n # NOTE(kgriffs): Return `None`, rather than letting the\n # exception bubble up, so that the higher layer doesn't\n # have to duplicate the try..except..log code all over\n # the place.\n return None\n\n return self.get_driver(shard_id)", "async def _connect(self):\n self._connection = await connect_robust(self._connection_string)\n self._channel = await self._connection.channel()\n await self._channel.declare_queue(self._queue, durable=True, arguments={'x-max-priority': 10})", "def bind_queue(self, exchange_name, queue_name, bind_key):\n self.logger.info('Subscribed to topic: {}'.format(bind_key))\n try:\n self._channel.queue_bind(\n exchange=exchange_name, queue=queue_name, routing_key=bind_key)\n except Exception as exc:\n raise exc", "def _set_queue(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"name\",yc_queue_openconfig_qos_elements__qos_queues_queue, yang_name=\"queue\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"name\",yc_queue_openconfig_qos_elements__qos_queues_queue, yang_name=\"queue\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name=\"queue\", parent=self, 
path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__queue = t\n if hasattr(self, '_set'):\n self._set()", "def addQueueEntry(*args):\n try:\n #A unique id for each command.\n self.cmd_seq = self.cmd_seq + 1\n #Create a new queu entry\n self.entries[self.cmd_seq] = _QueueEntry(self, name, args, self.cmd_seq, self.log)\n #append it to the command queue\n self.queue.append(self.cmd_seq)\n #Return handle to the new entry for setting callbacks on.\n return self.entries[self.cmd_seq]\n except Exception as ex:\n self.log.failure(\"Error in addQueueEntry {err!r}\",err=str(ex))", "def setup_queues_and_bindings(self):\n self._channel.exchange_declare(self.setup_queue, exchange=self.exchange, passive=True)", "def runQueueEnqueue(self):\n raise NotImplementedError", "def subscribe_sqs_queue(self, topic, queue):\r\n t = queue.id.split('/')\r\n q_arn = 'arn:aws:sqs:%s:%s:%s' % (queue.connection.region.name,\r\n t[1], t[2])\r\n resp = self.subscribe(topic, 'sqs', q_arn)\r\n policy = queue.get_attributes('Policy')\r\n if 'Version' not in policy:\r\n policy['Version'] = '2008-10-17'\r\n if 'Statement' not in policy:\r\n policy['Statement'] = []\r\n statement = {'Action' : 'SQS:SendMessage',\r\n 'Effect' : 'Allow',\r\n 'Principal' : {'AWS' : '*'},\r\n 'Resource' : q_arn,\r\n 'Sid' : str(uuid.uuid4()),\r\n 'Condition' : {'StringLike' : {'aws:SourceArn' : topic}}}\r\n policy['Statement'].append(statement)\r\n queue.set_attribute('Policy', json.dumps(policy))\r\n return resp", "def queue(self, *args, **kwargs):\n queue_args = self._pop_tq_add_args(kwargs)\n app = queue_args.pop('app', None) or flask.current_app\n\n with app.test_request_context():\n # flask.url_for uses the request context if it is present\n # as we're most likely in a request context, use a\n # test_request_context() instead.\n url = self.url()\n\n payload = pickle.dumps((args, kwargs))\n\n taskqueue.add(\n url=url,\n queue_name=self.queue_name,\n payload=payload,\n **queue_args\n )", "def instantiate_queue(self):\n serialized_queue = self.cache.get('queue')\n queue = ast.literal_eval(serialized_queue.decode('utf-8'))\n return queue", "def register_server(self, server_name):\n if server_name not in self.server_queues.keys():\n print(\"Server '{}' registered itself.\".format(server_name))\n else:\n print(\"Server '{}' re-registered itself.\".format(server_name))\n # Remove the old queue, just to be safe\n del self.server_queues[server_name]\n\n dq = DispatcherQueue()\n self.server_queues[server_name] = dq", "def encode_queue(self, queue):\n raise NotImplementedError()", "def __init__(self, queue_name, **kwargs):\n super(Queue, self).__init__(**kwargs)\n self.value = queue_name", "def enqueue(xredis, qname, thash, opts={}, jid=None, silent=False):\n # generate a job ID from the current time\n if not jid:\n jid = str(time.time()).replace(\".\", \"\")\n\n # JSON-encode and LPUSH on to the selected queue\n xredis.lpush(\"queue_\"+qname, json.dumps({'id': jid, 'thash': thash, 'opts': opts }))\n\n if not silent:\n logthis(\"Enqueued job# %s in queue:\" % (jid), suffix=qname, loglevel=LL.VERBOSE)\n\n return jid", "def set_queue_name(self, queue_name):\n self._graph_executor.set_queue_name(queue_name)", "def setup_queues():\n sqs = boto.connect_sqs()\n sqs.create_queue('mls_parse_requests')\n sqs.create_queue('mls_fetcher')", "def register_catalog(catalog_name, 
catalog_config):\n _registered_catalogs[catalog_name] = catalog_config", "def addCustomer(self, c):\n self.queue.add(c)", "def subscribe(self):\n with self._rabbit_connection.connection.channel() as channel:\n self._queue = rabbitpy.Queue(\n channel=channel,\n name=self._subscriber_name + \"_queue\",\n durable=True,\n message_ttl=5 * 24 * 60 * 60 * 1000 # 5 days\n )\n self._queue.declare()\n self._queue.bind(self._exchange, self._routing_key)\n\n self._consume()", "def subscribe(self, queue, consumer_id):\n\n # Add myself to the list of consumers, if not already present.\n self.redis.sadd(self._ns_subscriptions(queue), consumer_id)\n\n return Subscription(self, queue, consumer_id)", "def bind_queue(self):\n # pylint: disable=protected-access\n future = self._backend._create_future()\n\n def on_bindok(unused_frame):\n future.set_result(True)\n\n self.log.debug('Bind queue exchange=%s, routing_key=%s',\n self.exchange, self.routing_key)\n self._channel.queue_bind(on_bindok, self.name,\n self.exchange, self.routing_key)\n\n return future", "def create(\n queue_name: str,\n region: str = \"\",\n delay_seconds: int = 0,\n maximum_message_size: int = 262144,\n message_retention_period: int = 345600,\n visibility_timeout: int = 30,\n fifo: bool = False,\n receive_message_wait_time_seconds: int = 0,\n **additional_attributes\n) -> Queue:\n sqs_client = _client(region=region)\n new_queue_url = sqs_client.create_queue(\n QueueName=queue_name,\n Attributes=dict(\n DelaySeconds=str(delay_seconds),\n MaximumMessageSize=str(maximum_message_size),\n MessageRetentionPeriod=str(message_retention_period),\n ReceiveMessageWaitTimeSeconds=str(receive_message_wait_time_seconds),\n VisibilityTimeout=str(visibility_timeout),\n FifoQueue=str(fifo).lower(),\n **additional_attributes\n ),\n )\n if not new_queue_url:\n raise FailedToCreateQueue()\n return get(new_queue_url[\"QueueUrl\"].split(\"/\")[-1])", "def _set_queue(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"name\",yc_queue_openconfig_qos__qos_interfaces_interface_input_queues_queue, yang_name=\"queue\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"name\",yc_queue_openconfig_qos__qos_interfaces_interface_input_queues_queue, yang_name=\"queue\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__queue = t\n if hasattr(self, '_set'):\n self._set()", "def create_queue(q_settings):\r\n db = get_db()\r\n cursor = db.cursor()\r\n cursor.execute(INSERT_QUEUE)\r\n q_settings['qid'] = cursor.lastrowid\r\n cursor.execute(INSERT_QUEUE_SETTINGS, qsettings_dict_to_db_tuple(q_settings))\r\n 
cursor.close()\r\n db.commit()\r\n permissions.add_permission_list(get_uids(q_settings['admins']), q_settings['qid'], permissions.ADMIN)\r\n if q_settings.has_key('managers'):\r\n permissions.add_permission_list(get_uids(q_settings['managers']), q_settings['qid'], permissions.MANAGER)\r\n if q_settings.has_key('blocked_users'):\r\n permissions.add_permission_list(get_uids(q_settings['blocked_users']), q_settings['qid'], permissions.BLOCKED_USER)\r\n return q_settings['qid']", "def register_backend(self, name, backend):\n self._backends[name] = backend", "async def create_work_queue(\n self,\n name: str,\n tags: Optional[List[str]] = None,\n description: Optional[str] = None,\n is_paused: Optional[bool] = None,\n concurrency_limit: Optional[int] = None,\n priority: Optional[int] = None,\n work_pool_name: Optional[str] = None,\n ) -> WorkQueue:\n if tags:\n warnings.warn(\n (\n \"The use of tags for creating work queue filters is deprecated.\"\n \" This option will be removed on 2023-02-23.\"\n ),\n DeprecationWarning,\n )\n filter = QueueFilter(tags=tags)\n else:\n filter = None\n create_model = WorkQueueCreate(name=name, filter=filter)\n if description is not None:\n create_model.description = description\n if is_paused is not None:\n create_model.is_paused = is_paused\n if concurrency_limit is not None:\n create_model.concurrency_limit = concurrency_limit\n if priority is not None:\n create_model.priority = priority\n\n data = create_model.dict(json_compatible=True)\n try:\n if work_pool_name is not None:\n response = await self._client.post(\n f\"/work_pools/{work_pool_name}/queues\", json=data\n )\n else:\n response = await self._client.post(\"/work_queues/\", json=data)\n except httpx.HTTPStatusError as e:\n if e.response.status_code == status.HTTP_409_CONFLICT:\n raise prefect.exceptions.ObjectAlreadyExists(http_exc=e) from e\n elif e.response.status_code == status.HTTP_404_NOT_FOUND:\n raise prefect.exceptions.ObjectNotFound(http_exc=e) from e\n else:\n raise\n return WorkQueue.parse_obj(response.json())", "def _set_queue(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"name\",yc_queue_openconfig_qos_interfaces__qos_interfaces_interface_input_queues_queue, yang_name=\"queue\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"name\",yc_queue_openconfig_qos_interfaces__qos_interfaces_interface_input_queues_queue, yang_name=\"queue\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__queue = t\n if hasattr(self, '_set'):\n self._set()", "def subscribe(self, queue, action):\n self.channel.queue_declare(queue=queue)\n 
self.channel.basic_consume(queue=queue,\n on_message_callback=action,\n auto_ack=True)\n self.channel.start_consuming()" ]
[ "0.71361166", "0.6695945", "0.64565563", "0.6338736", "0.6160316", "0.6083422", "0.5962883", "0.59370244", "0.5895258", "0.5773519", "0.5767887", "0.5727844", "0.5669611", "0.5621137", "0.55773187", "0.5558326", "0.5552847", "0.55482846", "0.55192804", "0.5471164", "0.5442383", "0.5414257", "0.5396806", "0.5395114", "0.5368862", "0.5367132", "0.536324", "0.534793", "0.53379726", "0.5334681", "0.52646434", "0.52090657", "0.52055365", "0.5201767", "0.5177617", "0.51616627", "0.51600784", "0.5158286", "0.5158286", "0.5158286", "0.5156399", "0.5156399", "0.5156399", "0.51483893", "0.51455665", "0.51407284", "0.5127527", "0.5123421", "0.5119232", "0.5108577", "0.5108577", "0.5108577", "0.51052016", "0.51052016", "0.51052016", "0.5084417", "0.50837475", "0.5079523", "0.5058547", "0.5052396", "0.50434136", "0.5041739", "0.50014627", "0.50013864", "0.50007796", "0.49852645", "0.49801874", "0.49798658", "0.49723783", "0.49689963", "0.4965245", "0.4964576", "0.4957793", "0.49420294", "0.4932033", "0.49176794", "0.49080884", "0.49046794", "0.49042374", "0.48827007", "0.4875609", "0.48698622", "0.48644426", "0.48534933", "0.48434213", "0.48421174", "0.4826823", "0.4811218", "0.48080152", "0.4805549", "0.48052195", "0.47868925", "0.4773511", "0.47665188", "0.4763972", "0.47576323", "0.47556046", "0.4753925", "0.47449666", "0.4737703" ]
0.7536504
0
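The register()/deregister() methods in the rows above and below map queues onto storage pools through a catalogue controller. Below is a minimal, runnable sketch of that pattern, not the actual storage drivers: the InMemoryCatalogue and InMemoryPools classes, the module-level register/deregister helpers, and the use of random.choice in place of weighted pool selection are all illustrative assumptions.

# Minimal, self-contained sketch of the pool-catalog register/deregister
# pattern. All names below are hypothetical stand-ins for illustration only.
import random


class InMemoryCatalogue:
    """Maps (project, queue) -> pool name."""

    def __init__(self):
        self._entries = {}

    def exists(self, project, queue):
        return (project, queue) in self._entries

    def insert(self, project, queue, pool):
        self._entries[(project, queue)] = pool

    def delete(self, project, queue):
        self._entries.pop((project, queue), None)


class InMemoryPools:
    """Holds pool definitions; selection is simplified to a random choice."""

    def __init__(self, pools):
        self._pools = pools

    def get_pools_by_flavor(self, flavor=None):
        return [p for p in self._pools
                if flavor is None or p['flavor'] == flavor]


def register(catalogue, pools_ctrl, queue, project=None, flavor=None):
    # Simplified version of the logic above: pick a pool for the flavor
    # (or any pool when flavor is None) and record the mapping.
    if not catalogue.exists(project, queue):
        candidates = pools_ctrl.get_pools_by_flavor(flavor=flavor)
        if not candidates:
            raise LookupError('no pool found')
        pool = random.choice(candidates)['name']
        catalogue.insert(project, queue, pool)


def deregister(catalogue, queue, project=None):
    # Mirrors the next row: drop the catalogue entry after the queue
    # has been removed from its backend pool.
    catalogue.delete(project, queue)


if __name__ == '__main__':
    catalogue = InMemoryCatalogue()
    pools = InMemoryPools([{'name': 'pool-a', 'flavor': None},
                           {'name': 'pool-b', 'flavor': 'gold'}])
    register(catalogue, pools, 'orders', project='demo')
    assert catalogue.exists('demo', 'orders')
    deregister(catalogue, 'orders', project='demo')
    assert not catalogue.exists('demo', 'orders')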
Removes a queue from the pool catalog. Call this method after successfully deleting it from a backend pool.
def deregister(self, queue, project=None):
    self._catalogue_ctrl.delete(project, queue)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_queue(self):\n self.work_queue_client.delete_queue()", "def _queue_delete(self, queue):\n\n queue.delete()", "def remove_queue(self, queue) -> None:\r\n self.receive_queues.remove(queue)", "def remove_queue(self, queue):\n with self.mutex:\n self.queues.remove(queue)", "def delete_queue(qid):\r\n raise NotImplementedError()", "def free_queue(self, sycl_queue_val):\n fn = DpctlCAPIFnBuilder.get_dpctl_queue_delete(\n builder=self.builder, context=self.context\n )\n self.builder.call(fn, [self.builder.load(sycl_queue_val)])", "def del_queue(self, queue_id):\n del self.queue_dict[queue_id]", "def destroy_queue(self):\n response = self.queue.delete()\n if self._is_error_call(response):\n raise RuntimeError('SQS could not delete queue: %s' % response)\n self.queue, self.queue_name = None, None", "def delete_queue(self, queue_name):\n amqp_session = self.__broker.getAmqpSession()\n amqp_session.queue_delete(queue_name)", "def delete_queue(self, queue_name: str) -> None:\n if queue_name is None:\n raise TypeError(\"Queue name cannot be None.\")\n\n with self.get_conn() as service_mgmt_conn:\n service_mgmt_conn.delete_queue(queue_name)", "def deregister(self, queue, project=None):\n self._invalidate_cached_id(queue, project)\n self._catalogue_ctrl.delete(project, queue)", "async def remove(self):\n\n await self.VoiceClient.http.removeQueueSource(self.tag)\n\n return self", "def purge(self):\n self._rpc(specification.Queue.Purge())", "def remove_from_queue(self, confid):\n\n queued_ids = self.c.select(queued=1, gaid=confid)\n ids = [q.id for q in queued_ids]\n self.c.delete(ids)", "def delete_qos_queue(self, queue):\r\n return self.delete(self.qos_queue_path % (queue))", "def queue_delete(queue):\n\n for job in queue.jobs:\n job_delete(job)\n if os.path.exists(queue.data_abspath):\n os.rmdir(queue.data_abspath)\n db.session.delete(queue)\n db.session.commit()", "def delete_queue(client, vhost, queue):\n client.delete_queue(vhost, queue)", "def delete_queue(self, queue, force_deletion=False):\r\n return self.get_status('DeleteQueue', None, queue.id)", "def clearQueue(self, queue_name, project_id=None):\n if project_id is None:\n project_id = self.project_id\n\n url = \"%sprojects/%s/queues/%s/clear?oauth=%s\" % (self.url, project_id, queue_name, self.token)\n body = self.__post(url)\n return json.loads(body)", "def delete_queue(queue_name: str, server_url: Optional[str] = None):\n rpc = RemoteProcedure(handle_QMF2_exception,\n 'qmf.default.direct', server_url)\n delete_queue_message = create_QMF2_method_invoke(\n get_broker_id(server_url),\n 'delete', {\n 'type': 'queue',\n 'name': queue_name\n }\n )\n rpc.call(delete_queue_message, timedelta(seconds=5))", "def pop_queue(self, queue=None):\n if not queue:\n return False\n \n cur = self.conn.cursor()\n cur.execute(\"LOCK TABLE \" + queue + \" IN ACCESS EXCLUSIVE MODE;\")\n\n cur.execute(\"SELECT id FROM \" + queue + \" LIMIT 1;\")\n row = cur.fetchone()\n self.conn.commit()\n \n if row:\n cur.execute(\"DELETE FROM \" + queue + \" WHERE id='\"+str(row[0])+\"';\")\n return row[0]\n else:\n return False", "def deregister(self, queue, project=None):\n\n # TODO(kgriffs): SHARDING - Implement this!\n pass", "def deleteQueues(self, queueIDToDelete):\r\n #method = moduleName + '.' + self.className + '.' 
+ 'deleteQueues'\r\n #errorMsg = \"Forcing deletion of management infrastructure for worker queue %s\" %queueIDToDelete\r\n #Graph.logQ.put( [logType , logLevel.ERROR , method , errorMsg])\r\n try: del self.workerQueues[queueIDToDelete]\r\n except: pass\r\n try: del self.depricatedWorkerQueues[queueIDToDelete]\r\n except: pass", "def qdel(self, *options):\n if self.in_queue():\n jobid = self.get_db('jobid')\n cmd = ['qdel'] + list(options) + [jobid]\n status, output, err = getstatusoutput(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n if status != 0:\n print(output + err)\n return status, output\n return '{} not in queue.'.format(self.directory)", "def _cleanup_method(self, queue_name, ep=None):\n if ep._chan is not None and not ep._chan._queue_auto_delete:\n # only need to delete if AMQP didn't handle it for us already!\n # @TODO this will not work with XOs (future)\n try:\n ch = self.container.node.channel(RecvChannel)\n ch._recv_name = NameTrio(get_sys_name(), \"%s.%s\" % (get_sys_name(), queue_name))\n ch._destroy_queue()\n except TransportError as ex:\n log.warn(\"Cleanup method triggered an error, ignoring: %s\", ex)", "async def delete_work_pool_queue(\n work_pool_name: str = Path(..., description=\"The work pool name\"),\n work_pool_queue_name: str = Path(\n ..., description=\"The work pool queue name\", alias=\"name\"\n ),\n worker_lookups: WorkerLookups = Depends(WorkerLookups),\n db: OrionDBInterface = Depends(provide_database_interface),\n):\n\n async with db.session_context(begin_transaction=True) as session:\n work_pool_queue_id = await worker_lookups._get_work_pool_queue_id_from_name(\n session=session,\n work_pool_name=work_pool_name,\n work_pool_queue_name=work_pool_queue_name,\n )\n\n await models.workers.delete_work_pool_queue(\n session=session, work_pool_queue_id=work_pool_queue_id, db=db\n )", "def RemoveFromQueue(self, addr):\n if addr in self.connection_queue:\n self.connection_queue.remove(addr)", "def delete(self):\n self._lbcall('delete_pool', [self._name])", "def delete_a_queue(self,index):\n try:\n del self.queues[index]\n return True\n except IndexError:\n return False", "def deQueue(self):\n\t\tif self.isEmpty():\n\t\t\tprint(\"Queue already empty: Queue Empty\")\n\t\t\texit(1)\n\t\tprint(\"Dequeueing: \", self.queue[self.front])\n\t\tself.queue[self.front] = None\n\t\tself.front = self.front + 1\n\t\tself.size = self.size - 1", "def ctrlqueue_clear_queue(self) -> int:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(0), ctypes.c_int32(0))", "def test_queue():\n mq = IPCComm.get_queue()\n key = str(mq.key)\n assert(CommBase.is_registered('IPCComm', key))\n CommBase.unregister_comm('IPCComm', key, dont_close=True)\n nt.assert_raises(KeyError, IPCComm.remove_queue, mq)\n CommBase.register_comm('IPCComm', key, mq)\n IPCComm.remove_queue(mq)\n assert(not CommBase.is_registered('IPCComm', key))", "def on_queue_clear_command(self, event):\n self.pre_check(event)\n self.same_channel_check(event)\n if self.get_player(event.guild.id).queue:\n self.get_player(event.guild.id).queue.clear()\n api_loop(event.channel.send_message, \"The queue has been cleared.\")\n else:\n api_loop(event.channel.send_message, \"The queue is already empty.\")", "def remove_from_queue(self, series_id: str):\n params = {\n \"series_id\": series_id\n }\n return self._api._api_call(\"remove_from_queue\", params)", "def exit_queue(self, name=None):\r\n if(name):\r\n self.log.debug(\"EXITING queue: (%s)\" % (name))\r\n self._queues[name].release()\r\n self.log.debug(\"SUCCESS EXITING 
queue: (%s)\" % (name))", "def pre_qos_queue_delete(self, resource_id):\n pass", "def __clear_message_queue(self):\r\n self.__lib.CC_ClearMessageQueue(self.__serno)", "def del_queue(self): # delete last=delete first come group\n return self.groups.pop()", "def cancle(self, queue_id):\n server = jenkins_server.get_jenkins_server()\n try:\n server.cancel_queue(queue_id)\n except Exception as e:\n return '取消队列中任务%d失败' % queue_id\n return '取消队列中任务%d成功' % queue_id", "def delete_pool(self, pool):\r\n return self.delete(self.pool_path % (pool))", "def post_qos_queue_delete(self, resource_id, resource_dict):\n pass", "def purge_queue(client, queue):\n channel = client.channel()\n\n channel.queue_declare(queue=queue, durable=True, auto_delete=False)\n channel.queue_purge(queue)\n channel.close()", "def dequeue(self, irc, msg, args):\n pos = self._find_in_queue(msg.nick)\n if pos < 0:\n irc.reply(\"You're not in the queue, did your nick change?\")\n return\n self._queue.pop(pos)\n self._count -= 1\n self._dump_queue()\n irc.reply(\"Removed you from the queue as requested\")", "def unsubscribe(self):\n\n # Unsubscribe\n self.pyrps.redis.srem(self.pyrps._ns_subscriptions(self.queue), self.consumer_id) \n\n # Remove message queue\n self.pyrps.redis.delete(self.pyrps._ns_queue(self.queue, self.consumer_id))", "def remove_job(data, job):\n for j in data.queue:\n if job.proc_id == j:\n del j\n return", "def clear(self):\n\n if not self.connected:\n raise QueueNotConnectedError(\"Queue is not Connected\")\n\n self.__db.delete(self._key)\n self.__db.delete(self._lock_key)", "async def remove(self, ctx, index: int):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n if not player.is_connected:\n return await ctx.send(\"I'm not connected to a voice channel :no_entry:\")\n if not player.is_playing:\n return await ctx.send(\"Nothing is currently playing :no_entry:\")\n if not player.queue:\n return await ctx.send('Nothing is queued :no_entry:')\n if index > len(player.queue) or index < 1:\n return await ctx.send(\"Invalid song index :no_entry:\")\n index -= 1\n removed = player.queue.pop(index)\n\n await ctx.send(\"Removed **\" + removed.title + \"** from the queue <:done:403285928233402378>\")", "def remove_from_usage_queue(self, md5_hash):\n self.usage_queue.remove(md5_hash)", "def clear_queue(self):\n self.queue = deque()", "def cancel_collections(codes, queues=None):\n params = {}\n if codes:\n params[\"codes\"] = codes\n if queues:\n params[\"queues\"] = queues\n response = houston.delete(\"/history/queue\", params=params)\n houston.raise_for_status_with_json(response)\n return response.json()", "def fusion_api_delete_storage_pool(self, uri=None, api=None, headers=None):\n return self.pool.delete(uri=uri, api=api, headers=headers)", "def dequeue(self):\r\n if self.size():\r\n self.queue.pop(0)\r\n else:\r\n raise IndexError(\"Queue is empty.\")", "def safe_queue_delete(self, queue_name, channel=None):\n channel = channel or self.channel\n full_queue_name = self.full_name(queue_name)\n try:\n yield from channel.queue_delete(full_queue_name, no_wait=False, timeout=1.0)\n except asyncio.TimeoutError:\n logger.warning('Timeout on queue %s deletion', full_queue_name, exc_info=True)\n except Exception:\n logger.error('Unexpected error on queue %s deletion', full_queue_name, exc_info=True)", "async def delete_work_queue_by_id(\n self,\n id: UUID,\n ):\n try:\n await self._client.delete(\n f\"/work_queues/{id}\",\n )\n except httpx.HTTPStatusError as e:\n if e.response.status_code == 
status.HTTP_404_NOT_FOUND:\n raise prefect.exceptions.ObjectNotFound(http_exc=e) from e\n else:\n raise", "def redis_rm_queue(self) -> bool:\n redis_client: Redis = self.redis_client\n\n queue_type: str = self._config[\"graph_queue_type\"]\n queue_key: str = self._config[\"graph_queue_key\"]\n\n try:\n redis_client.delete(queue_key)\n\n except RedisError as e:\n result: bool = False\n self._logger.exception( # noqa: G200\n \"Exception deleting Redis key: %s\", str(e)\n )\n\n else:\n result = True\n self._logger.info(\"Cleared %s key '%s'\", queue_type.upper(), queue_key)\n\n return result", "def ctrlqueue_delete(self, argument: int) -> int:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(1), ctypes.c_int32(argument))", "def clear_queue(self):\n\t\t\tself.message_queue.clear()\n\t\t\treturn self.message_queue", "def remove_to_deletes(self):\n go = True\n while go:\n go = False\n for op in self.queue:\n if op.delete:\n self.queue.remove(op)\n go = True\n break", "def deQueue(self):\r\n if (len(self.queue) >= 1):\r\n self.queue.pop(0)\r\n return True\r\n else:\r\n return False", "def drop_message(self):\n heapq.heappop(self._message_queue)", "def register(self, queue, project=None, flavor=None):\n\n # NOTE(gengchc): if exist, get queue's pool.flavor:\n # if queue's pool.flavor is different, first delete it and add it.\n # Otherwise, if the flavor in the meteredata of the queue is\n # modified, the catalog will be inconsistent.\n if self._catalogue_ctrl.exists(project, queue):\n catalogue = self._catalogue_ctrl.get(project, queue)\n oldpoolids = catalogue['pool']\n oldpool = self._pools_ctrl.get(oldpoolids)\n oldflavor = oldpool['flavor']\n msgtmpl = _(u'register queue to pool: old flavor: %(oldflavor)s '\n ', new flavor: %(flavor)s')\n LOG.info(msgtmpl,\n {'oldflavor': oldflavor, 'flavor': flavor})\n if oldpool['flavor'] != flavor:\n self._catalogue_ctrl.delete(project, queue)\n\n if not self._catalogue_ctrl.exists(project, queue):\n if flavor is not None:\n flavor = self._flavor_ctrl.get(flavor, project=project)\n pools = self._pools_ctrl.get_pools_by_flavor(\n flavor=flavor,\n detailed=True)\n pool = select.weighted(pools)\n pool = pool and pool['name'] or None\n msgtmpl = _(u'register queue to pool: new flavor:%(flavor)s')\n LOG.info(msgtmpl,\n {'flavor': flavor.get('name', None)})\n else:\n # NOTE(flaper87): Get pools assigned to the default\n # group `None`. We should consider adding a `default_group`\n # option in the future.\n pools = self._pools_ctrl.get_pools_by_flavor(detailed=True)\n pool = select.weighted(pools)\n pool = pool and pool['name'] or None\n\n if not pool:\n # NOTE(flaper87): We used to raise NoPoolFound in this\n # case but we've decided to support automatic pool\n # creation. Note that we're now returning and the queue\n # is not being registered in the catalogue. 
This is done\n # on purpose since no pool exists and the \"dummy\" pool\n # doesn't exist in the storage\n if self.lookup(queue, project) is not None:\n return\n raise errors.NoPoolFound()\n msgtmpl = _(u'register queue to pool: new flavor: None')\n LOG.info(msgtmpl)\n\n msgtmpl = _(u'register queue: project:%(project)s'\n ' queue:%(queue)s pool:%(pool)s')\n LOG.info(msgtmpl,\n {'project': project,\n 'queue': queue,\n 'pool': pool})\n self._catalogue_ctrl.insert(project, queue, pool)", "def reset_queue(self, db_session):\n for player in self.player_queue.queue:\n self.command_queue.appendleft(('_delete_last_row', {}))\n self.player_queue = PlayerQueue.PlayerQueue()\n db_session.execute(sqlalchemy.update(db.User.__table__, values={db.User.__table__.c.times_played: 0}))\n self._add_to_chat_queue('The queue has been emptied and all players start fresh.')", "async def cleanup(\n self, voice_client: Optional[discord.VoiceClient], guild: discord.Guild\n ):\n\n if voice_client:\n try:\n await voice_client.disconnect(force=True)\n except ValueError:\n # Raised from wavelink\n pass\n\n if guild.id in self.queue:\n queue = self.queue.pop(guild.id)\n queue.cleanup()\n del queue", "async def remove(self, ctx, song_index: int):\n player = self.bot.lavalink.player_manager.get(ctx.guild.id)\n\n if not player.is_connected:\n # We can't disconnect, if we're not connected.\n return await ctx.send(embed=self.error_embed(f'Not playing. [{ctx.message.author.mention}]'))\n\n if not ctx.author.voice or (player.is_connected and ctx.author.voice.channel.id != int(player.channel_id)):\n # Abuse prevention. Users not in voice channels, or not in the same voice channel as the bot\n # may not disconnect the bot.\n return await ctx.send(embed=self.error_embed(f'Not connected to the same voice channel. 
[{ctx.message.author.mention}]'))\n\n if song_index > len(player.queue) + 1:\n return await ctx.send(embed=self.error_embed(\"There is no such song in the queue.\"))\n\n await ctx.send(embed=self.reply_embed(f\"Removed **{player.queue[song_index - 1].title}** from the queue\"))\n player.queue.pop(song_index - 1)\n await ctx.message.add_reaction(\"✅\")", "def destroy_catalogue(self):\n # Call the backend to remove anything related to the archive.\n if self._catalogue_exists():\n self._backend.destroy()", "def clear(self):\n self.queue.clear()", "async def queue_remove(self, ctx: commands.Context, index: int) -> Optional[Player]:\n\n try:\n queue = self.queue[ctx.guild.id]\n\n return queue.remove(queue.pos + index)\n except IndexError:\n await self.call_event(\n \"on_music_error\",\n ctx,\n RemoveIndexInvalid(\"Failure when removing player from queue\"),\n )", "def clear(self):\n self.queue = Queue()", "def delete_device_pool(arn=None):\n pass", "def clear_queue(self):\n while not self.queue.empty():\n self.queue.get()", "def clearQueueAll():", "async def _remove(self, ctx: commands.Context, index: int):\n\n if len(ctx.voice_state.songs) == 0:\n return await ctx.send('Cannot remove song because the queue is empty.')\n\n ctx.voice_state.songs.remove(index - 1)\n await ctx.message.add_reaction('✅')", "async def _remove(self, ctx: commands.Context, index: int):\n\n if len(ctx.voice_state.songs) == 0:\n return await ctx.send('Empty queue.')\n\n ctx.voice_state.songs.remove(index - 1)\n await ctx.message.add_reaction('✅')", "async def _remove(self, ctx: commands.Context, index: int):\n\n if len(ctx.voice_state.songs) == 0:\n return await ctx.send('Empty queue.')\n\n ctx.voice_state.songs.remove(index - 1)\n await ctx.message.add_reaction('✅')", "def clear(cls, resq):\n first = MultipleBackend.classes[0]\n return first.clear(resq)", "def remove_torrent_from_queue(self, torr):\n pass", "def remove(self) -> T:\n if not self.is_empty():\n return self._queue.pop()", "def cli(env, account_id, queue_name, message_id, force, datacenter, network):\n\n manager = SoftLayer.MessagingManager(env.client)\n mq_client = manager.get_connection(account_id,\n datacenter=datacenter, network=network)\n\n if message_id:\n mq_client.delete_message(queue_name, message_id)\n else:\n mq_client.delete_queue(queue_name, force)", "def dequeue(self):\r\n raise QueueException(\"Unimplemented Abstract Queue Function\")", "async def job_remove(self, uid):\n self._require_running()\n job = self._get_job(uid)\n await job.close()\n del self._jobs[uid]\n del self._jobs_by_connection[job.sender.connection][uid]\n if len(self._jobs_by_connection[job.sender.connection]) == 0:\n del self._jobs_by_connection[job.sender.connection]\n self._log.debug('Removed job %s', job)", "def dequeue(self):\n if len(self.queue) > 0:\n return self.queue.pop()\n return (\"Queue Empty!\")", "def _flush_enqueued(self):\n\n msgs = self.RPC.query.all()\n for msg in msgs:\n if msg.enqueued:\n if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))\n elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.args['plan_name']))\n msg.delete()", "def do_remove(self, arg):\n jail_destroy('remove', arg)", "def clean_test_queues(prefix=TEST_NAME_PREFIX, region_name=None):\n sqs = boto3.resource('sqs', region_name=region_name)\n num_queues = 0\n try:\n for queue in sqs.queues.all():\n if 
re.match(r'.+%s\\d+' % TEST_NAME_PREFIX, queue.url):\n queue.delete()\n num_queues += 1\n finally:\n log.info('deleted %s test queues' % num_queues)", "def deQueue(self):\n if not self.isEmpty():\n self.queue.pop(0)\n self.rear -= 1\n return True\n else:\n return False", "def removeFromDownloadQueue(self, _src):\n for dl in self.downloadQueue:\n if _src in dl['src']:\n self.downloadQueue.pop(self.downloadQueue.index(dl))\n return", "def remove(self):\n with managed_session() as session:\n session.delete(self)", "def deQueue(self):\n if not self.isEmpty():\n self.queue.pop(0)\n return True\n else:\n return False", "def return_del_queue(hostname, username):\n #Established the connection\n myconnection = ssh_connection(hostname, username)\n if myconnection == 1:\n return \"Connection to %s failed\" % hostname\n else:\n #Empty the queue\n commandline=\"sudo /usr/sbin/postsuper -d ALL\"\n stdin, stdout, stderr = myconnection.exec_command(commandline)\n if stderr.read():\n return \"Problem with the queue. Not flushed. Please contact system administrator (admin@adthink-media.com)!\"\n else:\n return \"The postfix queue on (%s) has been flushed\" % (hostname)\n\n # Disconnect from the host\n myconnection.close()", "def qdel(jid):\n command = '%s -j %d' % (QDEL_PATH, jid)\n subprocess.check_output([command], env=ENV, shell=True)", "def dequeue(self):\n if not self.is_empty():\n return self._queue_items.pop()\n else:\n raise QueueException('dequeue operation not supported on an empty queue')", "def delete_pool(self, argu):\n\n if not argu:\n LOG.error(\"In delete_pool, it should not pass the None.\")\n\n # delete policy\n self._delete_policy(\n argu['listener_id'],\n argu['session_persistence_type'],\n argu['lb_algorithm']\n )\n\n cmd_apv_no_group = ADCDevice.no_group(argu['pool_id'])\n for base_rest_url in self.base_rest_urls:\n self.run_cli_extend(base_rest_url, cmd_apv_no_group)", "def remove_query(iden):\r\n table = query_queue_table\r\n d = table.delete(table.c.iden == iden)\r\n d.execute()", "def remove(self):\r\n\t\tself._delete()", "def purge_queue(queue_name: str,\n limit: int = 0,\n message_filter: Optional[Tuple[str, str]] = None,\n server_url: Optional[str] = None):\n queue = get_object('org.apache.qpid.broker', 'queue', queue_name,\n server_url)\n method_arguments = {'request': limit} # type: dict\n if message_filter:\n method_arguments['filter'] = _build_message_filter(*message_filter)\n\n rpc = RemoteProcedure(handle_QMF2_exception,\n 'qmf.default.direct', server_url)\n rpc.call(create_QMF2_method_invoke(queue['_object_id'],\n 'purge', method_arguments),\n timedelta(seconds=5))", "def on_remove_command(self, event, index):\n self.pre_check(event)\n self.same_channel_check(event)\n if not self.get_player(event.guild.id).queue:\n api_loop(\n event.channel.send_message,\n \"There aren't any songs queued right now.\",\n )\n elif str(index).lower() == \"all\":\n self.get_player(event.guild.id).queue = list()\n api_loop(event.channel.send_message, \"Cleared playing queue.\")\n elif (str(index).isdigit() and\n 0 <= (int(index) - 1) <=\n len(self.get_player(event.guild.id).queue)):\n yt_dl_object = self.get_player(event.guild.id).pop(int(index) - 1)\n ytdata = self.get_ytdl_values(yt_dl_object.metadata)\n api_loop(\n event.channel.send_message,\n \"Removed index ``{}`` at index ``{}``.\".format(\n ytdata[\"title\"],\n index,\n ),\n )\n else:\n api_loop(event.channel.send_message, \"Invalid index input.\")", "def pop(self, pid):\n for p in self._queue:\n if p.id == pid:\n return 
self._queue.pop(self._queue.index(p)).id\n return 0", "def reset_queueing(self):\n self._num_queued = 0", "def delete(self) -> None:\n self.pop()", "def clean_queue(queue):\n yield queue\n\n # Clean the dirty queue\n queue.join()\n\n # Check for clean shutdown\n assert queue.qsize == 0\n assert queue.inprogress_size == 0" ]
[ "0.72310483", "0.7100992", "0.70777076", "0.70770454", "0.6994525", "0.699254", "0.6895938", "0.6883569", "0.68662935", "0.6845864", "0.6806441", "0.68028337", "0.67998916", "0.6713241", "0.67060405", "0.66622037", "0.64299446", "0.6404603", "0.63555455", "0.6346353", "0.6262542", "0.6222476", "0.62064046", "0.6168046", "0.6147086", "0.61307657", "0.6045567", "0.6040789", "0.6012379", "0.60057896", "0.59905815", "0.59574455", "0.59365773", "0.5933812", "0.5889267", "0.5884575", "0.5884425", "0.5878983", "0.58305186", "0.58025116", "0.5801966", "0.57857114", "0.5767803", "0.5761944", "0.5718676", "0.57051426", "0.56584936", "0.5627618", "0.56275415", "0.5611683", "0.55819017", "0.55685645", "0.55374354", "0.55358255", "0.5509682", "0.54977584", "0.54907405", "0.54866606", "0.5481267", "0.54695493", "0.5464929", "0.5454885", "0.54358", "0.54354215", "0.54216325", "0.54124683", "0.5407196", "0.5392442", "0.537528", "0.5371408", "0.53655565", "0.5364861", "0.5362982", "0.5362982", "0.5360377", "0.53586566", "0.53453034", "0.5340603", "0.53234357", "0.53203124", "0.53116256", "0.53082347", "0.53003615", "0.5297043", "0.52933043", "0.52911633", "0.5289461", "0.5287974", "0.5283732", "0.52743596", "0.5272526", "0.5263896", "0.52517843", "0.524128", "0.5237578", "0.5230389", "0.52292407", "0.5228964", "0.5228866", "0.5225944" ]
0.70685136
4
Lookup the queue controller for the given queue and project.
def get_queue_controller(self, queue, project=None):
        target = self.lookup(queue, project)
        return target and target.queue_controller
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_claim_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.claim_controller", "def get_message_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.message_controller", "def get_subscription_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.subscription_controller", "def lookup(self, queue, project=None):\n\n try:\n shard_id = self._shard_id(queue, project)\n except errors.QueueNotMapped as ex:\n LOG.debug(ex)\n\n # NOTE(kgriffs): Return `None`, rather than letting the\n # exception bubble up, so that the higher layer doesn't\n # have to duplicate the try..except..log code all over\n # the place.\n return None\n\n return self.get_driver(shard_id)", "def lookup(self, queue, project=None):\n\n try:\n pool_id = self._pool_id(queue, project)\n except errors.QueueNotMapped as ex:\n LOG.debug(ex)\n\n return self.get_default_pool(use_listing=False)\n\n return self.get_driver(pool_id)", "def lookup(self, queue, project=None):\n\n # TODO(kgriffs): SHARDING - Raise an exception if the queue\n # does not have a mapping (it does not exist).\n\n # TODO(kgriffs): SHARDING - Get ID from the catalog backend\n shard_id = '[insert_id]'\n try:\n shard = self._shards[shard_id]\n except KeyError:\n self._shards[shard_id] = shard = self._init_shard(shard_id)\n\n return shard", "def get_queue(self, task_name):\n for name, queue in self.queues.items():\n if task_name in queue:\n return name\n return self.default_queue", "def get_topic_controller(self, topic, project=None):\n target = self.lookup(topic, project)\n return target and target.topic_controller", "def getQueueDetails(self, queue_name, project_id=\"\"):\n if project_id == \"\":\n project_id = self.project_id\n url = \"%sprojects/%s/queues/%s?oauth=%s\" % (self.url, project_id,\n queue_name, self.token)\n body = self.__get(url)\n queue = json.loads(body)\n return queue", "async def get_queue(self, ctx: commands.Context) -> Optional[QueueManager]:\n\n return self.queue[ctx.guild.id]", "def queue_path(self, project, location, queue):\n # This is value is not actually used, but it might be good for debugging.\n return \"projects/{project}/locations/{location}/queues/{queue}\".format(\n project=project, location=location, queue=queue)", "def find(self, task_id):\n for task_obj in self.queue:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in queue: '{}'\".format(task_id))", "def get_queue_num(self, qos_id, queue_id):\n\n q_num = None\n queues = self.qos_dict[qos_id][\"ovsdb:qos-entries\"][0][\"queue-list\"]\n\n # Go through all queues\n for queue in queues:\n cur_queue_id = queue[\"queue-ref\"].split(\"'\")[-2]\n # If we have a match, get the q_num and break\n if cur_queue_id == queue_id:\n q_num = queue[\"queue-number\"]\n break\n\n # queue_id is not found in the qos\n if q_num is None:\n #print(json.dumps(self.qos_dict[qos_id], indent=3))\n raise KeyError\n\n return q_num", "def find_queue(queue):\n athena_queue = canonicalize_queue(queue)\n # If a queue isn't an Athena queue, punt straight to the default\n # CUPS server\n if not athena_queue:\n return SYSTEM_CUPS, None, queue\n queue = athena_queue\n\n # Get rid of any instance on the queue name\n # TODO The purpose of instances is to have different sets of default\n # options. Queues may also have default options on the null\n # instance. 
Figure out if we need to do anything about them\n queue = queue.split('/')[0]\n\n # If we're still here, the queue is definitely an Athena print\n # queue; it was either in the local cupsd pointing to Athena, or the\n # local cupsd didn't know about it.\n # Figure out what Athena thinks the backend server is, and whether\n # that server is running a cupsd; if not, fall back to LPRng\n\n rm = get_hesiod_print_server(queue)\n if not rm:\n # In the unlikely event we're wrong about it being an Athena\n # print queue, the local cupsd is good enough\n return SYSTEM_CUPS, None, queue\n\n # Give up and return rm and queue. If it's not running a cupsd,\n # too bad. It's not our job to check whether cupsd is running.\n return SYSTEM_CUPS, rm, queue", "def register(self, queue, project=None):\n # NOTE(cpp-cabrera): only register a queue if the entry\n # doesn't exist\n if not self._catalogue_ctrl.exists(project, queue):\n # NOTE(cpp-cabrera): limit=0 implies unlimited - select from\n # all shards\n shard = select.weighted(self._shards_ctrl.list(limit=0))\n\n if not shard:\n raise errors.NoShardFound()\n\n self._catalogue_ctrl.insert(project, queue, shard['name'])", "def magma_queue_get_device(queue):\n\n return _libmagma.magma_queue_get_device(queue)", "def get(queue_name: str, **kwargs) -> Queue:\n return Queue(queue_name, **kwargs)", "def register(self, queue, project=None):\n\n # TODO(kgriffs): SHARDING - Implement this!\n pass", "def search_queue_number(self, Q_strip):\n if Q_strip is self.PF_Q_strip:\n out = self.dut.send_expect(\"cat config/common_base\", \"]# \", 10)\n pattern = \"(%s=)(\\d*)\" % Q_strip\n else :\n out = self.dut.send_expect(\"cat drivers/net/i40e/i40e_ethdev.c\", \"]# \", 10)\n pattern = \"#define %s\\s*(\\d*)\" % Q_strip\n s = re.compile(pattern)\n res = s.search(out)\n if res is None:\n print utils.RED('Search no queue number.')\n return None\n else:\n if Q_strip is self.VF_Q_strip:\n queue = res.group(1)\n else :\n queue = res.group(2)\n return int(queue)", "def support_queue(self, queue_id):\r\n return support_queues.SupportQueue(self, queue_id)", "def find(self, task_id):\n for task_obj in self._queue:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in dorm: '{}'\".format(task_id))", "def support_queue(self, queue_id):\n return support_queues.SupportQueue(self, queue_id)", "def deregister(self, queue, project=None):\n self._catalogue_ctrl.delete(project, queue)", "def queue_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"queue_name\")", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def get_project(self, project):\n project_name = project\n\n try:\n # FIXME: project should be an integer or str, no both\n project_id = int(project)\n except ValueError:\n project_id = None\n\n try:\n # Find the first project occurrence\n project_found = next(p for p in 
self.get_projects() if p[\"id\"] == project_id\n or p[\"name\"] == project_name)\n # FIXME: use namedtuple instead? create a self.project = dict()?\n self.project_name = project_found[\"name\"]\n self.project_id = project_found[\"id\"]\n self.project_address = \"projects/%s/\" % self.project_id\n except StopIteration:\n logger.error(\"Project %s not found\" % project)\n raise KeyError", "def get_queue_number(self):\n outstring = self.dut.send_expect(\"stop\", \"testpmd> \")\n time.sleep(2)\n result_scanner = r\"Forward Stats for RX Port= %s/Queue=\\s?([0-9]+)\" % self.dut_ports[0]\n scanner = re.compile(result_scanner, re.DOTALL)\n m = scanner.search(outstring)\n queue_id = m.group(1)\n print \"queue is %s\" % queue_id\n self.dut.send_expect(\"start\", \"testpmd> \")\n return queue_id", "def _pool_id(self, queue, project=None):\n return self._catalogue_ctrl.get(project, queue)['pool']", "def ctrlqueue_device_handle(self) -> int:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(5), ctypes.c_int32(0))", "def register(self, queue, project=None, flavor=None):\n\n # NOTE(gengchc): if exist, get queue's pool.flavor:\n # if queue's pool.flavor is different, first delete it and add it.\n # Otherwise, if the flavor in the meteredata of the queue is\n # modified, the catalog will be inconsistent.\n if self._catalogue_ctrl.exists(project, queue):\n catalogue = self._catalogue_ctrl.get(project, queue)\n oldpoolids = catalogue['pool']\n oldpool = self._pools_ctrl.get(oldpoolids)\n oldflavor = oldpool['flavor']\n msgtmpl = _(u'register queue to pool: old flavor: %(oldflavor)s '\n ', new flavor: %(flavor)s')\n LOG.info(msgtmpl,\n {'oldflavor': oldflavor, 'flavor': flavor})\n if oldpool['flavor'] != flavor:\n self._catalogue_ctrl.delete(project, queue)\n\n if not self._catalogue_ctrl.exists(project, queue):\n if flavor is not None:\n flavor = self._flavor_ctrl.get(flavor, project=project)\n pools = self._pools_ctrl.get_pools_by_flavor(\n flavor=flavor,\n detailed=True)\n pool = select.weighted(pools)\n pool = pool and pool['name'] or None\n msgtmpl = _(u'register queue to pool: new flavor:%(flavor)s')\n LOG.info(msgtmpl,\n {'flavor': flavor.get('name', None)})\n else:\n # NOTE(flaper87): Get pools assigned to the default\n # group `None`. We should consider adding a `default_group`\n # option in the future.\n pools = self._pools_ctrl.get_pools_by_flavor(detailed=True)\n pool = select.weighted(pools)\n pool = pool and pool['name'] or None\n\n if not pool:\n # NOTE(flaper87): We used to raise NoPoolFound in this\n # case but we've decided to support automatic pool\n # creation. Note that we're now returning and the queue\n # is not being registered in the catalogue. 
This is done\n # on purpose since no pool exists and the \"dummy\" pool\n # doesn't exist in the storage\n if self.lookup(queue, project) is not None:\n return\n raise errors.NoPoolFound()\n msgtmpl = _(u'register queue to pool: new flavor: None')\n LOG.info(msgtmpl)\n\n msgtmpl = _(u'register queue: project:%(project)s'\n ' queue:%(queue)s pool:%(pool)s')\n LOG.info(msgtmpl,\n {'project': project,\n 'queue': queue,\n 'pool': pool})\n self._catalogue_ctrl.insert(project, queue, pool)", "def deregister(self, queue, project=None):\n\n # TODO(kgriffs): SHARDING - Implement this!\n pass", "def get_queue(self, window_info, original_kwargs):\n if callable(self.queue):\n return str(self.queue(self, window_info, original_kwargs))\n return str(self.queue) or settings.CELERY_DEFAULT_QUEUE", "def deregister(self, queue, project=None):\n self._invalidate_cached_id(queue, project)\n self._catalogue_ctrl.delete(project, queue)", "def _shard_id(self, queue, project=None):\n cache_key = _shard_cache_key(queue, project)\n shard_id = self._cache.get(cache_key)\n\n if shard_id is None:\n shard_id = self._catalogue_ctrl.get(project, queue)['shard']\n\n if not self._cache.set(cache_key, shard_id, _SHARD_CACHE_TTL):\n LOG.warn('Failed to cache shard ID')\n\n return shard_id", "def get_queue_by_name(name):\n sqs = boto3.resource('sqs')\n return sqs.get_queue_by_name(QueueName=name)", "def get_queue(self):\n if self.queue is not None:\n return self.queue\n state = self.get_state()\n self.queue = state.get_queue()\n # print(\"IQ\", self.queue)\n return self.queue", "def ctrlqueue_show(self) -> int:\n try:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(7), ctypes.c_int32(0))\n except Exception as e:\n Base.warn_msg(\"An error occur when tried to get *Num Actions of CrlQueue* check if *Queue* is NOT empty\", e)", "def query_queue(self, queue_name, alt_exchange_name=None):\n return self._query(queue_name, \"queue\", \"org.apache.qpid.broker\", alt_exchange_name)", "def _get_queue_type(self):\n return self.__queue_type", "def _get_queue_type(self):\n return self.__queue_type", "def _get_queue_type(self):\n return self.__queue_type", "def _get_queue_type(self):\n return self.__queue_type", "def _get_queue_type(self):\n return self.__queue_type", "def _get_queue_type(self):\n return self.__queue_type", "def get_queue(self):\n return self.queue", "def get_queue(self):\n return self.queue", "def _ns_queue(self, queue, consumer_id):\n return self._ns(queue, consumer_id, \"messages\")", "def queue(config):\n q_cmd = osp.join(get_condor_bin_dir(config),\n CONDOR_COMMAND['queue'])\n return _simple_command_run([q_cmd, '-global'],\n ignore_output_errors=['All queues are empty',])", "def get_queue(self):\r\n return _channeldata[self.chan].queue", "def getQueue(serverName: str, queueType: str):\n if queueType is \"k\":\n queue = kitchenQueue\n elif queueType is \"b\":\n queue = bathroomQueue\n else:\n raise Exception(\"Incorrect parameters\")\n\n if serverName in queue.keys():\n return queue.get(serverName)\n else:\n queue[serverName] = []\n return queue.get(serverName)", "def get_pid(self, pid):\n for p in self._queue:\n if p.id == pid:\n return p\n else: return 0", "async def _get_work_pool_queue_id_from_name(\n self, session: AsyncSession, work_pool_name: str, work_pool_queue_name: str\n ) -> UUID:\n work_pool_queue = await models.workers.read_work_pool_queue_by_name(\n session=session,\n work_pool_name=work_pool_name,\n work_pool_queue_name=work_pool_queue_name,\n )\n if not work_pool_queue:\n raise HTTPException(\n 
status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Worker queue '{work_pool_name}/{work_pool_queue_name}' not found.\",\n )\n\n return work_pool_queue.id", "def clearQueue(self, queue_name, project_id=None):\n if project_id is None:\n project_id = self.project_id\n\n url = \"%sprojects/%s/queues/%s/clear?oauth=%s\" % (self.url, project_id, queue_name, self.token)\n body = self.__post(url)\n return json.loads(body)", "def getQueue():\n dm_plugin_url = f\"https://api.zuri.chat/marketplace/plugins/{PLUGIN_ID}\"\n try:\n response = requests.get(url=dm_plugin_url)\n except requests.exceptions.RequestException as e:\n return e\n if response.status_code == 200:\n return response.json()[\"data\"][\"queue\"]\n else:\n return None", "def get_rabbit_queue():\n\n return \"metrics_queue\"", "def show_qos_queue(self, queue, **_params):\r\n return self.get(self.qos_queue_path % (queue),\r\n params=_params)", "def queue_maker(queue, bucket_name):\n scraper = key_scraper.KaleidoscopeKeyScraper(\n bucket_name=bucket_name,\n queue=queue,\n )\n scraper.add_keys_to_queue()\n\n return None", "def queue(self):\n if self._queue is None:\n qstr = self.query_queue(user=self._user)\n self._queue = self.parse_queue_str(qstr)\n\n return self._queue", "def get_app_queue(client: MarathonClient, app_id: str) -> Optional[MarathonQueueItem]:\n app_id = \"/%s\" % app_id\n app_queue = client.list_queue(embed_last_unused_offers=True)\n for app_queue_item in app_queue:\n if app_queue_item.app.id == app_id:\n return app_queue_item\n return None", "def get_queue_settings(qid):\r\n db = get_db()\r\n rows = query_db(GET_QUEUE_SETTINGS_BY_ID, (qid,))\r\n if (not rows) or (len(rows) == 0):\r\n raise sqlite3.Error('The queue does not exist.')\r\n return rows[0]", "def get_project_name(self, project_id):\n test = \"\"\"SELECT EXISTS(\n SELECT 1\n FROM barcodes.project\n WHERE project_id=%s\n )\"\"\"\n query = \"\"\"SELECT project\n FROM barcodes.project\n WHERE project_id=%s\"\"\"\n\n with self._transaction.cursor() as cur:\n cur.execute(test, [project_id, ])\n if not cur.fetchone()[0]:\n raise NotFound(f\"Project f'{project_id}' not found\")\n else:\n cur.execute(query, [project_id, ])\n return cur.fetchone()[0]", "def get_queue(queue_name=\"\"):\n print(get_qstat_arg(queue_name))\n q = subprocess.Popen(\n _get_qstat_arg(queue_name), stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, stdin=subprocess.PIPE\n )\n o, e = q.communicate()\n\n return o", "def queue(self):\n if self._queue is None:\n qstr = self.query_queue(user=self._user)\n self._queue = self.parse_queue_str(qstr, keys=self.QSTAT_KEYS)\n\n return self._queue", "def ctrlqueue_action_code(self) -> int:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(4), ctypes.c_int32(0))", "def ctrlqueue_action(self) -> int:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(3), ctypes.c_int32(0))", "def QueueId(self):\n\t\treturn self._get_attribute('queueId')", "def service_bus_queue_endpoint_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_bus_queue_endpoint_id\")", "def service_bus_queue_endpoint_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_bus_queue_endpoint_id\")", "def get_current_queue(self):\n sycl_queue_val = cgutils.alloca_once(\n self.builder, utils.get_llvm_type(context=self.context, type=types.voidptr)\n )\n fn = DpctlCAPIFnBuilder.get_dpctl_queuemgr_get_current_queue(\n builder=self.builder, context=self.context\n )\n self.builder.store(self.builder.call(fn, []), sycl_queue_val)\n return sycl_queue_val", "def 
sqs_lookup_url(session, queue_name):\n client = session.client('sqs')\n resp = client.get_queue_url(QueueName=queue_name)\n return resp['QueueUrl']", "def get_short_queue_name(cls):\n if 'long_queue_name' in cls.global_settings:\n return cls.global_settings['short_queue_name']\n else:\n return None", "def get_queue_items(self, queue_name):\n proc = start_proc([\"/usr/bin/sudo\", \"rabbitmqctl\", \"list_queues\"],\n shell=False)\n for line in iter(proc.stdout.readline, \"\"):\n print(\"LIST QUEUES:\" + line)\n m = re.search(r\"%s\\s+([0-9]+)\" % queue_name, line)\n if m:\n return int(m.group(1))\n return None", "def get(self, queue=None, style=None, prettyPrint=None, countryCode=None):\r\n params = base.get_params(None, locals())\r\n params.update(self.lookup)\r\n\r\n request = http.Request('GET', self.get_url(), params)\r\n\r\n return request, parsers.parse_json", "def showqueue(self, irc, msg, args):\n if len(self._queue) == 0:\n irc.reply(\"The queue is empty\", private=True)\n return\n pos = self._find_in_queue(msg.nick)\n if pos < 0:\n irc.reply(\"You're not in the queue, did your nick change?\",\n private=True)\n return\n irc.reply(\"You are queued at position %d\" % (pos + 1), private=True)", "async def read_work_queue(\n self,\n id: UUID,\n ) -> WorkQueue:\n try:\n response = await self._client.get(f\"/work_queues/{id}\")\n except httpx.HTTPStatusError as e:\n if e.response.status_code == status.HTTP_404_NOT_FOUND:\n raise prefect.exceptions.ObjectNotFound(http_exc=e) from e\n else:\n raise\n return WorkQueue.parse_obj(response.json())", "def __getitem__(self, uri):\n\t\t# The queue is empty, so return None\n\t\tif self.qsize() == 0:\n\t\t\treturn\n\n\t\t# Iterate through the queue grabbing a worker, comparing its URI with the one provided\n\t\t# and putting it back if they do not match. Note that this implementation assumes that\n\t\t# all workers are equal and therefore their order does not matter\n\t\tfound = None\n\t\tfor i in range(self.qsize()):\n\t\t\tp = self.get_nowait()\n\t\t\tif p.uri == uri:\n\t\t\t\tfound = p\n\t\t\t\tself._uris.remove(uri)\n\t\t\t\tbreak\n\t\t\tself.put_nowait(p)\n\t\treturn found", "def _find_controller(self, controller):\n if controller is None:\n return None\n # If the output specified is a string controller e.g. 
\"WelcomeController@show\"\n elif isinstance(controller, str):\n if \"@\" in controller:\n controller_path, controller_method_str = controller.split(\"@\")\n else:\n controller_path = controller\n controller_method_str = \"__call__\"\n\n controller_path = modularize(controller_path).split(\".\")\n if len(controller_path) > 1:\n controller_name = controller_path.pop()\n prefix_path = \".\".join(controller_path)\n else:\n controller_name = controller_path[0]\n prefix_path = \"\"\n # build a list of all locations where the controller can be found\n # if the controller is defined such as auth.WelcomeController, append the prefix path to\n # the locations\n locations = list(\n map(\n lambda loc: f\"{loc}.{removeprefix(prefix_path, loc)}\"\n if prefix_path\n else loc,\n self.controllers_locations,\n )\n )\n try:\n self.controller_class = Loader.find(\n Controller, locations, controller_name, raise_exception=True\n )\n except LoaderNotFound as e:\n self.e = e\n print(f\"\\033[93mTrouble importing controller!\\n> {str(e)}\\033[0m\")\n # controller is an instance with a bound method\n elif hasattr(controller, \"__self__\"):\n _, controller_method_str = controller.__qualname__.split(\".\")\n self.controller_instance = controller.__self__\n\n # it's a class or class.method, we don't have to find it, just get the class\n elif hasattr(controller, \"__qualname__\"):\n if \".\" in controller.__qualname__:\n controller_name, controller_method_str = controller.__qualname__.split(\n \".\"\n )\n else:\n controller_name = controller.__qualname__\n controller_method_str = \"__call__\"\n\n try:\n self.controller_class = Loader.get_object(\n controller.__module__, controller_name, raise_exception=True\n )\n except LoaderNotFound as e:\n self.e = e\n print(f\"\\033[93mTrouble importing controller!\\n> {str(e)}\\033[0m\")\n # it's a controller instance\n else:\n self.controller_instance = controller\n controller_method_str = \"__call__\"\n\n # Set the controller method on class. This is a string\n self.controller_method = controller_method_str", "def get_project(db, id):\n \n for element in db:\n if element['project_no'] == id:\n return element\n return None", "def _get_controller(self):\n return self.__controller", "def get_issue(issue_number):\n backend_name = os.environ[\"ISSUE_BACKEND\"]\n backend_module = importlib.import_module(\n \"issuebranch.backends.{}\".format(backend_name)\n )\n\n return getattr(backend_module, \"Backend\")(issue_number)", "def lookForQueueingCommands():\n for queue, binary in queueBinaryMap.items():\n if checkForBinary(binary):\n return queue\n else:\n raise Exception(\"Cannot locate a queueing system. 
None of these executables were found in your PATH: %s\" % (queueBinaryMap.values(),))", "def get_project(self, name=None):\n if not name:\n name = self.get_project_name()\n projects = self.get_projects()\n for p in projects:\n if p.name == name:\n return p\n raise NotFound(name)", "def get_worker_id_queue():\n global _WORKER_ID_QUEUE\n if _WORKER_ID_QUEUE is None:\n _WORKER_ID_QUEUE = multiprocessing.Queue()\n return _WORKER_ID_QUEUE", "def queue_name(is_parallel):\n return QUEUE_NAMES[int(bool(is_parallel))]", "def get_project(project_id):\n return Project.objects.get(id=project_id)", "def get(id=None, username=None, status=None):\r\n if (id):\r\n qry = ImportQueue.query.filter(ImportQueue.id == id)\r\n elif (username):\r\n qry = ImportQueue.query.filter(ImportQueue.username == username)\r\n\r\n if status is not None:\r\n qry = qry.filter(ImportQueue.status == status)\r\n\r\n return qry.first()" ]
[ "0.77063626", "0.7698841", "0.76416427", "0.7288941", "0.6930836", "0.6912205", "0.60668814", "0.606344", "0.60406834", "0.594424", "0.5849223", "0.5683665", "0.5642507", "0.560627", "0.559546", "0.55402505", "0.55328107", "0.5492972", "0.54287374", "0.5398488", "0.539153", "0.53624433", "0.53489435", "0.5328278", "0.5277877", "0.5277877", "0.5277877", "0.5277877", "0.5277877", "0.5277877", "0.5277877", "0.5277877", "0.5277877", "0.5277877", "0.5277877", "0.5277877", "0.5277877", "0.5277877", "0.5277877", "0.5258311", "0.5249762", "0.52148193", "0.5211566", "0.52013606", "0.5200739", "0.51983833", "0.519788", "0.51896423", "0.5187592", "0.51841086", "0.5163718", "0.51564467", "0.5152412", "0.5152412", "0.5152412", "0.5152412", "0.5152412", "0.5152412", "0.5132529", "0.5132529", "0.5111543", "0.50937724", "0.5090339", "0.5076954", "0.5064561", "0.50552243", "0.5048265", "0.50362146", "0.5033193", "0.5010804", "0.50107133", "0.4990466", "0.4977783", "0.49440435", "0.49236965", "0.49235427", "0.49178493", "0.49164397", "0.48924512", "0.4885478", "0.48732942", "0.48732942", "0.48691857", "0.48681182", "0.48566985", "0.48326206", "0.48177734", "0.48152295", "0.47905913", "0.47590256", "0.47551882", "0.47550273", "0.47341383", "0.4724977", "0.47243264", "0.4723923", "0.47210047", "0.47191674", "0.4713838", "0.47089922" ]
0.86451554
0
Lookup the message controller for the given queue and project.
def get_message_controller(self, queue, project=None):
        target = self.lookup(queue, project)
        return target and target.message_controller
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_queue_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.queue_controller", "def get_claim_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.claim_controller", "def get_subscription_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.subscription_controller", "def lookup(self, queue, project=None):\n\n try:\n shard_id = self._shard_id(queue, project)\n except errors.QueueNotMapped as ex:\n LOG.debug(ex)\n\n # NOTE(kgriffs): Return `None`, rather than letting the\n # exception bubble up, so that the higher layer doesn't\n # have to duplicate the try..except..log code all over\n # the place.\n return None\n\n return self.get_driver(shard_id)", "def get_topic_controller(self, topic, project=None):\n target = self.lookup(topic, project)\n return target and target.topic_controller", "def lookup(self, queue, project=None):\n\n # TODO(kgriffs): SHARDING - Raise an exception if the queue\n # does not have a mapping (it does not exist).\n\n # TODO(kgriffs): SHARDING - Get ID from the catalog backend\n shard_id = '[insert_id]'\n try:\n shard = self._shards[shard_id]\n except KeyError:\n self._shards[shard_id] = shard = self._init_shard(shard_id)\n\n return shard", "def lookup(self, queue, project=None):\n\n try:\n pool_id = self._pool_id(queue, project)\n except errors.QueueNotMapped as ex:\n LOG.debug(ex)\n\n return self.get_default_pool(use_listing=False)\n\n return self.get_driver(pool_id)", "def getQueueDetails(self, queue_name, project_id=\"\"):\n if project_id == \"\":\n project_id = self.project_id\n url = \"%sprojects/%s/queues/%s?oauth=%s\" % (self.url, project_id,\n queue_name, self.token)\n body = self.__get(url)\n queue = json.loads(body)\n return queue", "def find(self, task_id):\n for task_obj in self.queue:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in queue: '{}'\".format(task_id))", "def getMessage(self, queue_name, max=None, project_id=None):\n if project_id is None:\n project_id = self.project_id\n n = \"\"\n if max is not None:\n n = \"&n=%s\" % max\n url = \"%sprojects/%s/queues/%s/messages?oauth=%s%s\" % (self.url,\n project_id, queue_name, self.token, n)\n body = self.__get(url)\n\n return json.loads(body)", "async def get_queue(self, ctx: commands.Context) -> Optional[QueueManager]:\n\n return self.queue[ctx.guild.id]", "def get_queue(self, task_name):\n for name, queue in self.queues.items():\n if task_name in queue:\n return name\n return self.default_queue", "def _find_controller(self, controller):\n if controller is None:\n return None\n # If the output specified is a string controller e.g. 
\"WelcomeController@show\"\n elif isinstance(controller, str):\n if \"@\" in controller:\n controller_path, controller_method_str = controller.split(\"@\")\n else:\n controller_path = controller\n controller_method_str = \"__call__\"\n\n controller_path = modularize(controller_path).split(\".\")\n if len(controller_path) > 1:\n controller_name = controller_path.pop()\n prefix_path = \".\".join(controller_path)\n else:\n controller_name = controller_path[0]\n prefix_path = \"\"\n # build a list of all locations where the controller can be found\n # if the controller is defined such as auth.WelcomeController, append the prefix path to\n # the locations\n locations = list(\n map(\n lambda loc: f\"{loc}.{removeprefix(prefix_path, loc)}\"\n if prefix_path\n else loc,\n self.controllers_locations,\n )\n )\n try:\n self.controller_class = Loader.find(\n Controller, locations, controller_name, raise_exception=True\n )\n except LoaderNotFound as e:\n self.e = e\n print(f\"\\033[93mTrouble importing controller!\\n> {str(e)}\\033[0m\")\n # controller is an instance with a bound method\n elif hasattr(controller, \"__self__\"):\n _, controller_method_str = controller.__qualname__.split(\".\")\n self.controller_instance = controller.__self__\n\n # it's a class or class.method, we don't have to find it, just get the class\n elif hasattr(controller, \"__qualname__\"):\n if \".\" in controller.__qualname__:\n controller_name, controller_method_str = controller.__qualname__.split(\n \".\"\n )\n else:\n controller_name = controller.__qualname__\n controller_method_str = \"__call__\"\n\n try:\n self.controller_class = Loader.get_object(\n controller.__module__, controller_name, raise_exception=True\n )\n except LoaderNotFound as e:\n self.e = e\n print(f\"\\033[93mTrouble importing controller!\\n> {str(e)}\\033[0m\")\n # it's a controller instance\n else:\n self.controller_instance = controller\n controller_method_str = \"__call__\"\n\n # Set the controller method on class. This is a string\n self.controller_method = controller_method_str", "def get_project(self, project):\n project_name = project\n\n try:\n # FIXME: project should be an integer or str, no both\n project_id = int(project)\n except ValueError:\n project_id = None\n\n try:\n # Find the first project occurrence\n project_found = next(p for p in self.get_projects() if p[\"id\"] == project_id\n or p[\"name\"] == project_name)\n # FIXME: use namedtuple instead? 
create a self.project = dict()?\n self.project_name = project_found[\"name\"]\n self.project_id = project_found[\"id\"]\n self.project_address = \"projects/%s/\" % self.project_id\n except StopIteration:\n logger.error(\"Project %s not found\" % project)\n raise KeyError", "def _ns_queue(self, queue, consumer_id):\n return self._ns(queue, consumer_id, \"messages\")", "def _get_controller(self):\n return self.__controller", "def find(self, task_id):\n for task_obj in self._queue:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in dorm: '{}'\".format(task_id))", "def queue_path(self, project, location, queue):\n # This is value is not actually used, but it might be good for debugging.\n return \"projects/{project}/locations/{location}/queues/{queue}\".format(\n project=project, location=location, queue=queue)", "def get_message_from_queue(self):\n message = None, None\n\n try:\n message = self.queue.get(block=True, timeout=3)\n except Empty:\n self.fail(msg='Queue get() failed empty')\n\n return message", "def controller( self ):\n\t\ttry:\n\t\t\treturn self._controller\n\t\texcept Exception as e:\n\t\t\tself.logToConsole( \"controller: %s\" % str(e) )", "def get_controller(cls):\n if not cls.hnd:\n raise Exception('A handler is to be set for getting contoller.')\n if not cls.controller:\n cls.controller = cls.config.controller_class(cls.hnd)\n cls.session = cls.controller.session\n return cls.controller", "def get_pid(self, pid):\n for p in self._queue:\n if p.id == pid:\n return p\n else: return 0", "def __find_notification_channel_name_in_message(message):\n channel_name = [t for t in message.split() if t.startswith('[projects')]\n return channel_name[0].translate(None, '[].')", "def magma_queue_get_device(queue):\n\n return _libmagma.magma_queue_get_device(queue)", "def get_obj(self, name):\r\n val = self.get(name)\r\n if not val:\r\n return None\r\n if name.find('queue') >= 0:\r\n obj = boto.lookup('sqs', val)\r\n if obj:\r\n obj.set_message_class(ServiceMessage)\r\n elif name.find('bucket') >= 0:\r\n obj = boto.lookup('s3', val)\r\n elif name.find('domain') >= 0:\r\n obj = boto.lookup('sdb', val)\r\n else:\r\n obj = None\r\n return obj", "def _ns_message(self, queue, message_id):\n return self._ns(queue, \"messages\", message_id)", "def get_project_name(self, project_id):\n test = \"\"\"SELECT EXISTS(\n SELECT 1\n FROM barcodes.project\n WHERE project_id=%s\n )\"\"\"\n query = \"\"\"SELECT project\n FROM barcodes.project\n WHERE project_id=%s\"\"\"\n\n with self._transaction.cursor() as cur:\n cur.execute(test, [project_id, ])\n if not cur.fetchone()[0]:\n raise NotFound(f\"Project f'{project_id}' not found\")\n else:\n cur.execute(query, [project_id, ])\n return cur.fetchone()[0]", "def get_obj(self, name):\n val = self.get(name)\n if not val:\n return None\n if name.find('queue') >= 0:\n obj = boto.lookup('sqs', val)\n if obj:\n obj.set_message_class(ServiceMessage)\n elif name.find('bucket') >= 0:\n obj = boto.lookup('s3', val)\n elif name.find('domain') >= 0:\n obj = boto.lookup('sdb', val)\n else:\n obj = None\n return obj", "def get_controller_func(controller):\n\n if controller in CONTROLLERS:\n return CONTROLLERS[controller]\n\n return None", "def get_queue_num(self, qos_id, queue_id):\n\n q_num = None\n queues = self.qos_dict[qos_id][\"ovsdb:qos-entries\"][0][\"queue-list\"]\n\n # Go through all queues\n for queue in queues:\n cur_queue_id = queue[\"queue-ref\"].split(\"'\")[-2]\n # If we have a match, get the q_num and break\n if 
cur_queue_id == queue_id:\n q_num = queue[\"queue-number\"]\n break\n\n # queue_id is not found in the qos\n if q_num is None:\n #print(json.dumps(self.qos_dict[qos_id], indent=3))\n raise KeyError\n\n return q_num", "def get_controller_doc(self, controller_name: str) -> ControllerDoc:\n if controller_name not in self.controller_docs:\n raise KeyError(f\"Controller {controller_name} not found\")\n\n return self.controller_docs[controller_name]", "def find_queue(queue):\n athena_queue = canonicalize_queue(queue)\n # If a queue isn't an Athena queue, punt straight to the default\n # CUPS server\n if not athena_queue:\n return SYSTEM_CUPS, None, queue\n queue = athena_queue\n\n # Get rid of any instance on the queue name\n # TODO The purpose of instances is to have different sets of default\n # options. Queues may also have default options on the null\n # instance. Figure out if we need to do anything about them\n queue = queue.split('/')[0]\n\n # If we're still here, the queue is definitely an Athena print\n # queue; it was either in the local cupsd pointing to Athena, or the\n # local cupsd didn't know about it.\n # Figure out what Athena thinks the backend server is, and whether\n # that server is running a cupsd; if not, fall back to LPRng\n\n rm = get_hesiod_print_server(queue)\n if not rm:\n # In the unlikely event we're wrong about it being an Athena\n # print queue, the local cupsd is good enough\n return SYSTEM_CUPS, None, queue\n\n # Give up and return rm and queue. If it's not running a cupsd,\n # too bad. It's not our job to check whether cupsd is running.\n return SYSTEM_CUPS, rm, queue", "def get_current_controller():\n controllers = parse_yaml_file(JUJU_CONTROLLERS_YAML)\n return controllers.get(\"current-controller\", \"\")", "def get(queue_name: str, **kwargs) -> Queue:\n return Queue(queue_name, **kwargs)", "def get_controller_id_for_topic(self, topic_name):\n request = MetadataRequest_v1(topics=[topic_name])\n response = self.send_request_and_get_response(request)\n return response.controller_id", "def ctrlqueue_device_handle(self) -> int:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(5), ctypes.c_int32(0))", "def get_object(self):\n # read the URL data values into variables\n astronaut_pk = self.kwargs['astronaut_pk']\n message_pk = self.kwargs['message_pk']\n\n # find the SendMessage object, and return it\n st_cfh = SendMessage.objects.get(pk=message_pk)\n return st_cfh", "def get_project(con):\n try:\n return con.project_read(fq_name=conf.get('default_project', 'UNEXPECTED_VALUE'))\n except:\n log.debug('Unable to find project default-domain, admin:', exc_info=True)\n return None", "def deregister(self, queue, project=None):\n self._catalogue_ctrl.delete(project, queue)", "def getController(self):\n return self.__controller", "def get_project(self, name=None):\n if not name:\n name = self.get_project_name()\n projects = self.get_projects()\n for p in projects:\n if p.name == name:\n return p\n raise NotFound(name)", "def get(self, name):\n try:\n return self.projects[name]\n except KeyError:\n print(\"No project called %s was found\" %name)", "def _dispatch(req):\n match = req.environ['wsgiorg.routing_args'][1]\n if not match:\n return webob.exc.HTTPNotFound()\n app = match['controller']\n return app", "def searchClientProject(self, name):\n for client in self.getClients():\n try:\n for project in self.getClientProjects(client['id']):\n if project['name'] == name:\n return project\n except Exception:\n continue\n\n print('Could not find client by the name')\n return 
None", "def get_project(self, project_name):\n raise self._get_notimplementederror(\"get_project\")", "def search_queue_number(self, Q_strip):\n if Q_strip is self.PF_Q_strip:\n out = self.dut.send_expect(\"cat config/common_base\", \"]# \", 10)\n pattern = \"(%s=)(\\d*)\" % Q_strip\n else :\n out = self.dut.send_expect(\"cat drivers/net/i40e/i40e_ethdev.c\", \"]# \", 10)\n pattern = \"#define %s\\s*(\\d*)\" % Q_strip\n s = re.compile(pattern)\n res = s.search(out)\n if res is None:\n print utils.RED('Search no queue number.')\n return None\n else:\n if Q_strip is self.VF_Q_strip:\n queue = res.group(1)\n else :\n queue = res.group(2)\n return int(queue)", "def get_project(project_id):\n return Project.objects.get(id=project_id)", "def GetApiMessage(message_name):\n messages = apis.GetMessagesModule(_BQ_API, _BQ_API_VERSION)\n return getattr(messages, message_name)", "def get_message(cls):\n rp = cls.get()\n try:\n message = rp.queue_send.get_nowait()\n except Exception:\n return None\n\n return message", "def find_project(self, value, key=\"name\"):\n if not value:\n return\n if key.lower() not in (\"name\", \"id\"):\n raise ValueError()\n\n if key == \"name\" and not getattr(self, \"projects\", None):\n self.get_projects()\n elif key == \"id\" and not getattr(self, \"projects\", None):\n return self.get_project(value)\n\n try:\n if key.lower() == \"name\":\n return self.projects[self._project_indices_by_name[value]]\n elif key.lower() == \"id\":\n return self.projects[self._project_indices_by_id[value]]\n except KeyError:\n self.logger.debug(\"Project {}: {} not found\".format(key, value))", "def get_project(self, project_id):\n res = self.conn.cursor().execute(\"SELECT * FROM projects where id=?\", (project_id,))\n return res.fetchone()", "def get_registered_controller(self, model):\n return self._registry[model]", "async def _get_work_pool_queue_id_from_name(\n self, session: AsyncSession, work_pool_name: str, work_pool_queue_name: str\n ) -> UUID:\n work_pool_queue = await models.workers.read_work_pool_queue_by_name(\n session=session,\n work_pool_name=work_pool_name,\n work_pool_queue_name=work_pool_queue_name,\n )\n if not work_pool_queue:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Worker queue '{work_pool_name}/{work_pool_queue_name}' not found.\",\n )\n\n return work_pool_queue.id", "def get_queue_number(self):\n outstring = self.dut.send_expect(\"stop\", \"testpmd> \")\n time.sleep(2)\n result_scanner = r\"Forward Stats for RX Port= %s/Queue=\\s?([0-9]+)\" % self.dut_ports[0]\n scanner = re.compile(result_scanner, re.DOTALL)\n m = scanner.search(outstring)\n queue_id = m.group(1)\n print \"queue is %s\" % queue_id\n self.dut.send_expect(\"start\", \"testpmd> \")\n return queue_id", "def _dispatch(req):\n match = req.environ['wsgiorg.routing_args'][1]\n if not match:\n msg = ('(%(url)s): The resource could not be found.' 
%\n {'url': req.url})\n return render_exception(exception.NotFound(msg))\n app = match['controller']\n return app", "def get_project(project):\n command = 'openstack project show %s' % project\n try:\n project_info = parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])\n except:\n print \"Project '%s' not found.\" % project\n sys.exit(-1)\n return project_info", "def get_thread_for_message(id):\n query = 'SELECT thread_id from messages WHERE id like %s'\n return __perform__(query, (id,), method='fetchone')", "def queue_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"queue_name\")", "def show_controller(cls, args, config):\n if len(args) == 0:\n raise MOLNSException(\"USAGE: molns controller show name\")\n return {'msg': str(config.get_object(name=args[0], kind='Controller'))}", "def _pool_id(self, queue, project=None):\n return self._catalogue_ctrl.get(project, queue)['pool']", "def getClientProject(self, clientName, projectName):\n for client in self.getClients():\n if client['name'] == clientName:\n cid = client['id']\n\n if not cid:\n print('Could not find such client name')\n return None\n\n for projct in self.getClientProjects(cid):\n if projct['name'] == projectName:\n pid = projct['id']\n\n if not pid:\n print('Could not find such project name')\n return None\n\n return self.getProject(pid)", "def get_project(self, name=None):\n if not name:\n if not self.select_project:\n log.error(\"no default project name specified\")\n return\n name = self.select_project\n\n if name in self.projects:\n return self.projects[name]\n\n log.debug( \"project {} not found in {} projects \".format(name, len(self.projects)) )\n return None", "def ctrlqueue_show(self) -> int:\n try:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(7), ctypes.c_int32(0))\n except Exception as e:\n Base.warn_msg(\"An error occur when tried to get *Num Actions of CrlQueue* check if *Queue* is NOT empty\", e)", "def register(self, queue, project=None):\n # NOTE(cpp-cabrera): only register a queue if the entry\n # doesn't exist\n if not self._catalogue_ctrl.exists(project, queue):\n # NOTE(cpp-cabrera): limit=0 implies unlimited - select from\n # all shards\n shard = select.weighted(self._shards_ctrl.list(limit=0))\n\n if not shard:\n raise errors.NoShardFound()\n\n self._catalogue_ctrl.insert(project, queue, shard['name'])", "def get_current_project():\n return get_from_session(KEY_PROJECT)", "def get_queue(self, window_info, original_kwargs):\n if callable(self.queue):\n return str(self.queue(self, window_info, original_kwargs))\n return str(self.queue) or settings.CELERY_DEFAULT_QUEUE", "def get_message_class_by_type(msgtype):\n\n try:\n module = importlib.import_module('platypush.message.' 
+ msgtype)\n except ImportError as e:\n logging.warning('Unsupported message type {}'.format(msgtype))\n raise RuntimeError(e)\n\n cls_name = msgtype[0].upper() + msgtype[1:]\n\n try:\n msgclass = getattr(module, cls_name)\n except AttributeError as e:\n logging.warning('No such class in {}: {}'.format(\n module.__name__, cls_name))\n raise RuntimeError(e)\n\n return msgclass", "def controller(self): # type: () -> ControllerHostConfig\n return self.host_settings.controller", "def clearQueue(self, queue_name, project_id=None):\n if project_id is None:\n project_id = self.project_id\n\n url = \"%sprojects/%s/queues/%s/clear?oauth=%s\" % (self.url, project_id, queue_name, self.token)\n body = self.__post(url)\n return json.loads(body)", "def get_queue_by_name(name):\n sqs = boto3.resource('sqs')\n return sqs.get_queue_by_name(QueueName=name)", "def register(self, queue, project=None):\n\n # TODO(kgriffs): SHARDING - Implement this!\n pass", "def get_project(db, id):\n \n for element in db:\n if element['project_no'] == id:\n return element\n return None", "def show(self, req, id):\n context = req.environ['manila.context']\n\n try:\n message = self.message_api.get(context, id)\n except exception.MessageNotFound as error:\n raise exc.HTTPNotFound(explanation=error.msg)\n\n return self._view_builder.detail(req, message)", "def deregister(self, queue, project=None):\n self._invalidate_cached_id(queue, project)\n self._catalogue_ctrl.delete(project, queue)", "def getQueue():\n dm_plugin_url = f\"https://api.zuri.chat/marketplace/plugins/{PLUGIN_ID}\"\n try:\n response = requests.get(url=dm_plugin_url)\n except requests.exceptions.RequestException as e:\n return e\n if response.status_code == 200:\n return response.json()[\"data\"][\"queue\"]\n else:\n return None", "def process_project(self, project_name):\n self.logging.debug('Retrieving project %s..', project_name)\n\n try:\n project = self.get_lp_client().projects[project_name]\n except KeyError:\n self.logging.error(\n \"Project %s wasn't found. Skipped..\",\n project_name\n )\n else:\n if project:\n self.logging.debug(\n 'Retrieving active milestone %s..',\n self.get_new_milestone_name()\n )\n\n new_milestone = project.getMilestone(\n name=self.get_new_milestone_name()\n )\n self.get_stats()[project.name] = {}\n\n for old_milestone_name in self.get_old_milestone_names():\n if self.is_limit_achived():\n break\n\n self.process_milestone_on_project(\n project, old_milestone_name, new_milestone\n )\n\n else:\n self.logging.debug(\n \"Project %s wasn't found. 
Skipped..\",\n project_name\n )", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def ctrlqueue_action_code(self) -> int:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(4), ctypes.c_int32(0))", "def findPluginFromTrigger(self, trigger):\n\t\ttrigger = trigger.lower() # lowercase!\n\t\t\n\t\t# Loop through all plugins.\n\t\tfor plugin_name in self.plugins:\n\t\t\tplugin = self.getPlugin(plugin_name)\n\t\t\t\n\t\t\t# Check if the plugin has that trigger.\n\t\t\tif plugin.hasCommand(trigger):\n\t\t\t\treturn plugin_name\n\t\t\n\t\t# Not found :(\n\t\treturn None", "def get_issue(issue_number):\n backend_name = os.environ[\"ISSUE_BACKEND\"]\n backend_module = importlib.import_module(\n \"issuebranch.backends.{}\".format(backend_name)\n )\n\n return getattr(backend_module, \"Backend\")(issue_number)", "def get_queue(self):\r\n return _channeldata[self.chan].queue", "def get_project(self):\n raise NotImplementedError(\"get_project is not implemented\")", "def get_controller(equipment, accessmethod, logfile=None):\n path = _CONTROLLERMAP[accessmethod]\n constructor = module.get_object(path)\n return constructor(equipment, logfile)", "def support_queue(self, queue_id):\r\n return support_queues.SupportQueue(self, queue_id)", "def find_project_by_id(self, project_id):\n try:\n with open('{}/{}'.format(self._storage_location, project_id)) as project_file:\n name = project_file.readline().rstrip('\\n')\n description = project_file.readline().rstrip('\\n')\n members = project_file.readline().rstrip('\\n').split(',')\n documents = project_file.readline().rstrip('\\n').split(',')\n project = Project(name, description, members, documents)\n except:\n raise ValueError(\"No such project\")\n return project", "def _shard_id(self, queue, project=None):\n cache_key = _shard_cache_key(queue, project)\n shard_id = self._cache.get(cache_key)\n\n if shard_id is None:\n shard_id = self._catalogue_ctrl.get(project, queue)['shard']\n\n if not self._cache.set(cache_key, shard_id, _SHARD_CACHE_TTL):\n LOG.warn('Failed to cache shard ID')\n\n return shard_id" ]
[ "0.80495954", "0.73924774", "0.73809624", "0.6577108", "0.6438663", "0.6041202", "0.5956915", "0.5211523", "0.5192648", "0.5166102", "0.5160857", "0.5156492", "0.5145304", "0.51198655", "0.51061904", "0.50889635", "0.49702215", "0.49656522", "0.49377242", "0.49108028", "0.4877074", "0.4871782", "0.4865142", "0.4850226", "0.48427004", "0.4820803", "0.4807853", "0.48022175", "0.47915515", "0.47628483", "0.47456834", "0.47415808", "0.4736486", "0.47153962", "0.4706798", "0.47006172", "0.46787837", "0.46666265", "0.46634918", "0.46571285", "0.46492067", "0.46469486", "0.46399045", "0.46354756", "0.46345502", "0.46338356", "0.46331784", "0.46030805", "0.459231", "0.4591029", "0.45820606", "0.4572108", "0.45671323", "0.45661882", "0.45553812", "0.4547761", "0.4546366", "0.4545206", "0.4530519", "0.45246986", "0.45215943", "0.45138147", "0.4509467", "0.44847766", "0.44841892", "0.44822854", "0.44719744", "0.4459616", "0.44583696", "0.44573224", "0.4457278", "0.4450984", "0.44477892", "0.4444918", "0.44445452", "0.44397825", "0.44342345", "0.44342345", "0.44342345", "0.44342345", "0.44342345", "0.44342345", "0.44342345", "0.44342345", "0.44342345", "0.44342345", "0.44342345", "0.44342345", "0.44342345", "0.44342345", "0.44342345", "0.44222242", "0.44190606", "0.4416369", "0.4414844", "0.44108084", "0.43987778", "0.43965757", "0.4391886", "0.4387389" ]
0.8593036
0
Lookup the claim controller for the given queue and project.
def get_claim_controller(self, queue, project=None):
    target = self.lookup(queue, project)
    return target and target.claim_controller
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_queue_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.queue_controller", "def get_subscription_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.subscription_controller", "def get_message_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.message_controller", "def get_topic_controller(self, topic, project=None):\n target = self.lookup(topic, project)\n return target and target.topic_controller", "def lookup(self, queue, project=None):\n\n try:\n shard_id = self._shard_id(queue, project)\n except errors.QueueNotMapped as ex:\n LOG.debug(ex)\n\n # NOTE(kgriffs): Return `None`, rather than letting the\n # exception bubble up, so that the higher layer doesn't\n # have to duplicate the try..except..log code all over\n # the place.\n return None\n\n return self.get_driver(shard_id)", "def lookup(self, queue, project=None):\n\n # TODO(kgriffs): SHARDING - Raise an exception if the queue\n # does not have a mapping (it does not exist).\n\n # TODO(kgriffs): SHARDING - Get ID from the catalog backend\n shard_id = '[insert_id]'\n try:\n shard = self._shards[shard_id]\n except KeyError:\n self._shards[shard_id] = shard = self._init_shard(shard_id)\n\n return shard", "def lookup(self, queue, project=None):\n\n try:\n pool_id = self._pool_id(queue, project)\n except errors.QueueNotMapped as ex:\n LOG.debug(ex)\n\n return self.get_default_pool(use_listing=False)\n\n return self.get_driver(pool_id)", "def _get_controller(self):\n return self.__controller", "def get_queue_num(self, qos_id, queue_id):\n\n q_num = None\n queues = self.qos_dict[qos_id][\"ovsdb:qos-entries\"][0][\"queue-list\"]\n\n # Go through all queues\n for queue in queues:\n cur_queue_id = queue[\"queue-ref\"].split(\"'\")[-2]\n # If we have a match, get the q_num and break\n if cur_queue_id == queue_id:\n q_num = queue[\"queue-number\"]\n break\n\n # queue_id is not found in the qos\n if q_num is None:\n #print(json.dumps(self.qos_dict[qos_id], indent=3))\n raise KeyError\n\n return q_num", "def magma_queue_get_device(queue):\n\n return _libmagma.magma_queue_get_device(queue)", "def getQueueDetails(self, queue_name, project_id=\"\"):\n if project_id == \"\":\n project_id = self.project_id\n url = \"%sprojects/%s/queues/%s?oauth=%s\" % (self.url, project_id,\n queue_name, self.token)\n body = self.__get(url)\n queue = json.loads(body)\n return queue", "def get_controller_func(controller):\n\n if controller in CONTROLLERS:\n return CONTROLLERS[controller]\n\n return None", "def deregister(self, queue, project=None):\n self._catalogue_ctrl.delete(project, queue)", "def get_project(arn=None):\n pass", "def deregister(self, queue, project=None):\n self._invalidate_cached_id(queue, project)\n self._catalogue_ctrl.delete(project, queue)", "def get_project(self, project):\n project_name = project\n\n try:\n # FIXME: project should be an integer or str, no both\n project_id = int(project)\n except ValueError:\n project_id = None\n\n try:\n # Find the first project occurrence\n project_found = next(p for p in self.get_projects() if p[\"id\"] == project_id\n or p[\"name\"] == project_name)\n # FIXME: use namedtuple instead? 
create a self.project = dict()?\n self.project_name = project_found[\"name\"]\n self.project_id = project_found[\"id\"]\n self.project_address = \"projects/%s/\" % self.project_id\n except StopIteration:\n logger.error(\"Project %s not found\" % project)\n raise KeyError", "def _find_controller(self, controller):\n if controller is None:\n return None\n # If the output specified is a string controller e.g. \"WelcomeController@show\"\n elif isinstance(controller, str):\n if \"@\" in controller:\n controller_path, controller_method_str = controller.split(\"@\")\n else:\n controller_path = controller\n controller_method_str = \"__call__\"\n\n controller_path = modularize(controller_path).split(\".\")\n if len(controller_path) > 1:\n controller_name = controller_path.pop()\n prefix_path = \".\".join(controller_path)\n else:\n controller_name = controller_path[0]\n prefix_path = \"\"\n # build a list of all locations where the controller can be found\n # if the controller is defined such as auth.WelcomeController, append the prefix path to\n # the locations\n locations = list(\n map(\n lambda loc: f\"{loc}.{removeprefix(prefix_path, loc)}\"\n if prefix_path\n else loc,\n self.controllers_locations,\n )\n )\n try:\n self.controller_class = Loader.find(\n Controller, locations, controller_name, raise_exception=True\n )\n except LoaderNotFound as e:\n self.e = e\n print(f\"\\033[93mTrouble importing controller!\\n> {str(e)}\\033[0m\")\n # controller is an instance with a bound method\n elif hasattr(controller, \"__self__\"):\n _, controller_method_str = controller.__qualname__.split(\".\")\n self.controller_instance = controller.__self__\n\n # it's a class or class.method, we don't have to find it, just get the class\n elif hasattr(controller, \"__qualname__\"):\n if \".\" in controller.__qualname__:\n controller_name, controller_method_str = controller.__qualname__.split(\n \".\"\n )\n else:\n controller_name = controller.__qualname__\n controller_method_str = \"__call__\"\n\n try:\n self.controller_class = Loader.get_object(\n controller.__module__, controller_name, raise_exception=True\n )\n except LoaderNotFound as e:\n self.e = e\n print(f\"\\033[93mTrouble importing controller!\\n> {str(e)}\\033[0m\")\n # it's a controller instance\n else:\n self.controller_instance = controller\n controller_method_str = \"__call__\"\n\n # Set the controller method on class. 
This is a string\n self.controller_method = controller_method_str", "def get_queue(self, task_name):\n for name, queue in self.queues.items():\n if task_name in queue:\n return name\n return self.default_queue", "def record_get_for_project(project_id, deleted=False, session=None):\n session = session or get_session()\n result = session.query(models.ProjectAccountRecord).\\\n filter_by(project_id=project_id).\\\n filter_by(deleted=deleted).\\\n first()\n\n if not result:\n raise exception.ProjectRecordNotFound()\n\n return result", "def get_controller(cls):\n if not cls.hnd:\n raise Exception('A handler is to be set for getting contoller.')\n if not cls.controller:\n cls.controller = cls.config.controller_class(cls.hnd)\n cls.session = cls.controller.session\n return cls.controller", "def get_current_controller():\n controllers = parse_yaml_file(JUJU_CONTROLLERS_YAML)\n return controllers.get(\"current-controller\", \"\")", "def get_controller_id_for_topic(self, topic_name):\n request = MetadataRequest_v1(topics=[topic_name])\n response = self.send_request_and_get_response(request)\n return response.controller_id", "def get_project(projectname):\n return jsonify(admin.get_project_info(current_app.scoped_session(), projectname))", "def get_project(self):\n project_id = self.kwargs['project_id']\n try:\n project = Project.objects.get(pk=project_id)\n except ObjectDoesNotExist:\n raise ObjectNotFound('Not found')\n contributors = CustomUser.objects.filter(contributor__project=project.pk)\n if self.request.user not in contributors:\n raise ObjectNotFound('Not found')\n return project", "def get_project(self):\n project_id = self.kwargs['project_id']\n try:\n project = Project.objects.get(pk=project_id)\n except ObjectDoesNotExist:\n raise ObjectNotFound('Not found')\n contributors = CustomUser.objects.filter(contributor__project=project.pk)\n if self.request.user not in contributors:\n raise ObjectNotFound('Not found')\n return project", "def get_project(con):\n try:\n return con.project_read(fq_name=conf.get('default_project', 'UNEXPECTED_VALUE'))\n except:\n log.debug('Unable to find project default-domain, admin:', exc_info=True)\n return None", "def get_project_specific(self, project_format='id'):\n if self.api_version == 2:\n return self.creds.get('tenant_%s' % project_format)\n else:\n return self.creds.get('project_%s' % project_format)", "def get_controller(equipment, accessmethod, logfile=None):\n path = _CONTROLLERMAP[accessmethod]\n constructor = module.get_object(path)\n return constructor(equipment, logfile)", "def get_project(project_id):\n return Project.objects.get(id=project_id)", "def _shard_id(self, queue, project=None):\n cache_key = _shard_cache_key(queue, project)\n shard_id = self._cache.get(cache_key)\n\n if shard_id is None:\n shard_id = self._catalogue_ctrl.get(project, queue)['shard']\n\n if not self._cache.set(cache_key, shard_id, _SHARD_CACHE_TTL):\n LOG.warn('Failed to cache shard ID')\n\n return shard_id", "def getController(self):\n return self.__controller", "def register(self, queue, project=None):\n\n # TODO(kgriffs): SHARDING - Implement this!\n pass", "def get_issue(issue_number):\n backend_name = os.environ[\"ISSUE_BACKEND\"]\n backend_module = importlib.import_module(\n \"issuebranch.backends.{}\".format(backend_name)\n )\n\n return getattr(backend_module, \"Backend\")(issue_number)", "def register(self, queue, project=None):\n # NOTE(cpp-cabrera): only register a queue if the entry\n # doesn't exist\n if not self._catalogue_ctrl.exists(project, queue):\n 
# NOTE(cpp-cabrera): limit=0 implies unlimited - select from\n # all shards\n shard = select.weighted(self._shards_ctrl.list(limit=0))\n\n if not shard:\n raise errors.NoShardFound()\n\n self._catalogue_ctrl.insert(project, queue, shard['name'])", "def get_customer_id_from_project(context):\n project_id = context.current_parameters.get('project_id')\n project = Project.get(project_id)\n return project.customer_id", "def ctrlqueue_device_handle(self) -> int:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(5), ctypes.c_int32(0))", "def get_project(db, id):\n \n for element in db:\n if element['project_no'] == id:\n return element\n return None", "def get_pid(self, pid):\n for p in self._queue:\n if p.id == pid:\n return p\n else: return 0", "def get_controller(cls, args, config):\n logging.debug(\"MOLNSController.get_controller(args={0})\".format(args))\n controller_obj = cls._get_controllerobj(args, config)\n if controller_obj is None:\n return\n\n if controller_obj.provider.type == constants.Constants.DockerProvider:\n raise NotImplementedError(\"DockerController does not support this feature yet.\")\n\n # Check if any instances are assigned to this controller\n instance_list = config.get_controller_instances(controller_id=controller_obj.id)\n\n # Check if they are running\n ip = None\n if len(instance_list) > 0:\n for i in instance_list:\n status = controller_obj.get_instance_status(i)\n logging.debug(\"instance={0} has status={1}\".format(i, status))\n if status == controller_obj.STATUS_RUNNING:\n ip = i.ip_address\n if ip is None:\n print \"No active instance for this controller\"\n return\n cmd = ['/usr/bin/scp','-oStrictHostKeyChecking=no','-oUserKnownHostsFile=/dev/null','-i',\n controller_obj.provider.sshkeyfilename(), 'ubuntu@{0}:{1}'.format(ip, args[1]), '.']\n print \" \".join(cmd)\n subprocess.call(cmd)\n print \"SSH process completed\"", "def find(self, task_id):\n for task_obj in self.queue:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in queue: '{}'\".format(task_id))", "def _pool_id(self, queue, project=None):\n return self._catalogue_ctrl.get(project, queue)['pool']", "async def get_queue(self, ctx: commands.Context) -> Optional[QueueManager]:\n\n return self.queue[ctx.guild.id]", "def get_project(self):\n project_id = self.kwargs['project_id']\n try:\n project = Project.objects.get(pk=project_id)\n except ObjectDoesNotExist:\n raise ObjectNotFound('Not found')\n self.contributors = CustomUser.objects.filter(contributor__project=project.pk)\n if self.request.user not in self.contributors:\n raise ObjectNotFound('Not found')\n return project", "def get_project(project):\n command = 'openstack project show %s' % project\n try:\n project_info = parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])\n except:\n print \"Project '%s' not found.\" % project\n sys.exit(-1)\n return project_info", "def controller( self ):\n\t\ttry:\n\t\t\treturn self._controller\n\t\texcept Exception as e:\n\t\t\tself.logToConsole( \"controller: %s\" % str(e) )", "def request_project_by_key(cfg, project_key):\n\n url = cjm.request.make_cj_url(cfg, \"project\", project_key)\n response = cjm.request.make_cj_request(cfg, url)\n return response.json()", "def _dispatch(req):\n match = req.environ['wsgiorg.routing_args'][1]\n if not match:\n return webob.exc.HTTPNotFound()\n app = match['controller']\n return app", "def get_current_project():\n return get_from_session(KEY_PROJECT)", "def find_queue(queue):\n athena_queue = 
canonicalize_queue(queue)\n # If a queue isn't an Athena queue, punt straight to the default\n # CUPS server\n if not athena_queue:\n return SYSTEM_CUPS, None, queue\n queue = athena_queue\n\n # Get rid of any instance on the queue name\n # TODO The purpose of instances is to have different sets of default\n # options. Queues may also have default options on the null\n # instance. Figure out if we need to do anything about them\n queue = queue.split('/')[0]\n\n # If we're still here, the queue is definitely an Athena print\n # queue; it was either in the local cupsd pointing to Athena, or the\n # local cupsd didn't know about it.\n # Figure out what Athena thinks the backend server is, and whether\n # that server is running a cupsd; if not, fall back to LPRng\n\n rm = get_hesiod_print_server(queue)\n if not rm:\n # In the unlikely event we're wrong about it being an Athena\n # print queue, the local cupsd is good enough\n return SYSTEM_CUPS, None, queue\n\n # Give up and return rm and queue. If it's not running a cupsd,\n # too bad. It's not our job to check whether cupsd is running.\n return SYSTEM_CUPS, rm, queue", "def queue_maker(queue, bucket_name):\n scraper = key_scraper.KaleidoscopeKeyScraper(\n bucket_name=bucket_name,\n queue=queue,\n )\n scraper.add_keys_to_queue()\n\n return None", "def get(self):\n policy_number = reqparse.request.args.get('policy_number')\n category = reqparse.request.args.get('category')\n\n dao = ClaimDao()\n return dao.get(policy_number=policy_number, category=category)", "def get_project(self, project_id):\n res = self.conn.cursor().execute(\"SELECT * FROM projects where id=?\", (project_id,))\n return res.fetchone()", "def get_controller1(self):\n return self.__controller1", "def get_one(self, controller_fs_uuid):\n if self._from_isystems:\n raise exception.OperationNotPermitted\n\n rpc_controller_fs = \\\n objects.controller_fs.get_by_uuid(pecan.request.context,\n controller_fs_uuid)\n return ControllerFs.convert_with_links(rpc_controller_fs)", "def deregister(self, queue, project=None):\n\n # TODO(kgriffs): SHARDING - Implement this!\n pass", "def get_project_name(self, project_id):\n test = \"\"\"SELECT EXISTS(\n SELECT 1\n FROM barcodes.project\n WHERE project_id=%s\n )\"\"\"\n query = \"\"\"SELECT project\n FROM barcodes.project\n WHERE project_id=%s\"\"\"\n\n with self._transaction.cursor() as cur:\n cur.execute(test, [project_id, ])\n if not cur.fetchone()[0]:\n raise NotFound(f\"Project f'{project_id}' not found\")\n else:\n cur.execute(query, [project_id, ])\n return cur.fetchone()[0]", "def check_claim(item, prop, target):\n item_dict = item.get()\n try:\n claims = item_dict['claims'][prop]\n except KeyError:\n return None\n\n for claim in claims:\n if claim.target_equals(target):\n return claim\n return None", "def get_controller_doc(self, controller_name: str) -> ControllerDoc:\n if controller_name not in self.controller_docs:\n raise KeyError(f\"Controller {controller_name} not found\")\n\n return self.controller_docs[controller_name]", "def queue_path(self, project, location, queue):\n # This is value is not actually used, but it might be good for debugging.\n return \"projects/{project}/locations/{location}/queues/{queue}\".format(\n project=project, location=location, queue=queue)", "def __project(uri):\n uri = uri.lower().split('/')[-1].split('_')[0]\n project = {\n 'as': \"ICOS\",\n 'es': \"ICOS\",\n 'os': \"ICOS\",\n 'neon': 'NEON',\n 'ingos': 'INGOS',\n 'fluxnet': 'FLUXNET'\n }\n\n if uri in project:\n return project.get(uri)\n 
else:\n return 'other'", "def claim_id(self) -> str:\n return self._claim_id", "def claim_id(self) -> str:\n return self._claim_id", "def getController(self,deviceID):\n if deviceID in self.controllers:\n return self.controllers[deviceID]\n else:\n newCtrl = MotorController(self,deviceID)\n self.controllers[deviceID] = newCtrl\n return newCtrl", "def get_registered_controller(self, model):\n return self._registry[model]", "def find_by_project_id(cls, project_id, is_prod: bool) -> OIDCConfig:\n return cls.query.filter(OIDCConfig.project_id == project_id and\n OIDCConfig.is_prod == is_prod).first()", "def ctrlqueue_show(self) -> int:\n try:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(7), ctypes.c_int32(0))\n except Exception as e:\n Base.warn_msg(\"An error occur when tried to get *Num Actions of CrlQueue* check if *Queue* is NOT empty\", e)", "def get_scoped_project(self, project_auth_scope):\n\n filter_params = {}\n url = \"{0}/{1}/{2}\".format(self.keystone_server_url, DEFAULT_KEYSTONE_API_VERSION, \"projects\")\n if project_auth_scope.tenant_id:\n if project_auth_scope.project_name:\n return {\"id\": project_auth_scope.tenant_id, \"name\": project_auth_scope.project_name}\n\n url = \"{}/{}\".format(url, project_auth_scope.tenant_id)\n else:\n filter_params = {\"name\": project_auth_scope.project_name, \"domain_id\": project_auth_scope.domain_id}\n\n headers = {'X-Auth-Token': project_auth_scope.auth_token}\n\n try:\n project_details = self._make_request_with_auth_fallback(url, headers, params=filter_params)\n if filter_params:\n assert len(project_details[\"projects\"]) == 1, \"Non-unique project credentials\"\n\n # Set the tenant_id so we won't have to fetch it next time\n project_auth_scope.tenant_id = project_details[\"projects\"][0].get(\"id\")\n return project_details[\"projects\"][0]\n else:\n project_auth_scope.project_name = project_details[\"project\"][\"name\"]\n return project_details[\"project\"]\n\n except Exception as e:\n self.warning('Unable to get the project details: %s', e)\n raise e\n\n return None", "def _dispatch(req):\n match = req.environ['wsgiorg.routing_args'][1]\n if not match:\n msg = ('(%(url)s): The resource could not be found.' 
%\n {'url': req.url})\n return render_exception(exception.NotFound(msg))\n app = match['controller']\n return app", "def get_controller(self):\n node_id, _host, _port, _rack = self.client.cluster.controller\n return node_id", "def _get_issue_tracker_project_name(testcase=None):\n from clusterfuzz._internal.datastore import data_handler\n job_type = testcase.job_type if testcase else None\n return data_handler.get_issue_tracker_name(job_type)", "def ctrlqueue_action_code(self) -> int:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(4), ctypes.c_int32(0))", "def find(self, task_id):\n for task_obj in self._queue:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in dorm: '{}'\".format(task_id))", "def get_key(cls, obj, query):\n\n if hasattr(obj, 'config'):\n for item in obj.config.hardware.device:\n if query in item.deviceInfo.label:\n key = item.key\n controller_key = item.controllerKey\n\n return (key, controller_key)", "def get(self, uuid: str) -> Preprocessor:\n return self.preprocessors[uuid]", "def getProducer():\r\n\r\n # get the config and a producer\r\n config = ecommerce.config.getConfig()\r\n return ecommerce.queue.queue(config, queuePrefix)", "def get_project(self, project_name):\n raise self._get_notimplementederror(\"get_project\")", "def policy_controller(self) -> Optional['outputs.FeatureMembershipConfigmanagementPolicyController']:\n return pulumi.get(self, \"policy_controller\")", "def get_by_project_id(project_id: int):\n return db.session.get(CustomEditor, project_id)", "def get(self, name):\n try:\n return self.projects[name]\n except KeyError:\n print(\"No project called %s was found\" %name)", "def get_player_controller() -> unrealsdk.UObject:\n return unrealsdk.GetEngine().GamePlayers[0].Actor", "def _controller(self):\n # TODO: Probably better to use request_patron and check for\n # None here.\n patron = self.authenticated_patron_from_request()\n storage = CirculationPatronProfileStorage(patron, flask.url_for)\n return CoreProfileController(storage)", "def first_claim(self, key: str, default=None):\n if key not in self._itempage.claims:\n return default\n if not self._itempage.claims[key]:\n return default\n return self._itempage.claims[key][0].getTarget()", "def get(queue_name: str, **kwargs) -> Queue:\n return Queue(queue_name, **kwargs)", "def check_ownership(project_pk, user):\n\ttry:\n\t\tproject_pk = int(project_pk)\n\t\tproject = Project.objects.get(pk=project_pk)\n\t\tif project.owner == user:\n\t\t\treturn project\n\t\telse:\n\t\t\treturn None # if not the owner\n\texcept:\n\t\treturn None # if exception when parsing the primary key (or non-existant pk requested)", "def get_id(self, name, tenant=None):\n queue = self._get(name, tenant, fields=[\"_id\"])\n return queue.get(\"_id\")", "def get(self, instance, database):\n key = instance + '/' + database\n\n if not key in self.queues:\n queue = Queue(self.poolsize)\n self.queues[key] = queue\n\n queue = self.queues[key]\n\n if queue.empty():\n cnxn = cnxn_ctor(instance, database)\n else:\n cnxn = queue.get()\n # Make sure the connection is still good.\n cnxn.ping()\n cnxn.commit()\n\n return cnxn", "def controller(self): # type: () -> ControllerHostConfig\n return self.host_settings.controller", "def get_project(self, project_id: str) -> Mapping[str, Any]:\n return self.__get_one_by_id(\"projects\", \"project_id\", project_id)", "def retrieve(self, request, pk=None):\n try:\n project = Project.objects.get(pk=pk)\n serializer = ProjectSerializer(project, 
context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)", "def get_permission_object(self):\n if not self.project:\n self.project = get_object_or_404(Proyecto, pk=self.kwargs['project_pk'])\n return self.project", "def ctrlqueue_action(self) -> int:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(3), ctypes.c_int32(0))", "def get_project(self):\n if self.api_version == 2:\n return self.creds.get('tenant_id') or self.creds.get('tenant_name')\n else:\n return self.creds.get('project_id') or self.creds.get('project_name')", "def get_project(self, project_id):\n endpoint = '/projects/{}'.format(project_id)\n return self._api_call('get', endpoint)", "def get_controller2(self):\n return self.__controller2", "def project_by_name(self,project_name=''):\n logger.debug(f'project_by_name project_name={project_name}')\n return self.get('{}/groups/byName/{}'.format(ApiVersion.CM1.value,project_name))", "def getClientProject(self, clientName, projectName):\n for client in self.getClients():\n if client['name'] == clientName:\n cid = client['id']\n\n if not cid:\n print('Could not find such client name')\n return None\n\n for projct in self.getClientProjects(cid):\n if projct['name'] == projectName:\n pid = projct['id']\n\n if not pid:\n print('Could not find such project name')\n return None\n\n return self.getProject(pid)", "def _project(request, key):\n context = request.context\n if not context.project_id:\n raise exceptions.QuotaMissingTenant()\n return {key: {key + '_id': context.project_id}}", "def _get_project_name(self, context, project_id):\n return project_id", "def show_controller(cls, args, config):\n if len(args) == 0:\n raise MOLNSException(\"USAGE: molns controller show name\")\n return {'msg': str(config.get_object(name=args[0], kind='Controller'))}", "def get_project(self, name=None):\n if not name:\n name = self.get_project_name()\n projects = self.get_projects()\n for p in projects:\n if p.name == name:\n return p\n raise NotFound(name)" ]
[ "0.7787301", "0.7567917", "0.7170647", "0.62005764", "0.6165711", "0.5723963", "0.5608801", "0.5049111", "0.5029624", "0.48735002", "0.48166627", "0.4790605", "0.47787577", "0.47474897", "0.47414377", "0.4711115", "0.47098687", "0.47030538", "0.4683094", "0.46805832", "0.4673311", "0.46701303", "0.4637543", "0.46345162", "0.46345162", "0.46257275", "0.46004865", "0.45940182", "0.45808622", "0.45764256", "0.45655993", "0.45621553", "0.45533758", "0.45473143", "0.4532036", "0.4530288", "0.45272446", "0.4525887", "0.45240653", "0.4522134", "0.4518954", "0.4516501", "0.45101064", "0.44961208", "0.4488267", "0.44738042", "0.44588488", "0.44580695", "0.4453716", "0.44376427", "0.4430676", "0.44245502", "0.44153157", "0.44128543", "0.44113433", "0.44048584", "0.43888408", "0.43729013", "0.43725115", "0.4371344", "0.43680888", "0.43680888", "0.43675035", "0.43536916", "0.43535945", "0.43514252", "0.43347096", "0.43346015", "0.43232083", "0.4317866", "0.43127036", "0.43077326", "0.43013197", "0.4294839", "0.42846358", "0.4282534", "0.42725572", "0.4270334", "0.4266102", "0.4259464", "0.42534906", "0.4248319", "0.42398757", "0.4234702", "0.4234375", "0.42314923", "0.42253807", "0.4216996", "0.421508", "0.42135045", "0.42095834", "0.42084587", "0.42070785", "0.42067108", "0.4205867", "0.4205712", "0.42053884", "0.42039827", "0.42012373", "0.4198121" ]
0.8776693
0
Lookup the subscription controller for the given queue and project.
def get_subscription_controller(self, queue, project=None):
    target = self.lookup(queue, project)
    return target and target.subscription_controller
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_queue_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.queue_controller", "def get_claim_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.claim_controller", "def get_message_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.message_controller", "def lookup(self, queue, project=None):\n\n try:\n shard_id = self._shard_id(queue, project)\n except errors.QueueNotMapped as ex:\n LOG.debug(ex)\n\n # NOTE(kgriffs): Return `None`, rather than letting the\n # exception bubble up, so that the higher layer doesn't\n # have to duplicate the try..except..log code all over\n # the place.\n return None\n\n return self.get_driver(shard_id)", "def get_topic_controller(self, topic, project=None):\n target = self.lookup(topic, project)\n return target and target.topic_controller", "def lookup(self, queue, project=None):\n\n try:\n pool_id = self._pool_id(queue, project)\n except errors.QueueNotMapped as ex:\n LOG.debug(ex)\n\n return self.get_default_pool(use_listing=False)\n\n return self.get_driver(pool_id)", "def lookup(self, queue, project=None):\n\n # TODO(kgriffs): SHARDING - Raise an exception if the queue\n # does not have a mapping (it does not exist).\n\n # TODO(kgriffs): SHARDING - Get ID from the catalog backend\n shard_id = '[insert_id]'\n try:\n shard = self._shards[shard_id]\n except KeyError:\n self._shards[shard_id] = shard = self._init_shard(shard_id)\n\n return shard", "def deregister(self, queue, project=None):\n self._catalogue_ctrl.delete(project, queue)", "def register(self, queue, project=None):\n # NOTE(cpp-cabrera): only register a queue if the entry\n # doesn't exist\n if not self._catalogue_ctrl.exists(project, queue):\n # NOTE(cpp-cabrera): limit=0 implies unlimited - select from\n # all shards\n shard = select.weighted(self._shards_ctrl.list(limit=0))\n\n if not shard:\n raise errors.NoShardFound()\n\n self._catalogue_ctrl.insert(project, queue, shard['name'])", "def _ns_subscriptions(self, queue):\n return self._ns(queue, \"consumers\")", "def subscription_name_from_path(path, project):\n return _name_from_project_path(path, project, _SUBSCRIPTION_TEMPLATE)", "def get_subscription(\n connection, subscription_id, project_id, fields=None, error_msg=None\n):\n return connection.get(\n url=f'{connection.base_url}/api/subscriptions/{subscription_id}',\n params={'fields': fields},\n headers={'X-MSTR-ProjectID': project_id},\n )", "def deregister(self, queue, project=None):\n self._invalidate_cached_id(queue, project)\n self._catalogue_ctrl.delete(project, queue)", "def get_project(self, project):\n project_name = project\n\n try:\n # FIXME: project should be an integer or str, no both\n project_id = int(project)\n except ValueError:\n project_id = None\n\n try:\n # Find the first project occurrence\n project_found = next(p for p in self.get_projects() if p[\"id\"] == project_id\n or p[\"name\"] == project_name)\n # FIXME: use namedtuple instead? 
create a self.project = dict()?\n self.project_name = project_found[\"name\"]\n self.project_id = project_found[\"id\"]\n self.project_address = \"projects/%s/\" % self.project_id\n except StopIteration:\n logger.error(\"Project %s not found\" % project)\n raise KeyError", "def getQueueDetails(self, queue_name, project_id=\"\"):\n if project_id == \"\":\n project_id = self.project_id\n url = \"%sprojects/%s/queues/%s?oauth=%s\" % (self.url, project_id,\n queue_name, self.token)\n body = self.__get(url)\n queue = json.loads(body)\n return queue", "def _pool_id(self, queue, project=None):\n return self._catalogue_ctrl.get(project, queue)['pool']", "def get_subscription(self):\n return self.request({\n 'path': '/' + UUID + '/subscription'})", "def register(self, queue, project=None):\n\n # TODO(kgriffs): SHARDING - Implement this!\n pass", "def subscription(self, uuid):\r\n return subs.Subscription(self, uuid)", "def get_queue(self, task_name):\n for name, queue in self.queues.items():\n if task_name in queue:\n return name\n return self.default_queue", "def queue_path(self, project, location, queue):\n # This is value is not actually used, but it might be good for debugging.\n return \"projects/{project}/locations/{location}/queues/{queue}\".format(\n project=project, location=location, queue=queue)", "def get_queue_num(self, qos_id, queue_id):\n\n q_num = None\n queues = self.qos_dict[qos_id][\"ovsdb:qos-entries\"][0][\"queue-list\"]\n\n # Go through all queues\n for queue in queues:\n cur_queue_id = queue[\"queue-ref\"].split(\"'\")[-2]\n # If we have a match, get the q_num and break\n if cur_queue_id == queue_id:\n q_num = queue[\"queue-number\"]\n break\n\n # queue_id is not found in the qos\n if q_num is None:\n #print(json.dumps(self.qos_dict[qos_id], indent=3))\n raise KeyError\n\n return q_num", "def subscribe(self, queue, consumer_id):\n\n # Add myself to the list of consumers, if not already present.\n self.redis.sadd(self._ns_subscriptions(queue), consumer_id)\n\n return Subscription(self, queue, consumer_id)", "def get_project(project_id):\n return Project.objects.get(id=project_id)", "def _get_controller(self):\n return self.__controller", "def get_namespace_plan(namespace):\n namespace_user = model.user.get_namespace_user(namespace)\n if namespace_user is None:\n return None\n\n if not namespace_user.stripe_id:\n return None\n\n # Ask Stripe for the subscribed plan.\n # TODO: Can we cache this or make it faster somehow?\n try:\n cus = billing.Customer.retrieve(namespace_user.stripe_id)\n except stripe.error.APIConnectionError:\n abort(503, message=\"Cannot contact Stripe\")\n\n if not cus.subscription:\n return None\n\n return get_plan(cus.subscription.plan.id)", "def magma_queue_get_device(queue):\n\n return _libmagma.magma_queue_get_device(queue)", "def get(queue_name: str, **kwargs) -> Queue:\n return Queue(queue_name, **kwargs)", "def getSubscription(uniq):\n return Subscription(Cuebot.getStub('subscription').Get(\n subscription_pb2.SubscriptionGetRequest(id=uniq), timeout=Cuebot.Timeout).subscription)", "def get(subject_name, user_email):\n return Subscription.get_by_key_name(subject_name + ':' + user_email)", "def get_project(con):\n try:\n return con.project_read(fq_name=conf.get('default_project', 'UNEXPECTED_VALUE'))\n except:\n log.debug('Unable to find project default-domain, admin:', exc_info=True)\n return None", "def find_queue(queue):\n athena_queue = canonicalize_queue(queue)\n # If a queue isn't an Athena queue, punt straight to the default\n # CUPS server\n if 
not athena_queue:\n return SYSTEM_CUPS, None, queue\n queue = athena_queue\n\n # Get rid of any instance on the queue name\n # TODO The purpose of instances is to have different sets of default\n # options. Queues may also have default options on the null\n # instance. Figure out if we need to do anything about them\n queue = queue.split('/')[0]\n\n # If we're still here, the queue is definitely an Athena print\n # queue; it was either in the local cupsd pointing to Athena, or the\n # local cupsd didn't know about it.\n # Figure out what Athena thinks the backend server is, and whether\n # that server is running a cupsd; if not, fall back to LPRng\n\n rm = get_hesiod_print_server(queue)\n if not rm:\n # In the unlikely event we're wrong about it being an Athena\n # print queue, the local cupsd is good enough\n return SYSTEM_CUPS, None, queue\n\n # Give up and return rm and queue. If it's not running a cupsd,\n # too bad. It's not our job to check whether cupsd is running.\n return SYSTEM_CUPS, rm, queue", "def find(self, task_id):\n for task_obj in self.queue:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in queue: '{}'\".format(task_id))", "def show_qos_queue(self, queue, **_params):\r\n return self.get(self.qos_queue_path % (queue),\r\n params=_params)", "def deregister(self, queue, project=None):\n\n # TODO(kgriffs): SHARDING - Implement this!\n pass", "def get_project(arn=None):\n pass", "def register(self, queue, project=None, flavor=None):\n\n # NOTE(gengchc): if exist, get queue's pool.flavor:\n # if queue's pool.flavor is different, first delete it and add it.\n # Otherwise, if the flavor in the meteredata of the queue is\n # modified, the catalog will be inconsistent.\n if self._catalogue_ctrl.exists(project, queue):\n catalogue = self._catalogue_ctrl.get(project, queue)\n oldpoolids = catalogue['pool']\n oldpool = self._pools_ctrl.get(oldpoolids)\n oldflavor = oldpool['flavor']\n msgtmpl = _(u'register queue to pool: old flavor: %(oldflavor)s '\n ', new flavor: %(flavor)s')\n LOG.info(msgtmpl,\n {'oldflavor': oldflavor, 'flavor': flavor})\n if oldpool['flavor'] != flavor:\n self._catalogue_ctrl.delete(project, queue)\n\n if not self._catalogue_ctrl.exists(project, queue):\n if flavor is not None:\n flavor = self._flavor_ctrl.get(flavor, project=project)\n pools = self._pools_ctrl.get_pools_by_flavor(\n flavor=flavor,\n detailed=True)\n pool = select.weighted(pools)\n pool = pool and pool['name'] or None\n msgtmpl = _(u'register queue to pool: new flavor:%(flavor)s')\n LOG.info(msgtmpl,\n {'flavor': flavor.get('name', None)})\n else:\n # NOTE(flaper87): Get pools assigned to the default\n # group `None`. We should consider adding a `default_group`\n # option in the future.\n pools = self._pools_ctrl.get_pools_by_flavor(detailed=True)\n pool = select.weighted(pools)\n pool = pool and pool['name'] or None\n\n if not pool:\n # NOTE(flaper87): We used to raise NoPoolFound in this\n # case but we've decided to support automatic pool\n # creation. Note that we're now returning and the queue\n # is not being registered in the catalogue. 
This is done\n # on purpose since no pool exists and the \"dummy\" pool\n # doesn't exist in the storage\n if self.lookup(queue, project) is not None:\n return\n raise errors.NoPoolFound()\n msgtmpl = _(u'register queue to pool: new flavor: None')\n LOG.info(msgtmpl)\n\n msgtmpl = _(u'register queue: project:%(project)s'\n ' queue:%(queue)s pool:%(pool)s')\n LOG.info(msgtmpl,\n {'project': project,\n 'queue': queue,\n 'pool': pool})\n self._catalogue_ctrl.insert(project, queue, pool)", "def _ns_queue(self, queue, consumer_id):\n return self._ns(queue, consumer_id, \"messages\")", "def get_current_project():\n return get_from_session(KEY_PROJECT)", "def findSubscription(name):\n return Subscription(Cuebot.getStub('subscription').Find(\n subscription_pb2.SubscriptionFindRequest(name=name), timeout=Cuebot.Timeout).subscription)", "def _shard_id(self, queue, project=None):\n cache_key = _shard_cache_key(queue, project)\n shard_id = self._cache.get(cache_key)\n\n if shard_id is None:\n shard_id = self._catalogue_ctrl.get(project, queue)['shard']\n\n if not self._cache.set(cache_key, shard_id, _SHARD_CACHE_TTL):\n LOG.warn('Failed to cache shard ID')\n\n return shard_id", "def get_project(self, project_name):\n raise self._get_notimplementederror(\"get_project\")", "def ctrlqueue_device_handle(self) -> int:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(5), ctypes.c_int32(0))", "def get(self, instance, database):\n key = instance + '/' + database\n\n if not key in self.queues:\n queue = Queue(self.poolsize)\n self.queues[key] = queue\n\n queue = self.queues[key]\n\n if queue.empty():\n cnxn = cnxn_ctor(instance, database)\n else:\n cnxn = queue.get()\n # Make sure the connection is still good.\n cnxn.ping()\n cnxn.commit()\n\n return cnxn", "def get_project_specific(self, project_format='id'):\n if self.api_version == 2:\n return self.creds.get('tenant_%s' % project_format)\n else:\n return self.creds.get('project_%s' % project_format)", "def get_project(self, project_id):\n res = self.conn.cursor().execute(\"SELECT * FROM projects where id=?\", (project_id,))\n return res.fetchone()", "def get_controller(cls):\n if not cls.hnd:\n raise Exception('A handler is to be set for getting contoller.')\n if not cls.controller:\n cls.controller = cls.config.controller_class(cls.hnd)\n cls.session = cls.controller.session\n return cls.controller", "def get_project(self):\n project_id = self.kwargs['project_id']\n try:\n project = Project.objects.get(pk=project_id)\n except ObjectDoesNotExist:\n raise ObjectNotFound('Not found')\n contributors = CustomUser.objects.filter(contributor__project=project.pk)\n if self.request.user not in contributors:\n raise ObjectNotFound('Not found')\n return project", "def get_project(self):\n project_id = self.kwargs['project_id']\n try:\n project = Project.objects.get(pk=project_id)\n except ObjectDoesNotExist:\n raise ObjectNotFound('Not found')\n contributors = CustomUser.objects.filter(contributor__project=project.pk)\n if self.request.user not in contributors:\n raise ObjectNotFound('Not found')\n return project", "def get_project(self):\n raise NotImplementedError(\"get_project is not implemented\")", "async def get_queue(self, ctx: commands.Context) -> Optional[QueueManager]:\n\n return self.queue[ctx.guild.id]", "def find_by_project_id(cls, project_id, is_prod: bool) -> OIDCConfig:\n return cls.query.filter(OIDCConfig.project_id == project_id and\n OIDCConfig.is_prod == is_prod).first()", "def get_project(self):\n project_id = self.kwargs['project_id']\n try:\n 
project = Project.objects.get(pk=project_id)\n except ObjectDoesNotExist:\n raise ObjectNotFound('Not found')\n self.contributors = CustomUser.objects.filter(contributor__project=project.pk)\n if self.request.user not in self.contributors:\n raise ObjectNotFound('Not found')\n return project", "def get_one(cls, sid):\n return Subscription.query.get_or_404(sid)", "def get_project(self, project_id):\n endpoint = '/projects/{}'.format(project_id)\n return self._api_call('get', endpoint)", "def request_project_by_key(cfg, project_key):\n\n url = cjm.request.make_cj_url(cfg, \"project\", project_key)\n response = cjm.request.make_cj_request(cfg, url)\n return response.json()", "def get(self, orgname):\n cus = None\n permission = AdministerOrganizationPermission(orgname)\n if permission.can():\n private_repos = model.user.get_private_repo_count(orgname)\n organization = model.organization.get_organization(orgname)\n if organization.stripe_id:\n try:\n cus = billing.Customer.retrieve(organization.stripe_id)\n except stripe.error.APIConnectionError as e:\n abort(503, message=\"Cannot contact Stripe\")\n\n if cus.subscription:\n return subscription_view(cus.subscription, private_repos)\n\n return {\n \"hasSubscription\": False,\n \"isExistingCustomer\": cus is not None,\n \"plan\": \"free\",\n \"usedPrivateRepos\": private_repos,\n }\n\n raise Unauthorized()", "def get_project(project):\n command = 'openstack project show %s' % project\n try:\n project_info = parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])\n except:\n print \"Project '%s' not found.\" % project\n sys.exit(-1)\n return project_info", "def get(self):\n cus = None\n user = get_authenticated_user()\n private_repos = model.user.get_private_repo_count(user.username)\n\n if user.stripe_id:\n try:\n cus = billing.Customer.retrieve(user.stripe_id)\n except stripe.error.APIConnectionError as e:\n abort(503, message=\"Cannot contact Stripe\")\n\n if cus.subscription:\n return subscription_view(cus.subscription, private_repos)\n\n return {\n \"hasSubscription\": False,\n \"isExistingCustomer\": cus is not None,\n \"plan\": \"free\",\n \"usedPrivateRepos\": private_repos,\n }", "def ctrlqueue_show(self) -> int:\n try:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(7), ctypes.c_int32(0))\n except Exception as e:\n Base.warn_msg(\"An error occur when tried to get *Num Actions of CrlQueue* check if *Queue* is NOT empty\", e)", "def get_project(projectname):\n return jsonify(admin.get_project_info(current_app.scoped_session(), projectname))", "def get_customer_id_from_project(context):\n project_id = context.current_parameters.get('project_id')\n project = Project.get(project_id)\n return project.customer_id", "def get(self, queue=None, style=None, prettyPrint=None, countryCode=None):\r\n params = base.get_params(None, locals())\r\n params.update(self.lookup)\r\n\r\n request = http.Request('GET', self.get_url(), params)\r\n\r\n return request, parsers.parse_json", "def get_controller(cls, args, config):\n logging.debug(\"MOLNSController.get_controller(args={0})\".format(args))\n controller_obj = cls._get_controllerobj(args, config)\n if controller_obj is None:\n return\n\n if controller_obj.provider.type == constants.Constants.DockerProvider:\n raise NotImplementedError(\"DockerController does not support this feature yet.\")\n\n # Check if any instances are assigned to this controller\n instance_list = config.get_controller_instances(controller_id=controller_obj.id)\n\n # Check if they are running\n ip = 
None\n if len(instance_list) > 0:\n for i in instance_list:\n status = controller_obj.get_instance_status(i)\n logging.debug(\"instance={0} has status={1}\".format(i, status))\n if status == controller_obj.STATUS_RUNNING:\n ip = i.ip_address\n if ip is None:\n print \"No active instance for this controller\"\n return\n cmd = ['/usr/bin/scp','-oStrictHostKeyChecking=no','-oUserKnownHostsFile=/dev/null','-i',\n controller_obj.provider.sshkeyfilename(), 'ubuntu@{0}:{1}'.format(ip, args[1]), '.']\n print \" \".join(cmd)\n subprocess.call(cmd)\n print \"SSH process completed\"", "async def _get_work_pool_queue_id_from_name(\n self, session: AsyncSession, work_pool_name: str, work_pool_queue_name: str\n ) -> UUID:\n work_pool_queue = await models.workers.read_work_pool_queue_by_name(\n session=session,\n work_pool_name=work_pool_name,\n work_pool_queue_name=work_pool_queue_name,\n )\n if not work_pool_queue:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Worker queue '{work_pool_name}/{work_pool_queue_name}' not found.\",\n )\n\n return work_pool_queue.id", "def get_doc(doc_id):\n queue = get_doc_queue(app.config)\n data = queue.get_by_id(doc_id)\n if data:\n return jsonify(doc=data)\n return jsonify(err=f\"{doc_id} not found\"), 404", "def __init__(self,\n project_id='issue-label-bot-dev',\n topic_name='event_queue',\n subscription_name='subscription_for_event_queue',\n embedding_api_endpoint='https://embeddings.gh-issue-labeler.com/text'):\n # TODO(chunhsiang): change the embedding microservice to be an internal DNS of k8s service.\n # see: https://v1-12.docs.kubernetes.io/docs/concepts/services-networking/dns-pod-service/#services\n self.project_id = project_id\n self.topic_name = topic_name\n self.subscription_name = subscription_name\n self.embedding_api_endpoint = embedding_api_endpoint\n self.embedding_api_key = os.environ['GH_ISSUE_API_KEY']\n self.app_url = os.environ['APP_URL']\n\n # init GitHub app\n github_init()\n # init pubsub subscription\n self.create_subscription_if_not_exists()", "def get_subscription(self):\n if not hasattr(self, '_subscription'):\n self._subscription = self.admin.subscriptions.select_related('plan').get_overlapping(\n self.admin_id, DateRange(self.period, self.period_end, bounds='[]'))\n return self._subscription", "def get_project(db, id):\n \n for element in db:\n if element['project_no'] == id:\n return element\n return None", "def _get_target_client(self, initiator_iqn):\n config = self._get_config()\n target_config = config['targets'][self.target_iqn]\n if initiator_iqn in target_config['clients']:\n return target_config['clients'][initiator_iqn]", "def get_project_name(self, project_id):\n test = \"\"\"SELECT EXISTS(\n SELECT 1\n FROM barcodes.project\n WHERE project_id=%s\n )\"\"\"\n query = \"\"\"SELECT project\n FROM barcodes.project\n WHERE project_id=%s\"\"\"\n\n with self._transaction.cursor() as cur:\n cur.execute(test, [project_id, ])\n if not cur.fetchone()[0]:\n raise NotFound(f\"Project f'{project_id}' not found\")\n else:\n cur.execute(query, [project_id, ])\n return cur.fetchone()[0]", "def pull(self, subscription, project):\n response, content = self._http.request(\n '%s/%s/subscriptions/%s:pull' % (\n PUBSUB_BASE_URL, project, subscription),\n body=json.dumps({'maxMessages': 1, 'returnImmediately': False}),\n method='POST',\n )\n if response['status'] == '404':\n raise NotFoundError(response, json.loads(content))\n return json.loads(content)", "def searchClientProject(self, name):\n for client in 
self.getClients():\n try:\n for project in self.getClientProjects(client['id']):\n if project['name'] == name:\n return project\n except Exception:\n continue\n\n print('Could not find client by the name')\n return None", "def getController(self):\n return self.__controller", "def find(self, task_id):\n for task_obj in self._queue:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in dorm: '{}'\".format(task_id))", "def getQueue(serverName: str, queueType: str):\n if queueType is \"k\":\n queue = kitchenQueue\n elif queueType is \"b\":\n queue = bathroomQueue\n else:\n raise Exception(\"Incorrect parameters\")\n\n if serverName in queue.keys():\n return queue.get(serverName)\n else:\n queue[serverName] = []\n return queue.get(serverName)", "def get_id(self, name, tenant=None):\n queue = self._get(name, tenant, fields=[\"_id\"])\n return queue.get(\"_id\")", "def get_scoped_project(self, project_auth_scope):\n\n filter_params = {}\n url = \"{0}/{1}/{2}\".format(self.keystone_server_url, DEFAULT_KEYSTONE_API_VERSION, \"projects\")\n if project_auth_scope.tenant_id:\n if project_auth_scope.project_name:\n return {\"id\": project_auth_scope.tenant_id, \"name\": project_auth_scope.project_name}\n\n url = \"{}/{}\".format(url, project_auth_scope.tenant_id)\n else:\n filter_params = {\"name\": project_auth_scope.project_name, \"domain_id\": project_auth_scope.domain_id}\n\n headers = {'X-Auth-Token': project_auth_scope.auth_token}\n\n try:\n project_details = self._make_request_with_auth_fallback(url, headers, params=filter_params)\n if filter_params:\n assert len(project_details[\"projects\"]) == 1, \"Non-unique project credentials\"\n\n # Set the tenant_id so we won't have to fetch it next time\n project_auth_scope.tenant_id = project_details[\"projects\"][0].get(\"id\")\n return project_details[\"projects\"][0]\n else:\n project_auth_scope.project_name = project_details[\"project\"][\"name\"]\n return project_details[\"project\"]\n\n except Exception as e:\n self.warning('Unable to get the project details: %s', e)\n raise e\n\n return None", "def from_queue_key(cls, queue_key, connection=None):\n prefix = cls.redis_queue_namespace_prefix\n if not queue_key.startswith(prefix):\n raise ValueError('Not a valid RQ queue key: %s' % (queue_key,))\n name = queue_key[len(prefix):]\n return cls(name)", "def getProducer():\r\n\r\n # get the config and a producer\r\n config = ecommerce.config.getConfig()\r\n return ecommerce.queue.queue(config, queuePrefix)", "def get_project(self, name=None):\n if not name:\n name = self.get_project_name()\n projects = self.get_projects()\n for p in projects:\n if p.name == name:\n return p\n raise NotFound(name)", "def search_queue_number(self, Q_strip):\n if Q_strip is self.PF_Q_strip:\n out = self.dut.send_expect(\"cat config/common_base\", \"]# \", 10)\n pattern = \"(%s=)(\\d*)\" % Q_strip\n else :\n out = self.dut.send_expect(\"cat drivers/net/i40e/i40e_ethdev.c\", \"]# \", 10)\n pattern = \"#define %s\\s*(\\d*)\" % Q_strip\n s = re.compile(pattern)\n res = s.search(out)\n if res is None:\n print utils.RED('Search no queue number.')\n return None\n else:\n if Q_strip is self.VF_Q_strip:\n queue = res.group(1)\n else :\n queue = res.group(2)\n return int(queue)", "def sqs_lookup_url(session, queue_name):\n client = session.client('sqs')\n resp = client.get_queue_url(QueueName=queue_name)\n return resp['QueueUrl']", "def support_queue(self, queue_id):\r\n return support_queues.SupportQueue(self, queue_id)", "def 
clearQueue(self, queue_name, project_id=None):\n if project_id is None:\n project_id = self.project_id\n\n url = \"%sprojects/%s/queues/%s/clear?oauth=%s\" % (self.url, project_id, queue_name, self.token)\n body = self.__post(url)\n return json.loads(body)", "def subscribe_sqs_queue(self, topic, queue):\r\n t = queue.id.split('/')\r\n q_arn = 'arn:aws:sqs:%s:%s:%s' % (queue.connection.region.name,\r\n t[1], t[2])\r\n resp = self.subscribe(topic, 'sqs', q_arn)\r\n policy = queue.get_attributes('Policy')\r\n if 'Version' not in policy:\r\n policy['Version'] = '2008-10-17'\r\n if 'Statement' not in policy:\r\n policy['Statement'] = []\r\n statement = {'Action' : 'SQS:SendMessage',\r\n 'Effect' : 'Allow',\r\n 'Principal' : {'AWS' : '*'},\r\n 'Resource' : q_arn,\r\n 'Sid' : str(uuid.uuid4()),\r\n 'Condition' : {'StringLike' : {'aws:SourceArn' : topic}}}\r\n policy['Statement'].append(statement)\r\n queue.set_attribute('Policy', json.dumps(policy))\r\n return resp", "def get_controller(equipment, accessmethod, logfile=None):\n path = _CONTROLLERMAP[accessmethod]\n constructor = module.get_object(path)\n return constructor(equipment, logfile)", "def record_get_for_project(project_id, deleted=False, session=None):\n session = session or get_session()\n result = session.query(models.ProjectAccountRecord).\\\n filter_by(project_id=project_id).\\\n filter_by(deleted=deleted).\\\n first()\n\n if not result:\n raise exception.ProjectRecordNotFound()\n\n return result", "def retrieve(self, request, pk=None):\n try:\n project = Project.objects.get(pk=pk)\n serializer = ProjectSerializer(project, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)", "def test_get_subscription(self):\n pass", "def get_pid(self, pid):\n for p in self._queue:\n if p.id == pid:\n return p\n else: return 0", "def get(cls, subdomain, record_id):\n return cls.get_by_key_name(subdomain + ':' + str(record_id))", "def getClientProject(self, clientName, projectName):\n for client in self.getClients():\n if client['name'] == clientName:\n cid = client['id']\n\n if not cid:\n print('Could not find such client name')\n return None\n\n for projct in self.getClientProjects(cid):\n if projct['name'] == projectName:\n pid = projct['id']\n\n if not pid:\n print('Could not find such project name')\n return None\n\n return self.getProject(pid)", "def get(self, name):\n try:\n return self.projects[name]\n except KeyError:\n print(\"No project called %s was found\" %name)", "def get_controller_doc(self, controller_name: str) -> ControllerDoc:\n if controller_name not in self.controller_docs:\n raise KeyError(f\"Controller {controller_name} not found\")\n\n return self.controller_docs[controller_name]", "def setup_pubsub(project):\n config = local_config.Config('pubsub.queues')\n client = pubsub.PubSubClient()\n\n queues = config.get('resources')\n\n for queue in queues:\n create_pubsub_topic(client, project, queue['name'])\n create_pubsub_subscription(client, project, queue['name'], queue['name'])", "def get_project(self, name=None):\n if not name:\n if not self.select_project:\n log.error(\"no default project name specified\")\n return\n name = self.select_project\n\n if name in self.projects:\n return self.projects[name]\n\n log.debug( \"project {} not found in {} projects \".format(name, len(self.projects)) )\n return None", "async def get_subscription(\r\n self, installed_app_id: str, subscription_id: str\r\n ) -> dict:\r\n return await self.get(\r\n 
API_SUBSCRIPTION.format(\r\n installed_app_id=installed_app_id, subscription_id=subscription_id\r\n )\r\n )", "def _get_subscription(self):\n response = requests.get(\n 'https://www.googleapis.com/youtube/v3/subscriptions',\n params={\n 'part': 'snippet',\n 'mine': 'true',\n 'forChannelId': self.channel_id\n },\n headers=self.headers\n )\n if response.status_code == 200:\n return response.json()\n return {}", "def get_issue(issue_number):\n backend_name = os.environ[\"ISSUE_BACKEND\"]\n backend_module = importlib.import_module(\n \"issuebranch.backends.{}\".format(backend_name)\n )\n\n return getattr(backend_module, \"Backend\")(issue_number)" ]
[ "0.76404476", "0.7447651", "0.69627404", "0.63039684", "0.62465614", "0.6126285", "0.60098344", "0.5344913", "0.52650946", "0.5261455", "0.5224008", "0.51507324", "0.51497537", "0.5039444", "0.5002221", "0.4991361", "0.49808443", "0.49703738", "0.4967121", "0.4910888", "0.48773262", "0.4868069", "0.48537984", "0.4815726", "0.477513", "0.47749656", "0.4774117", "0.47740763", "0.47739622", "0.4772419", "0.47548306", "0.4738839", "0.47186956", "0.47033867", "0.4700569", "0.4698959", "0.46972713", "0.46847624", "0.46538958", "0.46453416", "0.46362624", "0.46155372", "0.4615056", "0.4612712", "0.460169", "0.45966715", "0.45955315", "0.45910013", "0.45910013", "0.458533", "0.4580866", "0.45769414", "0.4575097", "0.4557505", "0.45493352", "0.45492956", "0.45348865", "0.45289645", "0.45209745", "0.4518364", "0.451836", "0.450826", "0.4507424", "0.45035905", "0.45005184", "0.44942716", "0.44851792", "0.44832316", "0.44788793", "0.44779786", "0.44766062", "0.44746995", "0.44640037", "0.44634625", "0.44547477", "0.44379354", "0.44352543", "0.44330317", "0.44292778", "0.4422066", "0.44182628", "0.4407485", "0.4406481", "0.44046786", "0.44020876", "0.43993792", "0.4396627", "0.43952975", "0.43923032", "0.43889555", "0.43812814", "0.43784544", "0.43779463", "0.43753666", "0.43738702", "0.43674254", "0.4361711", "0.43569115", "0.43565845", "0.43517092" ]
0.8589521
0
Lookup the topic controller for the given queue and project.
def get_topic_controller(self, topic, project=None): target = self.lookup(topic, project) return target and target.topic_controller
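Illustrative aside, not part of the dataset row: the query/document pair above describes a catalog-style lookup that resolves a topic (or queue) and project to a storage target and then returns that target's controller. The minimal sketch below shows how such a registry could be wired up and used; the InMemoryCatalog class and its attribute names are assumptions for illustration, not the real API behind the document.

class InMemoryCatalog:
    """Maps (project, topic) pairs to storage targets (sketch only)."""

    def __init__(self):
        # Each registered target is expected to expose controller
        # attributes such as topic_controller, queue_controller, etc.
        self._targets = {}

    def register(self, topic, target, project=None):
        self._targets[(project, topic)] = target

    def lookup(self, topic, project=None):
        # Return None when unmapped so callers can short-circuit safely.
        return self._targets.get((project, topic))

    def get_topic_controller(self, topic, project=None):
        target = self.lookup(topic, project)
        return target and target.topic_controller

# Usage (hypothetical): catalog.get_topic_controller('events', project='p1')
# returns the registered target's topic_controller, or None if unmapped.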
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_queue_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.queue_controller", "def get_subscription_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.subscription_controller", "def get_message_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.message_controller", "def get_claim_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.claim_controller", "def lookup(self, queue, project=None):\n\n # TODO(kgriffs): SHARDING - Raise an exception if the queue\n # does not have a mapping (it does not exist).\n\n # TODO(kgriffs): SHARDING - Get ID from the catalog backend\n shard_id = '[insert_id]'\n try:\n shard = self._shards[shard_id]\n except KeyError:\n self._shards[shard_id] = shard = self._init_shard(shard_id)\n\n return shard", "def lookup(self, queue, project=None):\n\n try:\n shard_id = self._shard_id(queue, project)\n except errors.QueueNotMapped as ex:\n LOG.debug(ex)\n\n # NOTE(kgriffs): Return `None`, rather than letting the\n # exception bubble up, so that the higher layer doesn't\n # have to duplicate the try..except..log code all over\n # the place.\n return None\n\n return self.get_driver(shard_id)", "def lookup(self, queue, project=None):\n\n try:\n pool_id = self._pool_id(queue, project)\n except errors.QueueNotMapped as ex:\n LOG.debug(ex)\n\n return self.get_default_pool(use_listing=False)\n\n return self.get_driver(pool_id)", "def GetTopic(self, topic_id):\n return self._TopicSearchHelper(self.topics, topic_id)", "def get_topic(title):\n return Topic.get(Topic.title == title)", "def get_controller_id_for_topic(self, topic_name):\n request = MetadataRequest_v1(topics=[topic_name])\n response = self.send_request_and_get_response(request)\n return response.controller_id", "def topic_name_from_path(path, project):\n return _name_from_project_path(path, project, _TOPIC_TEMPLATE)", "def topic(self):\n return self.config.get('topic', f'{NAMESPACE}/{self.id}')", "def get_by_topic(cls, topic):\n\t\treturn cls.get_by_key_name(get_hash_key_name(topic))", "def _get_generic_topic(self):\n\n content_type = ContentType.objects.get_for_model(self.__class__)\n app_label = content_type.app_label\n\n return super(ProducerModel, self)._get_generic_topic(identifier=app_label)", "def get_topic(self):\n return self.topic", "def topic(self, topic_id):\r\n return topics.Topic(self, topic_id)", "def get(cls, topic_info):\n try: #to treat topic info as topic.id\n return Topic.query.get(int(topic_info))\n except Exception: #treat topic info as topic.name\n return Topic.query.filter_by(name=topic_info).first()", "def topic_index():\n topic = db.topic(request.args(0)) or redirect(URL('default', 'index'))\n return dict(topic=topic)", "def list_topics(project_id):\n project_path = f\"projects/{project_id}\"\n for topic in PUBLISHER_CLIENT.list_topics(request={\"project\": project_path}):\n print(topic)", "def sns_topic_lookup(session, topic_name):\n if session is None:\n return None\n\n client = session.client('sns')\n response = client.list_topics()\n topics_list = response['Topics']\n for topic in topics_list:\n arn_topic_name = topic[\"TopicArn\"].split(':').pop()\n if arn_topic_name == topic_name:\n return topic[\"TopicArn\"]\n return None", "def getTopicName(nd_proj):\n # does not line &\n return '-'.join(nd_proj.generateProjectInfo())", "def get_topic(self, label):\n\n for attr in 
self.parm_list:\n if attr.label == label:\n return attr.topic\n\n return \"\"", "def get_topic_of_question(question):\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n topic_table = dynamodb.Table(\"Topics\")\n\n topic_id = question.get(\"TopicId\")\n # query topic_id of the question\n try:\n response = topic_table.get_item(Key={\"TopicId\": topic_id})\n topic = response[\"Item\"]\n except:\n print(\"No topic found, returning None..\")\n return None\n return topic", "def topic(self, topic_id):\n return topics.Topic(self, topic_id)", "def topics(self, project: str) -> list:\n assert self.exists(project), f'Project {project} inesistente'\n\n cursor = self.projects(\n {\n 'url': project\n }\n )\n try:\n return cursor.next()['topics']\n except StopIteration:\n return []", "def get_project(self, project):\n project_name = project\n\n try:\n # FIXME: project should be an integer or str, no both\n project_id = int(project)\n except ValueError:\n project_id = None\n\n try:\n # Find the first project occurrence\n project_found = next(p for p in self.get_projects() if p[\"id\"] == project_id\n or p[\"name\"] == project_name)\n # FIXME: use namedtuple instead? create a self.project = dict()?\n self.project_name = project_found[\"name\"]\n self.project_id = project_found[\"id\"]\n self.project_address = \"projects/%s/\" % self.project_id\n except StopIteration:\n logger.error(\"Project %s not found\" % project)\n raise KeyError", "def find(self, task_id):\n for task_obj in self.queue:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in queue: '{}'\".format(task_id))", "def kafka_topic(self):\n from corehq.apps.change_feed.topics import get_topic_for_doc_type\n return get_topic_for_doc_type(self.document_class().to_json()['doc_type'])", "def get_topic(self,topic_path):\n twiki_cgi = \"{:s}/bin/view/{:s}\".format(self.url,topic_path)\n\n params = {'username': self.settings['auth']['username'],\n 'password': self.settings['auth']['password'],\n 'raw': 'text'}\n response = self.session.get(twiki_cgi, params=params)\n\n return response", "def topic(self, topic_id):\r\n return contents.Topic(self, topic_id)", "def register(self, queue, project=None):\n # NOTE(cpp-cabrera): only register a queue if the entry\n # doesn't exist\n if not self._catalogue_ctrl.exists(project, queue):\n # NOTE(cpp-cabrera): limit=0 implies unlimited - select from\n # all shards\n shard = select.weighted(self._shards_ctrl.list(limit=0))\n\n if not shard:\n raise errors.NoShardFound()\n\n self._catalogue_ctrl.insert(project, queue, shard['name'])", "def topic(self):\n return self._topic_name", "def get_queue_num(self, qos_id, queue_id):\n\n q_num = None\n queues = self.qos_dict[qos_id][\"ovsdb:qos-entries\"][0][\"queue-list\"]\n\n # Go through all queues\n for queue in queues:\n cur_queue_id = queue[\"queue-ref\"].split(\"'\")[-2]\n # If we have a match, get the q_num and break\n if cur_queue_id == queue_id:\n q_num = queue[\"queue-number\"]\n break\n\n # queue_id is not found in the qos\n if q_num is None:\n #print(json.dumps(self.qos_dict[qos_id], indent=3))\n raise KeyError\n\n return q_num", "def get_queue(self, task_name):\n for name, queue in self.queues.items():\n if task_name in queue:\n return name\n return self.default_queue", "def get_topic_reference(self, ref):\n kind, identity = ref\n if kind == consts.IN_FOCUS:\n return self.focus\n if kind == consts.VSLO:\n kind, identity = self._get_reference_by_variable((consts.VARIABLE, ref[1]))\n kind = 
consts.SLO\n elif kind == consts.VIID:\n kind, identity = self._get_reference_by_variable((consts.VARIABLE, ref[1]))\n kind = consts.IID\n if kind == consts.VARIABLE:\n kind, identity = self._get_reference_by_variable(ref)\n if kind in (consts.WILDCARD, consts.NAMED_WILDCARD):\n try:\n kind, identity = self._bindings.get((kind, identity))\n except TypeError:\n kind, identity = self._make_topic_identity((kind, identity))\n if kind in (consts.IID, consts.IRI, consts.SLO):\n return kind, identity\n raise mio.MIOException('Error: Unknown topic reference \"(%s, %s)\"' % (kind, identity))", "def describe_topic(self, index):\n assert(self.has_topics)\n assert(0 <= index < self.K)\n return self.topics[index]", "def get_parameter(self, topic):\n \n for attr in self.parm_list:\n if attr.topic == topic:\n return attr\n\n self.logging.error(\"Can't find topic: \"+topic)\n return None", "def get_controller(cls, args, config):\n logging.debug(\"MOLNSController.get_controller(args={0})\".format(args))\n controller_obj = cls._get_controllerobj(args, config)\n if controller_obj is None:\n return\n\n if controller_obj.provider.type == constants.Constants.DockerProvider:\n raise NotImplementedError(\"DockerController does not support this feature yet.\")\n\n # Check if any instances are assigned to this controller\n instance_list = config.get_controller_instances(controller_id=controller_obj.id)\n\n # Check if they are running\n ip = None\n if len(instance_list) > 0:\n for i in instance_list:\n status = controller_obj.get_instance_status(i)\n logging.debug(\"instance={0} has status={1}\".format(i, status))\n if status == controller_obj.STATUS_RUNNING:\n ip = i.ip_address\n if ip is None:\n print \"No active instance for this controller\"\n return\n cmd = ['/usr/bin/scp','-oStrictHostKeyChecking=no','-oUserKnownHostsFile=/dev/null','-i',\n controller_obj.provider.sshkeyfilename(), 'ubuntu@{0}:{1}'.format(ip, args[1]), '.']\n print \" \".join(cmd)\n subprocess.call(cmd)\n print \"SSH process completed\"", "def get_controller_doc(self, controller_name: str) -> ControllerDoc:\n if controller_name not in self.controller_docs:\n raise KeyError(f\"Controller {controller_name} not found\")\n\n return self.controller_docs[controller_name]", "def _ns_queue(self, queue, consumer_id):\n return self._ns(queue, consumer_id, \"messages\")", "def get_topic(self, id):\n TOPIC = \"\"\"SELECT COUNT(*) FROM Topic WHERE id = %s\"\"\"\n\n ret = None\n try:\n self.db_cursor.execute(\"\"\"SELECT name, id FROM Topic WHERE id = %s\"\"\", (id,))\n t = self.db_cursor.fetchall()\n ret = Topic()\n ret.name = t[0][0]\n ret.id = id\n\n except:\n logging.warning(\"DBAdapter: Error- cannot retrieve topic with id \" + str(id))\n return None\n\n return ret", "def __get_topic(self) -> str:\n\t\treturn os.getenv('MQTT_DRIVER_TOPIC', 'app/book/#')", "def find(self, task_id):\n for task_obj in self._queue:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in dorm: '{}'\".format(task_id))", "def getQueueDetails(self, queue_name, project_id=\"\"):\n if project_id == \"\":\n project_id = self.project_id\n url = \"%sprojects/%s/queues/%s?oauth=%s\" % (self.url, project_id,\n queue_name, self.token)\n body = self.__get(url)\n queue = json.loads(body)\n return queue", "def magma_queue_get_device(queue):\n\n return _libmagma.magma_queue_get_device(queue)", "def _get_controller(self):\n return self.__controller", "def topic(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"topic\")", "def 
show_qos_queue(self, queue, **_params):\r\n return self.get(self.qos_queue_path % (queue),\r\n params=_params)", "def __get_topic(self) -> str:\n\t\treturn os.getenv('MQTT_DRIVEN_TOPIC', 'app/event')", "def view_topic(request, topic_slug):\n view_topic = request.db[\"topic\"].find_one({\"url\": topic_slug.matchdict[\"url\"]})\n answers = request.db[\"answer\"].find({\"topic_index\": topic_slug.matchdict[\"url\"]})\n\n return render_to_response(\n \"templates/view_topic.html\",\n {\"view_topic\": view_topic, \"answers\": answers, \"count\": count(request),},\n request=request,\n )", "def _find_controller(self, controller):\n if controller is None:\n return None\n # If the output specified is a string controller e.g. \"WelcomeController@show\"\n elif isinstance(controller, str):\n if \"@\" in controller:\n controller_path, controller_method_str = controller.split(\"@\")\n else:\n controller_path = controller\n controller_method_str = \"__call__\"\n\n controller_path = modularize(controller_path).split(\".\")\n if len(controller_path) > 1:\n controller_name = controller_path.pop()\n prefix_path = \".\".join(controller_path)\n else:\n controller_name = controller_path[0]\n prefix_path = \"\"\n # build a list of all locations where the controller can be found\n # if the controller is defined such as auth.WelcomeController, append the prefix path to\n # the locations\n locations = list(\n map(\n lambda loc: f\"{loc}.{removeprefix(prefix_path, loc)}\"\n if prefix_path\n else loc,\n self.controllers_locations,\n )\n )\n try:\n self.controller_class = Loader.find(\n Controller, locations, controller_name, raise_exception=True\n )\n except LoaderNotFound as e:\n self.e = e\n print(f\"\\033[93mTrouble importing controller!\\n> {str(e)}\\033[0m\")\n # controller is an instance with a bound method\n elif hasattr(controller, \"__self__\"):\n _, controller_method_str = controller.__qualname__.split(\".\")\n self.controller_instance = controller.__self__\n\n # it's a class or class.method, we don't have to find it, just get the class\n elif hasattr(controller, \"__qualname__\"):\n if \".\" in controller.__qualname__:\n controller_name, controller_method_str = controller.__qualname__.split(\n \".\"\n )\n else:\n controller_name = controller.__qualname__\n controller_method_str = \"__call__\"\n\n try:\n self.controller_class = Loader.get_object(\n controller.__module__, controller_name, raise_exception=True\n )\n except LoaderNotFound as e:\n self.e = e\n print(f\"\\033[93mTrouble importing controller!\\n> {str(e)}\\033[0m\")\n # it's a controller instance\n else:\n self.controller_instance = controller\n controller_method_str = \"__call__\"\n\n # Set the controller method on class. 
This is a string\n self.controller_method = controller_method_str", "def _get_topic_for_response():\n return _get_topic_base() + \"res/\"", "def topic(self, topic):\n self.connection.topic(str(self), topic)", "def deregister(self, queue, project=None):\n self._catalogue_ctrl.delete(project, queue)", "def GetTopicName(args):\n if args.add_topic:\n topic_ref = args.CONCEPTS.add_topic.Parse()\n elif args.remove_topic:\n topic_ref = args.CONCEPTS.remove_topic.Parse()\n else:\n topic_ref = args.CONCEPTS.update_topic.Parse()\n\n return topic_ref.RelativeName()", "def get_curriculum_topic(self, curriculum_name, curriculum_topic):\n ret = None\n try:\n self.db_cursor.execute(\n \"\"\"SELECT level, subject_area, time_unit FROM CurriculumTopics WHERE curriculum_name = %s AND topic_id = %s\"\"\",\n (curriculum_name, curriculum_topic))\n ct = self.db_cursor.fetchall()\n if ct:\n ret = CurriculumTopic\n level = ct[0][0]\n subject_area = ct[0][1]\n time_unit = ct[0][2]\n ret.curriculum_name = curriculum_name\n ret.topic_id = curriculum_topic\n ret.time_unit = time_unit\n ret.subject_area = subject_area\n ret.level = level\n else:\n ret = None\n\n except:\n logging.warning(\"DBAdapter: Error- cannot retrieve curriculum topic: \" + str(id))\n\n return ret", "def declare_queue(self, topic):\n #from trove.rpc.impl_kombu import Connection\n from trove.openstack.common.rpc import create_connection\n with create_connection() as conn:\n consumer = conn.declare_topic_consumer(topic=topic)", "def topic(request, topic_id):\n\ttopic = Topic.objects.get(id=topic_id)\n\tvocabs = topic.vocab_set.all()\n\tcontext = {'topic': topic, 'vocabs':vocabs}\n\treturn render(request, 'Toeic/topic.html', context)", "def find_queue(queue):\n athena_queue = canonicalize_queue(queue)\n # If a queue isn't an Athena queue, punt straight to the default\n # CUPS server\n if not athena_queue:\n return SYSTEM_CUPS, None, queue\n queue = athena_queue\n\n # Get rid of any instance on the queue name\n # TODO The purpose of instances is to have different sets of default\n # options. Queues may also have default options on the null\n # instance. Figure out if we need to do anything about them\n queue = queue.split('/')[0]\n\n # If we're still here, the queue is definitely an Athena print\n # queue; it was either in the local cupsd pointing to Athena, or the\n # local cupsd didn't know about it.\n # Figure out what Athena thinks the backend server is, and whether\n # that server is running a cupsd; if not, fall back to LPRng\n\n rm = get_hesiod_print_server(queue)\n if not rm:\n # In the unlikely event we're wrong about it being an Athena\n # print queue, the local cupsd is good enough\n return SYSTEM_CUPS, None, queue\n\n # Give up and return rm and queue. If it's not running a cupsd,\n # too bad. 
It's not our job to check whether cupsd is running.\n return SYSTEM_CUPS, rm, queue", "def _ATTopic_query(self, **kw):\n del kw['path'] # don't want to limit to context as w/ folders\n return self.context.queryCatalog(self.context.REQUEST, \n False, # no batch here\n None, # no b_size here\n False,\n **kw)", "def show_controller(cls, args, config):\n if len(args) == 0:\n raise MOLNSException(\"USAGE: molns controller show name\")\n return {'msg': str(config.get_object(name=args[0], kind='Controller'))}", "def get_controller(cls):\n if not cls.hnd:\n raise Exception('A handler is to be set for getting contoller.')\n if not cls.controller:\n cls.controller = cls.config.controller_class(cls.hnd)\n cls.session = cls.controller.session\n return cls.controller", "def create(self):\n topic = self.__conn__.create_topic(self.__topic__)\n return topic.get_producer(*self.__args__, **self.__kargs__)", "def topic(request, topic_id):\n posts = Post.objects.filter(topic=topic_id).order_by(\"created\")\n posts = mk_paginator(request, posts, DJANGO_SIMPLE_FORUM_REPLIES_PER_PAGE)\n topic = Topic.objects.get(pk=topic_id)\n return render_to_response(\"forum/topic.html\", add_csrf(request, posts=posts, pk=topic_id,\n topic=topic), context_instance=RequestContext(request))", "def topics(self):\r\n return ProjectTopics(self)", "def _get_issue_tracker_project_name(testcase=None):\n from clusterfuzz._internal.datastore import data_handler\n job_type = testcase.job_type if testcase else None\n return data_handler.get_issue_tracker_name(job_type)", "def queue_path(self, project, location, queue):\n # This is value is not actually used, but it might be good for debugging.\n return \"projects/{project}/locations/{location}/queues/{queue}\".format(\n project=project, location=location, queue=queue)", "def get_project(con):\n try:\n return con.project_read(fq_name=conf.get('default_project', 'UNEXPECTED_VALUE'))\n except:\n log.debug('Unable to find project default-domain, admin:', exc_info=True)\n return None", "def _topic(self, topic):\n base = \"engine.%s\" % self.engine_id\n\n return f\"{base}.{topic}\".encode()", "def get_project(project_id):\n return Project.objects.get(id=project_id)", "def __init__(self,\n project_id='issue-label-bot-dev',\n topic_name='event_queue',\n subscription_name='subscription_for_event_queue',\n embedding_api_endpoint='https://embeddings.gh-issue-labeler.com/text'):\n # TODO(chunhsiang): change the embedding microservice to be an internal DNS of k8s service.\n # see: https://v1-12.docs.kubernetes.io/docs/concepts/services-networking/dns-pod-service/#services\n self.project_id = project_id\n self.topic_name = topic_name\n self.subscription_name = subscription_name\n self.embedding_api_endpoint = embedding_api_endpoint\n self.embedding_api_key = os.environ['GH_ISSUE_API_KEY']\n self.app_url = os.environ['APP_URL']\n\n # init GitHub app\n github_init()\n # init pubsub subscription\n self.create_subscription_if_not_exists()", "def get_project(arn=None):\n pass", "def get_controller_func(controller):\n\n if controller in CONTROLLERS:\n return CONTROLLERS[controller]\n\n return None", "def topic(self) -> str:\n return self._topic", "def project(self, request, pk=None):\n \n obj = self.get_object()\n obj_mapping = {\n 'teacher': obj\n }\n try:\n user = self.request.user\n query = models.Project.objects.filter(\n subject__teacher__user=user,\n subject=obj\n )\n serializer = self.get_serializer(query, many=True)\n\n id = self.request.query_params.get('id')\n\n if id:\n q = get_object_or_404(\n 
models.Project,\n pk=id,\n subject=obj\n )\n return self.filtering(request, q)\n\n self.actionhelper(request, query, obj_mapping)\n\n return Response(serializer.data)\n\n except:\n raise except_handler.ActionDecor()", "def find_project_for_story(story_id):\r\n\r\n for project in Project.all():\r\n story = project.load_story(story_id)\r\n if story is not None:\r\n return project\r\n\r\n #Not found\r\n print \"No project found for story: #{}\".format(story_id)\r\n return None", "def resolve_topics(self, info, **kwargs):\n return Topic.objects.all()", "def show(ctx, project_id, backend):\n try:\n project = ctx.obj['projects_db'].get(project_id, backend)\n except IOError:\n raise Exception(\"Error: the projects database file doesn't exist. \"\n \"Please run `taxi update` to create it\")\n\n if project is None:\n ctx.obj['view'].err(\n \"Could not find project `%s`\" % (project_id)\n )\n else:\n ctx.obj['view'].project_with_activities(project)", "def controller( self ):\n\t\ttry:\n\t\t\treturn self._controller\n\t\texcept Exception as e:\n\t\t\tself.logToConsole( \"controller: %s\" % str(e) )", "def find_top_and_topic(self, index, trs):\n\n top_result = \"kein_TOP\" \n topic_result = \"kein_TOPIC\" \n\n while index > 0:\n tree = html.fromstring(trs[index].get())\n top = tree.xpath('//tr/td[contains(@class, \"smc_tophn\")]/text()') \n topic = tree.xpath('//tr/td/a[contains(@class, \"smc_doc smc_field_voname smc_datatype_vo\")]/text()') \n\n if top != [] and top_result == \"kein_TOP\":\n top_result = top[0]\n if topic != [] and topic_result == \"kein_TOPIC\":\n topic_result = topic[0]\n\n index -= 1\n\n return top_result, topic_result", "def get_project_name(self, project_id):\n test = \"\"\"SELECT EXISTS(\n SELECT 1\n FROM barcodes.project\n WHERE project_id=%s\n )\"\"\"\n query = \"\"\"SELECT project\n FROM barcodes.project\n WHERE project_id=%s\"\"\"\n\n with self._transaction.cursor() as cur:\n cur.execute(test, [project_id, ])\n if not cur.fetchone()[0]:\n raise NotFound(f\"Project f'{project_id}' not found\")\n else:\n cur.execute(query, [project_id, ])\n return cur.fetchone()[0]", "def get_project(self, project_id):\n res = self.conn.cursor().execute(\"SELECT * FROM projects where id=?\", (project_id,))\n return res.fetchone()", "def get_project(db, id):\n \n for element in db:\n if element['project_no'] == id:\n return element\n return None", "async def get_queue(self, ctx: commands.Context) -> Optional[QueueManager]:\n\n return self.queue[ctx.guild.id]", "def find_project_for_story(story_id):\n\n for project in Project.all():\n story = project.load_story(story_id)\n if story is not None:\n return project\n\n #Not found\n print \"No project found for story: #{}\".format(story_id)\n return None", "def get_key(cls, obj, query):\n\n if hasattr(obj, 'config'):\n for item in obj.config.hardware.device:\n if query in item.deviceInfo.label:\n key = item.key\n controller_key = item.controllerKey\n\n return (key, controller_key)", "def ctrlqueue_show(self) -> int:\n try:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(7), ctypes.c_int32(0))\n except Exception as e:\n Base.warn_msg(\"An error occur when tried to get *Num Actions of CrlQueue* check if *Queue* is NOT empty\", e)", "def do_project_show(cs, args):\n key = args.project\n if cs.projects.is_id(key):\n id = key\n else:\n id = cs.projects.get_id_by_name(key)\n _, project = cs.projects.get(id)\n utils.print_dict(project)", "def get_topic_for_content(self, contentId):\n # TODO: Assuming first server is good - need to make fallback logic\n 
return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Forum/GetTopicForContent/{contentId}/\"))", "def get_project(self, name=None):\n if not name:\n name = self.get_project_name()\n projects = self.get_projects()\n for p in projects:\n if p.name == name:\n return p\n raise NotFound(name)", "def get_renamed_topic(self, input_topic):\n output_topic = input_topic\n _log.debug(\n \"_topic_replace_list is {}\".format(self._topic_replace_map))\n input_topic_lower = input_topic.lower()\n # Only if we have some topics to replace.\n if self._topic_replace_map:\n # if we have already cached the topic then return it.\n if input_topic_lower in self._topic_replacement.keys():\n output_topic = self._topic_replacement[input_topic_lower]\n else:\n self._topic_replacement[input_topic_lower] = input_topic\n temptopics = {}\n for k, v in self._topic_replace_map.items():\n\n if k in input_topic_lower:\n # this allows multiple things to be replaced from\n # from a given topic.\n new_topic = temptopics.get(input_topic_lower,\n input_topic)\n # temptopics[input_topic] = new_topic.replace(\n # x['from'], x['to'])\n\n temptopics[input_topic_lower] = re.compile(\n re.escape(k), re.IGNORECASE).sub(v, new_topic)\n\n for k, v in temptopics.items():\n self._topic_replacement[k] = v\n output_topic = self._topic_replacement[input_topic_lower]\n _log.debug(\"Output topic after replacements {}\".format(output_topic))\n return output_topic", "def register(self, queue, project=None):\n\n # TODO(kgriffs): SHARDING - Implement this!\n pass", "def search_by_topic(query):\n data = {'query': str(query)}\n url = r'http://www.reddit.com/api/subreddits_by_topic.json'\n response = r.get(url, data=data)\n return response.json()['data']", "def test_retrieve_topic_viewset(self):\n\n topic = TopicFactory(author=self.user)\n response = self.client.get(reverse('api:topics-detail', kwargs={'topic_id': topic.id}))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.get('title'), topic.title)", "def irc_TOPIC(self, prefix, params):\n user = re.match(self.user_regex, prefix)\n channel = params[0]\n topic = params[1]\n\n self.logger.debug(\n \"%s!%s@%s changed topic in %s to %s\" %\n (user.group(1), user.group(2), user.group(3), channel, topic)\n )\n\n self.event_manager.fire(\"irc.topic\", user, channel, topic)", "def get_doc(doc_id):\n queue = get_doc_queue(app.config)\n data = queue.get_by_id(doc_id)\n if data:\n return jsonify(doc=data)\n return jsonify(err=f\"{doc_id} not found\"), 404", "def on_topic_change(bot, trigger):\n\tif len(trigger.args) == 1:\n\t\treturn # Empty TOPIC gets the current topic.\n\tchannel = trigger.args[0]\n\tlog(bot, channel, '*** {} changes topic to \"{}\"', trigger.nick, trigger.args[1]);", "def get_project(project):\n command = 'openstack project show %s' % project\n try:\n project_info = parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])\n except:\n print \"Project '%s' not found.\" % project\n sys.exit(-1)\n return project_info", "def _TopicSearchHelper(self, topic_list, topic_id):\n for topic_obj in topic_list:\n if topic_obj.topic_id == topic_id:\n return topic_obj\n elif topic_obj.children:\n children_result = self._TopicSearchHelper(topic_obj.children, topic_id)\n\n if children_result:\n return children_result\n\n return None", "def create_topic (self):\n return self.tm.create_topic()" ]
[ "0.7733818", "0.7603851", "0.74324155", "0.728382", "0.63744485", "0.6344827", "0.57456493", "0.5728621", "0.56051105", "0.55933976", "0.55091506", "0.5342035", "0.5326094", "0.53004175", "0.5222844", "0.520292", "0.5191208", "0.51723534", "0.5128026", "0.5117455", "0.508792", "0.5079124", "0.5064234", "0.5044502", "0.5033179", "0.5032762", "0.50282055", "0.50239825", "0.5002481", "0.4993937", "0.4988251", "0.4955433", "0.4906687", "0.48869", "0.48846516", "0.48810452", "0.4853596", "0.484603", "0.48294413", "0.48232833", "0.48230734", "0.4816057", "0.48132634", "0.4806995", "0.4798392", "0.47969776", "0.47926608", "0.47827923", "0.47751445", "0.47667953", "0.476025", "0.47381595", "0.4734385", "0.47271168", "0.47222555", "0.47193474", "0.4707797", "0.47064543", "0.47016254", "0.46944207", "0.46875194", "0.4659466", "0.4638633", "0.4637576", "0.46372387", "0.46275783", "0.46210718", "0.46201822", "0.46102926", "0.46040353", "0.45992893", "0.45984602", "0.45967302", "0.4596123", "0.45944285", "0.45852706", "0.45778513", "0.45737696", "0.45671892", "0.45622244", "0.4558508", "0.4545525", "0.4543562", "0.45404604", "0.4539835", "0.45394018", "0.45392817", "0.45315152", "0.45285234", "0.45148113", "0.4514723", "0.45082355", "0.4499443", "0.44903058", "0.44846278", "0.44700477", "0.44676515", "0.44571212", "0.44539973", "0.44503105" ]
0.7806749
0
Lookup a pool driver for the given queue and project.
def lookup(self, queue, project=None): try: pool_id = self._pool_id(queue, project) except errors.QueueNotMapped as ex: LOG.debug(ex) return self.get_default_pool(use_listing=False) return self.get_driver(pool_id)
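Illustrative aside, not part of the dataset row: this record's document resolves a (queue, project) pair to a pool id through a catalogue, falls back to a default pool when the queue is not mapped, and hands back a pool driver. The sketch below reproduces that control flow with stand-in names; QueueNotMapped, the catalogue dictionary, and the driver map are assumptions, not the storage backend's real classes.

class QueueNotMapped(Exception):
    """Raised when no pool mapping exists for a queue (stand-in)."""

class PoolCatalog:
    def __init__(self, catalogue, drivers, default_pool):
        self._catalogue = catalogue      # {(project, queue): pool_id}
        self._drivers = drivers          # {pool_id: pool driver}
        self._default_pool = default_pool

    def _pool_id(self, queue, project=None):
        try:
            return self._catalogue[(project, queue)]
        except KeyError:
            raise QueueNotMapped(queue)

    def lookup(self, queue, project=None):
        # Unmapped queues quietly fall back to the default pool driver,
        # mirroring the try/except/return shape of the document above.
        try:
            pool_id = self._pool_id(queue, project)
        except QueueNotMapped:
            return self._default_pool
        return self._drivers[pool_id]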
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lookup(self, queue, project=None):\n\n try:\n shard_id = self._shard_id(queue, project)\n except errors.QueueNotMapped as ex:\n LOG.debug(ex)\n\n # NOTE(kgriffs): Return `None`, rather than letting the\n # exception bubble up, so that the higher layer doesn't\n # have to duplicate the try..except..log code all over\n # the place.\n return None\n\n return self.get_driver(shard_id)", "def _pool_id(self, queue, project=None):\n return self._catalogue_ctrl.get(project, queue)['pool']", "def get_driver(self, pool_id, pool_conf=None):\n\n try:\n return self._drivers[pool_id]\n except KeyError:\n # NOTE(cpp-cabrera): cache storage driver connection\n self._drivers[pool_id] = self._init_driver(pool_id, pool_conf)\n\n return self._drivers[pool_id]", "def lookup(self, queue, project=None):\n\n # TODO(kgriffs): SHARDING - Raise an exception if the queue\n # does not have a mapping (it does not exist).\n\n # TODO(kgriffs): SHARDING - Get ID from the catalog backend\n shard_id = '[insert_id]'\n try:\n shard = self._shards[shard_id]\n except KeyError:\n self._shards[shard_id] = shard = self._init_shard(shard_id)\n\n return shard", "def register(self, queue, project=None, flavor=None):\n\n # NOTE(gengchc): if exist, get queue's pool.flavor:\n # if queue's pool.flavor is different, first delete it and add it.\n # Otherwise, if the flavor in the meteredata of the queue is\n # modified, the catalog will be inconsistent.\n if self._catalogue_ctrl.exists(project, queue):\n catalogue = self._catalogue_ctrl.get(project, queue)\n oldpoolids = catalogue['pool']\n oldpool = self._pools_ctrl.get(oldpoolids)\n oldflavor = oldpool['flavor']\n msgtmpl = _(u'register queue to pool: old flavor: %(oldflavor)s '\n ', new flavor: %(flavor)s')\n LOG.info(msgtmpl,\n {'oldflavor': oldflavor, 'flavor': flavor})\n if oldpool['flavor'] != flavor:\n self._catalogue_ctrl.delete(project, queue)\n\n if not self._catalogue_ctrl.exists(project, queue):\n if flavor is not None:\n flavor = self._flavor_ctrl.get(flavor, project=project)\n pools = self._pools_ctrl.get_pools_by_flavor(\n flavor=flavor,\n detailed=True)\n pool = select.weighted(pools)\n pool = pool and pool['name'] or None\n msgtmpl = _(u'register queue to pool: new flavor:%(flavor)s')\n LOG.info(msgtmpl,\n {'flavor': flavor.get('name', None)})\n else:\n # NOTE(flaper87): Get pools assigned to the default\n # group `None`. We should consider adding a `default_group`\n # option in the future.\n pools = self._pools_ctrl.get_pools_by_flavor(detailed=True)\n pool = select.weighted(pools)\n pool = pool and pool['name'] or None\n\n if not pool:\n # NOTE(flaper87): We used to raise NoPoolFound in this\n # case but we've decided to support automatic pool\n # creation. Note that we're now returning and the queue\n # is not being registered in the catalogue. 
This is done\n # on purpose since no pool exists and the \"dummy\" pool\n # doesn't exist in the storage\n if self.lookup(queue, project) is not None:\n return\n raise errors.NoPoolFound()\n msgtmpl = _(u'register queue to pool: new flavor: None')\n LOG.info(msgtmpl)\n\n msgtmpl = _(u'register queue: project:%(project)s'\n ' queue:%(queue)s pool:%(pool)s')\n LOG.info(msgtmpl,\n {'project': project,\n 'queue': queue,\n 'pool': pool})\n self._catalogue_ctrl.insert(project, queue, pool)", "def _get_pool_by_name(self, pool_name):\n pool_manager = PoolManager(organization_name=self._organization_name,\n project_name=self._project_name, creds=self._creds)\n pools = pool_manager.list_pools()\n return next((pool for pool in pools.value if pool.name == pool_name), None)", "def find_module(self, name):\n if name in self.pool:\n return self.pool[name]\n else:\n return None", "def find(cls, host, user):\n cls.__check_parameters(host=host, user=user)\n if not hasattr(Connection, \"__pool__\"):\n return None\n cid = cls.generate_id(host, user)\n return Connection.__pool__.get(cid) # by default None is returned", "def get_driver(drv):\n return GenericDriver.get_driver(drv)", "def _get_pool(name=None, session=None):\n if session is None:\n session = _get_session()\n pools = session.xenapi.pool.get_all()\n for pool in pools:\n pool_record = session.xenapi.pool.get_record(pool)\n if name in pool_record.get(\"name_label\"):\n return pool\n return None", "def _get_driver(self, driver_name):\n driver = lb_const.SERVICE_TYPE + driver_name\n return self.drivers[driver]", "async def _get_work_pool_queue_id_from_name(\n self, session: AsyncSession, work_pool_name: str, work_pool_queue_name: str\n ) -> UUID:\n work_pool_queue = await models.workers.read_work_pool_queue_by_name(\n session=session,\n work_pool_name=work_pool_name,\n work_pool_queue_name=work_pool_queue_name,\n )\n if not work_pool_queue:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Worker queue '{work_pool_name}/{work_pool_queue_name}' not found.\",\n )\n\n return work_pool_queue.id", "def find_queue(queue):\n athena_queue = canonicalize_queue(queue)\n # If a queue isn't an Athena queue, punt straight to the default\n # CUPS server\n if not athena_queue:\n return SYSTEM_CUPS, None, queue\n queue = athena_queue\n\n # Get rid of any instance on the queue name\n # TODO The purpose of instances is to have different sets of default\n # options. Queues may also have default options on the null\n # instance. Figure out if we need to do anything about them\n queue = queue.split('/')[0]\n\n # If we're still here, the queue is definitely an Athena print\n # queue; it was either in the local cupsd pointing to Athena, or the\n # local cupsd didn't know about it.\n # Figure out what Athena thinks the backend server is, and whether\n # that server is running a cupsd; if not, fall back to LPRng\n\n rm = get_hesiod_print_server(queue)\n if not rm:\n # In the unlikely event we're wrong about it being an Athena\n # print queue, the local cupsd is good enough\n return SYSTEM_CUPS, None, queue\n\n # Give up and return rm and queue. If it's not running a cupsd,\n # too bad. 
It's not our job to check whether cupsd is running.\n return SYSTEM_CUPS, rm, queue", "def get_pool(self, name, dc, cluster):\n cluster_obj = self.get_cluster(cluster, dc)\n for rp in cluster_obj.resourcePool.resourcePool:\n if rp.name == name:\n return rp", "def get_queue_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.queue_controller", "def get_pool(self):\n try:\n return self._pool\n except AttributeError:\n db_url = getattr(settings, self.name)\n self._pool = PostgresConnectionPool.for_url(db_url)\n return self._pool", "def _get_driver_from_dsn(self, dsn):\n\n return dsn.split(':')[0]", "def find_driver_class(self, scheme_or_url: str) -> Optional[Type[Driver]]:\n index = scheme_or_url.find(\":\")\n if index > 0:\n scheme = scheme_or_url[0:index]\n else:\n scheme = scheme_or_url\n\n return self.drivers.get(scheme.lower())", "def get_by_url(self, url, pool_name=None):\n\t\tif not pool_name:\n\t\t\treturn self.pool[url]\n\t\treturn getattr(self, pool_name)[url]", "def find_backend(cls) -> IBackend:\n cls.Lock.acquire()\n try:\n return cls._load_backend()\n finally:\n cls.Lock.release()", "def get_provider_from_backend(backend):\n known_providers = {\n 'BasicAerProvider': 'qiskit.BasicAer',\n 'AerProvider': 'qiskit.Aer',\n 'IBMQProvider': 'qiskit.IBMQ',\n }\n if isinstance(backend, BaseBackend):\n provider = backend.provider()\n if provider is None:\n raise ImportError(\"Backend object '{}' has no provider\".format(backend.name()))\n\n return known_providers.get(provider.__class__.__name__, provider.__class__.__qualname__)\n elif not isinstance(backend, str):\n raise ImportError(\"Invalid Backend '{}'\".format(backend))\n\n for provider in known_providers.values():\n try:\n if get_backend_from_provider(provider, backend) is not None:\n return provider\n except:\n pass\n\n raise ImportError(\"Backend '{}' not found in providers {}\".format(backend, list(known_providers.values())))", "def magma_queue_get_device(queue):\n\n return _libmagma.magma_queue_get_device(queue)", "def find_backend(path, backends):\n for backend in backends:\n if backend.path == path:\n return backend\n\n return None", "def find_backend(path, backends):\n for backend in backends:\n if backend.path == path:\n return backend\n\n return None", "def get_pool(name):\n if name not in _CONNECTIONS:\n add_pool(name)\n return _CONNECTIONS[name]", "async def read_work_pool_queue(\n work_pool_name: str = Path(..., description=\"The work pool name\"),\n work_pool_queue_name: str = Path(\n ..., description=\"The work pool queue name\", alias=\"name\"\n ),\n worker_lookups: WorkerLookups = Depends(WorkerLookups),\n db: OrionDBInterface = Depends(provide_database_interface),\n) -> schemas.core.WorkPoolQueue:\n\n async with db.session_context(begin_transaction=True) as session:\n work_pool_queue_id = await worker_lookups._get_work_pool_queue_id_from_name(\n session=session,\n work_pool_name=work_pool_name,\n work_pool_queue_name=work_pool_queue_name,\n )\n\n return await models.workers.read_work_pool_queue(\n session=session, work_pool_queue_id=work_pool_queue_id, db=db\n )", "def get_queue(self, task_name):\n for name, queue in self.queues.items():\n if task_name in queue:\n return name\n return self.default_queue", "def get_issue(issue_number):\n backend_name = os.environ[\"ISSUE_BACKEND\"]\n backend_module = importlib.import_module(\n \"issuebranch.backends.{}\".format(backend_name)\n )\n\n return getattr(backend_module, \"Backend\")(issue_number)", "def 
setup_device_pool(project_arn, device_pool_name):\n\n target_device_pool_arn = ''\n is_device_pool_exists = False\n for device_pool in device_farm.list_device_pools(arn=project_arn)[\n 'devicePools']:\n pool_name = device_pool['name']\n if pool_name == device_pool_name:\n print('{} already exists'.format(pool_name))\n target_device_pool_arn = device_pool['arn']\n is_device_pool_exists = True\n break\n else:\n is_device_pool_exists = False\n\n if not is_device_pool_exists:\n target_device_pool_arn = create_device_pool(\n device_pool_name, project_arn)\n\n return target_device_pool_arn\n\n raise KeyError('Problem finding device pool %r' % device_pool_name)", "def get_pool():\n app = get_app()\n return app['pool']", "def get_driver(self, shard_id):\n\n try:\n return self._drivers[shard_id]\n except KeyError:\n # NOTE(cpp-cabrera): cache storage driver connection\n self._drivers[shard_id] = self._init_driver(shard_id)\n\n return self._drivers[shard_id]", "def _load_driver(backend, **kargs):\n bk_module = importlib.import_module('backend', __package__)\n driver_cls = getattr(bk_module, str.capitalize(backend) + 'Backend')\n return driver_cls(**kargs)", "def _get_ibmq_provider():\n providers = OrderedDict()\n try:\n providers['qiskit.IBMQ'] = get_backends_from_provider('qiskit.IBMQ')\n except Exception as e:\n logger.warning(\"Failed to access IBMQ: {}\".format(str(e)))\n\n return providers", "def get_backend(name):\n return _DEFAULT_PROVIDER.get_backend(name)", "def get_device_pool(arn=None):\n pass", "def get_pool(self, pool_name=None, pool_id=None):\n\n id_or_name = pool_id if pool_id else pool_name\n errormsg = \"Failed to get the pool {0} with error {1}\"\n\n try:\n obj_pool = self.unity_conn.get_pool(name=pool_name, _id=pool_id)\n\n if pool_id and obj_pool.existed:\n LOG.info(\"Successfully got the pool object %s\",\n obj_pool)\n return obj_pool\n if pool_name:\n LOG.info(\"Successfully got pool %s\", obj_pool)\n return obj_pool\n else:\n msg = \"Failed to get the pool with {0}\".format(\n id_or_name)\n LOG.error(msg)\n self.module.fail_json(msg=msg)\n\n except Exception as e:\n msg = errormsg.format(id_or_name, str(e))\n LOG.error(msg)\n self.module.fail_json(msg=msg)", "def register(self, queue, project=None):\n\n # TODO(kgriffs): SHARDING - Implement this!\n pass", "def search_queue_number(self, Q_strip):\n if Q_strip is self.PF_Q_strip:\n out = self.dut.send_expect(\"cat config/common_base\", \"]# \", 10)\n pattern = \"(%s=)(\\d*)\" % Q_strip\n else :\n out = self.dut.send_expect(\"cat drivers/net/i40e/i40e_ethdev.c\", \"]# \", 10)\n pattern = \"#define %s\\s*(\\d*)\" % Q_strip\n s = re.compile(pattern)\n res = s.search(out)\n if res is None:\n print utils.RED('Search no queue number.')\n return None\n else:\n if Q_strip is self.VF_Q_strip:\n queue = res.group(1)\n else :\n queue = res.group(2)\n return int(queue)", "def getScanner(self, hostname, projectname):\n if not (projectname, hostname, None) in self.scanSetups:\n sc = Scanner(self, hostname, projectname)\n self.scanSetups[(projectname, hostname, None)] = sc\n else:\n sc = self.scanSetups[(projectname, hostname, None)]\n return sc", "def get_backend_by_name(cls_str):\n # type: (str) -> Backend\n try:\n return globals()[cls_str]()\n except KeyError:\n raise InvalidBackendClass('Invalid backend class name: {cls}'.format(cls=cls_str))", "def lookForQueueingCommands():\n for queue, binary in queueBinaryMap.items():\n if checkForBinary(binary):\n return queue\n else:\n raise Exception(\"Cannot locate a queueing system. 
None of these executables were found in your PATH: %s\" % (queueBinaryMap.values(),))", "def get_default_pool(con):\n try:\n return con.floating_ip_pool_read(fq_name=conf.get('default_pool', 'UNEXPECTED_VALUE'))\n except NoIdError:\n log.debug('Unable to find pool.')\n return None", "def get_spider_queues(config):\n dbs_dir = config.get('dbs_dir', 'dbs')\n if not os.path.exists(dbs_dir):\n os.makedirs(dbs_dir)\n d = {}\n for project in get_project_list(config):\n db_path = os.path.join(dbs_dir, '%s.db' % project)\n d[project] = SqliteSpiderQueue(db_path)\n return d", "def get_project(db, id):\n \n for element in db:\n if element['project_no'] == id:\n return element\n return None", "def get(queue_name: str, **kwargs) -> Queue:\n return Queue(queue_name, **kwargs)", "def _get_pool(self, *args, **kwargs):\n\n pool_name = '_pool_%s' % getattr(self, 'alias', 'common')\n\n if not hasattr (self.__class__, pool_name):\n lock = thread.allocate_lock()\n lock.acquire()\n\n try:\n pool = cx_Oracle.SessionPool(\n user=self.user,\n password=self.password,\n dsn=self.tns,\n min=CX_POOL_SESSION_MIN,\n max=CX_POOL_SESSION_MAX,\n increment=CX_POOL_SESSION_INCREMENT,\n connectiontype=cx_Oracle.Connection,\n threaded=CX_POOL_THREADED,\n getmode=cx_Oracle.SPOOL_ATTRVAL_NOWAIT,\n homogeneous=True)\n except Exception as err:\n pool = None\n\n if pool:\n pool.timeout = CX_POOL_CONNECT_TIMEOUT\n setattr(self.__class__, pool_name, pool)\n else:\n msg = \"\"\" ### Database login failed or database not found ### \"\"\"\n raise self.Database_Error, ('%s') %(msg)\n\n lock.release()\n\n return getattr(self.__class__, pool_name)", "def _get_host_iqn_registered_in_target_by_name(\n self, port, target_name, host_iqn):\n for hba_iscsi in self.client.get_hba_iscsis_by_name(port, target_name):\n if host_iqn == hba_iscsi['iscsiName']:\n return hba_iscsi\n return None", "def lookup(cls, _db, short_name):\n def _lookup():\n library = get_one(_db, Library, short_name=short_name)\n return library, False\n library, is_new = cls.by_cache_key(_db, short_name, _lookup)\n return library", "def get_rabbit_queue():\n\n return \"metrics_queue\"", "def getQueueDetails(self, queue_name, project_id=\"\"):\n if project_id == \"\":\n project_id = self.project_id\n url = \"%sprojects/%s/queues/%s?oauth=%s\" % (self.url, project_id,\n queue_name, self.token)\n body = self.__get(url)\n queue = json.loads(body)\n return queue", "def support_queue(self, queue_id):\r\n return support_queues.SupportQueue(self, queue_id)", "def get_default_pool():\n return 'tank'", "def get_nic_driver(pci_id):\n driverlist = dict(zip(NICS.values(), DRIVERS.keys()))\n try:\n driver = DRIVERS[driverlist[pci_id]]\n except Exception as e:\n driver = None\n return driver", "def _get_backend(args):\n if args.backend == 'gatttool':\n backend = GatttoolBackend\n elif args.backend == 'bluepy':\n backend = BluepyBackend\n elif args.backend == 'pygatt':\n backend = PygattBackend\n else:\n raise Exception('unknown backend: {}'.format(args.backend))\n return backend", "def find(self, task_id):\n for task_obj in self.queue:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in queue: '{}'\".format(task_id))", "def get_driver(driver_name):\n if driver_name == 'sqlite3':\n import sqlite3 as db_driver\n elif driver_name == 'cx_Oracle':\n import cx_Oracle as db_driver\n elif driver_name == 'pyodbc':\n import pyodbc as db_driver\n elif driver_name == 'pypyodbc':\n import pypyodbc as db_driver\n elif driver_name == 'psycopg2':\n import psycopg2 
as db_driver\n elif driver_name == 'PyMySql':\n import PyMySql as db_driver\n elif driver_name == 'pymssql':\n import pymssql as db_driver\n else:\n # TODO: pick a better exception type and message\n raise ImportError\n return db_driver", "def support_queue(self, queue_id):\n return support_queues.SupportQueue(self, queue_id)", "def find_by_project_id(cls, project_id, is_prod: bool) -> OIDCConfig:\n return cls.query.filter(OIDCConfig.project_id == project_id and\n OIDCConfig.is_prod == is_prod).first()", "def get(self, instance, database):\n key = instance + '/' + database\n\n if not key in self.queues:\n queue = Queue(self.poolsize)\n self.queues[key] = queue\n\n queue = self.queues[key]\n\n if queue.empty():\n cnxn = cnxn_ctor(instance, database)\n else:\n cnxn = queue.get()\n # Make sure the connection is still good.\n cnxn.ping()\n cnxn.commit()\n\n return cnxn", "def register(self, queue, project=None):\n # NOTE(cpp-cabrera): only register a queue if the entry\n # doesn't exist\n if not self._catalogue_ctrl.exists(project, queue):\n # NOTE(cpp-cabrera): limit=0 implies unlimited - select from\n # all shards\n shard = select.weighted(self._shards_ctrl.list(limit=0))\n\n if not shard:\n raise errors.NoShardFound()\n\n self._catalogue_ctrl.insert(project, queue, shard['name'])", "def _get_storage_backend(fq_classname):\n LOG.debug('Running _get_storage_backend with fq_classname [%s]'\n % fq_classname)\n\n if not fq_classname:\n return None\n\n (modname, clname) = fq_classname.rsplit('.', 1)\n # A test import of the backend storage class should have been undertaken\n # at app startup in django_drf_filepond.apps.ready so any failure\n # importing the backend should have been picked up then.\n mod = importlib.import_module(modname)\n storage_backend = getattr(mod, clname)()\n LOG.info('Storage backend instance [%s] created...' % fq_classname)\n\n return storage_backend", "def get_project(self, project):\n project_name = project\n\n try:\n # FIXME: project should be an integer or str, no both\n project_id = int(project)\n except ValueError:\n project_id = None\n\n try:\n # Find the first project occurrence\n project_found = next(p for p in self.get_projects() if p[\"id\"] == project_id\n or p[\"name\"] == project_name)\n # FIXME: use namedtuple instead? 
create a self.project = dict()?\n self.project_name = project_found[\"name\"]\n self.project_id = project_found[\"id\"]\n self.project_address = \"projects/%s/\" % self.project_id\n except StopIteration:\n logger.error(\"Project %s not found\" % project)\n raise KeyError", "async def create_work_pool_queue(\n work_pool_queue: schemas.actions.WorkPoolQueueCreate,\n work_pool_name: str = Path(..., description=\"The work pool name\"),\n worker_lookups: WorkerLookups = Depends(WorkerLookups),\n db: OrionDBInterface = Depends(provide_database_interface),\n) -> schemas.core.WorkPoolQueue:\n\n try:\n async with db.session_context(begin_transaction=True) as session:\n work_pool_id = await worker_lookups._get_work_pool_id_from_name(\n session=session,\n work_pool_name=work_pool_name,\n )\n\n model = await models.workers.create_work_pool_queue(\n session=session,\n work_pool_id=work_pool_id,\n work_pool_queue=work_pool_queue,\n db=db,\n )\n except sa.exc.IntegrityError:\n raise HTTPException(\n status_code=status.HTTP_409_CONFLICT,\n detail=\"A worker with this name already exists.\",\n )\n\n return model", "def get_claim_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.claim_controller", "def find(self, task_id):\n for task_obj in self._queue:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in dorm: '{}'\".format(task_id))", "async def _get_default_work_pool_queue_id_from_work_pool_name(\n self, session: AsyncSession, work_pool_name: str\n ):\n work_pool = await models.workers.read_work_pool_by_name(\n session=session,\n work_pool_name=work_pool_name,\n )\n if not work_pool:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f'Worker pool \"{work_pool_name}\" not found.',\n )\n\n return work_pool.default_queue_id", "def _load_driver_module(self):\n driver = get_dbapi_module(self.driver_module)\n exceptions.register(driver.DatabaseError)\n return driver", "def get_connection_pool(self, params):\r\n cp_params = dict(params)\r\n cp_params.update(self.pool_cls_kwargs)\r\n return self.pool_cls(**cp_params)", "def generate_queue(self,pool):\n\t\tqueue = []\n\t\tfor ele in self.elements:\n\t\t\tif ele.pool == pool and ele.status == 'pending':\n\t\t\t\tele.abs_path = \"/%s/%s/%s/%s\" % (\n\t\t\t\t\tself.base_dir,\n\t\t\t\t\tself.parent_dir,\n\t\t\t\t\tself.project,\n\t\t\t\t\tele.filename\n\t\t\t\t\t)\n\t\t\t\tqueue.append(ele)\n\t\treturn queue", "def get_driver(driver_name):\n try:\n o = drivers[driver_name]\n if type(o) == str:\n exec 'd = %s()' % o\n else:\n d = o()\n return d\n except KeyError:\n raise ValueError('Unknown driver name: \"{0}\"'.format(driver_name))", "def get_by_backend_name(cls, backend_name):\r\n cls._check_configured()\r\n for enabled in cls._ENABLED.values():\r\n if enabled.BACKEND_CLASS.name == backend_name:\r\n return enabled", "def deregister(self, queue, project=None):\n\n # TODO(kgriffs): SHARDING - Implement this!\n pass", "def driver(self):\n return self.rpc.call(MsfRpcMethod.DbDriver, [{}])['driver']", "def get_subscription_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.subscription_controller", "def get_pool_id(pool_name, host=None):\n cmd = utils.XMS_CLI_HEADER + \"-f json pool list\"\n print cmd\n ret = utils.execute_cmd_in_host(cmd, host)\n if ret[2] != 0 or isinstance(ret[0], dict):\n print \"[Error] Failed to get pool info. 
Error message: [{err}]\".format(err=ret[1])\n return -1\n try:\n pool_info = json.loads(ret[0])\n pools = pool_info[\"pools\"]\n for p in pools:\n if pool_name == p[\"name\"]:\n return p[\"id\"]\n except Exception as e:\n print \"[Error] error message is: \" + e.message\n return -1", "def query_queue(self, queue_name, alt_exchange_name=None):\n return self._query(queue_name, \"queue\", \"org.apache.qpid.broker\", alt_exchange_name)", "def backend_pool_type(self) -> Optional[pulumi.Input[Union[str, 'BackendPoolType']]]:\n return pulumi.get(self, \"backend_pool_type\")", "def storage_backend_get_by_name(context, name, inactive=False):\n return _find_storage_backend(context, dict(name = name), True, None, inactive=inactive)", "def get_pool(b_dummy=True, num=4):\n if b_dummy:\n pool = ThreadPool(num)\n else:\n pool = ProcessPool(num)\n\n return pool", "def get_driver(secret_key=config.DEFAULT_SECRET_KEY, userid=config.DEFAULT_USERID,\n provider=config.DEFAULT_PROVIDER):\n\n if hasattr(config, 'get_driver'):\n logger.debug('get_driver %s' % config.get_driver)\n return config.get_driver()\n else:\n logger.debug('get_driver {0}@{1}'.format(userid, provider))\n return libcloud.compute.providers.get_driver(\n config.PROVIDERS[provider])(userid, secret_key)", "def get_from_backend(uri, **kwargs):\n\n parsed_uri = urlparse.urlparse(uri)\n scheme = parsed_uri.scheme\n\n backend_class = get_backend_class(scheme)\n\n return backend_class.get(parsed_uri, **kwargs)", "async def delete_work_pool_queue(\n work_pool_name: str = Path(..., description=\"The work pool name\"),\n work_pool_queue_name: str = Path(\n ..., description=\"The work pool queue name\", alias=\"name\"\n ),\n worker_lookups: WorkerLookups = Depends(WorkerLookups),\n db: OrionDBInterface = Depends(provide_database_interface),\n):\n\n async with db.session_context(begin_transaction=True) as session:\n work_pool_queue_id = await worker_lookups._get_work_pool_queue_id_from_name(\n session=session,\n work_pool_name=work_pool_name,\n work_pool_queue_name=work_pool_queue_name,\n )\n\n await models.workers.delete_work_pool_queue(\n session=session, work_pool_queue_id=work_pool_queue_id, db=db\n )", "async def get_queue(self, ctx: commands.Context) -> Optional[QueueManager]:\n\n return self.queue[ctx.guild.id]", "def show_pool(self, pool, **_params):\r\n return self.get(self.pool_path % (pool), params=_params)", "def which_backend(self, backend_name, type_name, conf):\n print(\"backend_name is : <{}>\".format(backend_name))\n if backend_name not in self.records.keys():\n print(\"first get object\")\n self.port_obj = PortFactory.backends.get(backend_name)(type_name, conf)\n print(\"get object from factory : {}\".format(self.port_obj))\n self.records[backend_name] = [type_name]\n else:\n print(\"re-init get object\")\n self.port_obj.reinit(type_name,conf)\n self.records[backend_name].append(type_name)\n print(\"factory records: {}\".format(self.records))\n return self.port_obj", "def _get_target_version_by_name(jira, conf, name):\n versions = jira.project_versions(conf.JIRA['project'])\n for version in versions:\n if getattr(version, 'name') == name:\n return version\n\n return None", "def fetch_build_queue(self, planet=None):\n print(\"Not implemented yet!\")", "def sqs_lookup_url(session, queue_name):\n client = session.client('sqs')\n resp = client.get_queue_url(QueueName=queue_name)\n return resp['QueueUrl']", "def _acquire_base_protocol(self, timeout=None):\n try:\n return self._queue.get(True, timeout)\n except Queue.Empty:\n raise 
NoConnectionsAvailable(\"No base_protocol available from pool within specified timeout\")", "def _get_rabbit_connection_info(queue_key):\n\n\tif consul_server == \"127.0.0.1\":\n\t\treturn \"Consul server is set to 127.0.0.1\", None\n\tlook_for_service_name = \"docker-rabbitmq-5672\"\n\tfound_service = config_manager.discover_service(consul_server, look_for_service_name)\n\tif found_service.__class__.__name__ not in (\"list\", \"tuple\"):\n\t\treturn \"Service class not in expected format\", None\n\tif len(found_service) == 0:\n\t\treturn \"No services found for `%s`\" % look_for_service_name, None\n\n\trabbitmq_host = found_service[0][\"Address\"]\n\trabbitmq_port = int(found_service[0][\"ServicePort\"])\n\n\tconfig_arr = config_manager.get_config(consul_server, [queue_key, \"rabbitmq_user\", \"rabbitmq_pass\"])\n\trabbit_username = config_arr[0][\"rabbitmq_user\"]\n\trabbit_password = config_arr[0][\"rabbitmq_pass\"]\n\n\ttry:\n\t\tcredentials = pika.PlainCredentials(rabbit_username, rabbit_password)\n\t\tparameters = pika.ConnectionParameters(rabbitmq_host, rabbitmq_port, \"/\", credentials,\n\t\t heartbeat=0, blocked_connection_timeout=None)\n\t\tconnection = pika.BlockingConnection(parameters)\n\t\treturn None, connection\n\texcept Exception, ex:\n\t\treturn traceback.print_exc(), None", "def get_mongo_db(host, port, name):\n client = MongoClient(host, port)\n db = client[name]\n return db", "def _determine_resource_pool(session, vm_):\n resource_pool = \"\"\n if \"resource_pool\" in vm_.keys():\n resource_pool = _get_pool(vm_[\"resource_pool\"], session)\n else:\n pool = session.xenapi.pool.get_all()\n if not pool:\n resource_pool = None\n else:\n first_pool = session.xenapi.pool.get_all()[0]\n resource_pool = first_pool\n pool_record = session.xenapi.pool.get_record(resource_pool)\n log.debug(\"resource pool: %s\", pool_record[\"name_label\"])\n return resource_pool", "def queue_maker(queue, bucket_name):\n scraper = key_scraper.KaleidoscopeKeyScraper(\n bucket_name=bucket_name,\n queue=queue,\n )\n scraper.add_keys_to_queue()\n\n return None", "def get_by_name(cls, context, name):\n # get deployable_obj_list for one device_id\n dep_obj = Deployable.get_by_name(context, name)\n driver_ah_obj_list = DriverAttachHandle.list(context, dep_obj.id)\n # get driver_attr_obj_list fro this dep_obj\n driver_attr_obj_list = DriverAttribute.list(context, dep_obj.id)\n driver_dep_obj = cls(context=context, name=dep_obj.name,\n num_accelerators=dep_obj.num_accelerators,\n attribute_list=driver_attr_obj_list,\n attach_handle_list=driver_ah_obj_list)\n return driver_dep_obj", "def get_backend():\n return sys.modules[__name__]", "def get_backend():\n return sys.modules[__name__]", "def get_backend():\n\n return sys.modules[__name__]", "def get_backend():\n\n return sys.modules[__name__]", "def get_or_create_project(group, project_label):\n\n print(f\"Looking for prject.label {project_label}\")\n projects = group.projects.find(f\"label={project_label}\")\n if len(projects) > 0:\n print(f\"Found it.\")\n project = projects[0]\n print(f\"project.label {project.label}\")\n print(f\"project.id {project.id}\")\n else:\n print(\"Project not found - Creating it.\")\n project = group.add_project(label=f\"{project_label}\")\n print(f\"project.label {project.label}\")\n print(f\"project.id {project.id}\")\n return project", "def _get_driver(\n cls, platform: str, variant: Optional[str]\n ) -> Tuple[Union[Type[AsyncNetworkDriver], Type[AsyncGenericDriver]], Dict[str, Any]]:\n additional_kwargs: Dict[str, Any] = 
{}\n final_driver: Union[Type[AsyncGenericDriver], Type[AsyncNetworkDriver]]\n\n if platform in cls.CORE_PLATFORM_MAP:\n final_driver = cls.CORE_PLATFORM_MAP[platform]\n msg = f\"Driver '{final_driver}' selected from scrapli core drivers\"\n else:\n final_driver, additional_kwargs = cls._get_community_driver(\n community_platform_name=platform, variant=variant\n )\n msg = (\n f\"Driver '{final_driver}' selected from scrapli community platforms, with the \"\n f\"following platform arguments: '{additional_kwargs}'\"\n )\n\n logger.info(msg)\n return final_driver, additional_kwargs" ]
[ "0.7773466", "0.6421427", "0.61614996", "0.58672553", "0.58434623", "0.5762848", "0.56201243", "0.5590634", "0.5587302", "0.5487426", "0.54723954", "0.5447157", "0.5354828", "0.5348546", "0.53464115", "0.52886975", "0.5276325", "0.5234317", "0.521486", "0.518824", "0.51820177", "0.5158801", "0.5154775", "0.5154775", "0.5143757", "0.5136378", "0.51141834", "0.5110599", "0.510987", "0.5085572", "0.5047492", "0.5018092", "0.5009015", "0.4975758", "0.49750438", "0.4945455", "0.49354726", "0.4930682", "0.49247462", "0.49136448", "0.49136317", "0.490766", "0.49060932", "0.4905442", "0.4897388", "0.48716784", "0.48711646", "0.4842844", "0.48285022", "0.48188198", "0.48187637", "0.4798839", "0.4798032", "0.4790421", "0.47901636", "0.47896808", "0.4780815", "0.47679564", "0.47500598", "0.4744811", "0.4744025", "0.47400093", "0.47340822", "0.47323895", "0.47318834", "0.4729889", "0.4723564", "0.46995974", "0.469498", "0.46829015", "0.46824217", "0.46769503", "0.46675003", "0.4664264", "0.46609217", "0.46530432", "0.46387142", "0.4631424", "0.462783", "0.46207872", "0.461786", "0.46026388", "0.4594921", "0.45902088", "0.45901957", "0.4586921", "0.4579844", "0.45779046", "0.45752442", "0.45706338", "0.45627937", "0.45608371", "0.45583868", "0.45513704", "0.45421585", "0.45421585", "0.45393005", "0.45393005", "0.45392346", "0.4536857" ]
0.8907264
0
Get storage driver, preferably cached, from a pool name.
def get_driver(self, pool_id, pool_conf=None): try: return self._drivers[pool_id] except KeyError: # NOTE(cpp-cabrera): cache storage driver connection self._drivers[pool_id] = self._init_driver(pool_id, pool_conf) return self._drivers[pool_id]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_storage_backend(self):\n return self.client.info()['Driver']", "def storage_backend_get_by_name(context, name, inactive=False):\n return _find_storage_backend(context, dict(name = name), True, None, inactive=inactive)", "def _get_driver(self, driver_name):\n driver = lb_const.SERVICE_TYPE + driver_name\n return self.drivers[driver]", "def get_backend(name):\n return _DEFAULT_PROVIDER.get_backend(name)", "def get_driver(drv):\n return GenericDriver.get_driver(drv)", "def get_storage(self, name):\r\n if name not in self._storages:\r\n for suffix, engine in self.STORAGE_MAP.iteritems():\r\n if name.endswith(suffix):\r\n self._storages[name] = engine(self.get_filepath(name))\r\n break\r\n\r\n if name in self._storages:\r\n return self._storages[name]\r\n else:\r\n raise KeyError('{} does not have a valid suffix'.format(name))", "def get_driver(driver_name):\n try:\n o = drivers[driver_name]\n if type(o) == str:\n exec 'd = %s()' % o\n else:\n d = o()\n return d\n except KeyError:\n raise ValueError('Unknown driver name: \"{0}\"'.format(driver_name))", "def _get_pool(name=None, session=None):\n if session is None:\n session = _get_session()\n pools = session.xenapi.pool.get_all()\n for pool in pools:\n pool_record = session.xenapi.pool.get_record(pool)\n if name in pool_record.get(\"name_label\"):\n return pool\n return None", "def _get_storage_backend(fq_classname):\n LOG.debug('Running _get_storage_backend with fq_classname [%s]'\n % fq_classname)\n\n if not fq_classname:\n return None\n\n (modname, clname) = fq_classname.rsplit('.', 1)\n # A test import of the backend storage class should have been undertaken\n # at app startup in django_drf_filepond.apps.ready so any failure\n # importing the backend should have been picked up then.\n mod = importlib.import_module(modname)\n storage_backend = getattr(mod, clname)()\n LOG.info('Storage backend instance [%s] created...' 
% fq_classname)\n\n return storage_backend", "def get_storage(storage_dsn):\n storage_scheme = dsnparse.parse(storage_dsn).scheme\n storage_cls = STORAGE_REGISTRY.get(storage_scheme)\n if not storage_cls:\n logging.error(\"Can't find storage for given dsn.\")\n sys.exit(-1)\n return storage_cls(dsn=storage_dsn)", "def get_storage_provider(uri):\n for provider in ProviderFactory.get_storage_providers():\n try:\n supports = provider.supports_storage(uri) # type: ignore[union-attr]\n except BaseException as e:\n communication.warn(f\"Couldn't test provider {provider}: {e}\")\n else:\n if supports:\n return provider(uri=uri) # type: ignore[call-arg]\n\n raise errors.DatasetProviderNotFound(uri=uri)", "def _get_pool_by_name(self, pool_name):\n pool_manager = PoolManager(organization_name=self._organization_name,\n project_name=self._project_name, creds=self._creds)\n pools = pool_manager.list_pools()\n return next((pool for pool in pools.value if pool.name == pool_name), None)", "def get_driver(self, shard_id):\n\n try:\n return self._drivers[shard_id]\n except KeyError:\n # NOTE(cpp-cabrera): cache storage driver connection\n self._drivers[shard_id] = self._init_driver(shard_id)\n\n return self._drivers[shard_id]", "def storage_pool_get(context, storage_pool_id):\n return _storage_pool_get(context, storage_pool_id)", "def get_driver(driver_name):\n if driver_name == 'sqlite3':\n import sqlite3 as db_driver\n elif driver_name == 'cx_Oracle':\n import cx_Oracle as db_driver\n elif driver_name == 'pyodbc':\n import pyodbc as db_driver\n elif driver_name == 'pypyodbc':\n import pypyodbc as db_driver\n elif driver_name == 'psycopg2':\n import psycopg2 as db_driver\n elif driver_name == 'PyMySql':\n import PyMySql as db_driver\n elif driver_name == 'pymssql':\n import pymssql as db_driver\n else:\n # TODO: pick a better exception type and message\n raise ImportError\n return db_driver", "def _getDriver(self):\n if not hasattr(self, '_driver'):\n with self._getDatasetLock:\n if not self.dataset or not self.dataset.GetDriver():\n self._driver = None\n else:\n self._driver = self.dataset.GetDriver().ShortName\n return self._driver", "def _get_driver_from_dsn(self, dsn):\n\n return dsn.split(':')[0]", "def get_pool(name):\n if name not in _CONNECTIONS:\n add_pool(name)\n return _CONNECTIONS[name]", "def get_backend_by_name(cls_str):\n # type: (str) -> Backend\n try:\n return globals()[cls_str]()\n except KeyError:\n raise InvalidBackendClass('Invalid backend class name: {cls}'.format(cls=cls_str))", "def _get_pool_path( self, pool_name ):\n\t\ttry:\n\t\t\treturn self.storage_pools[pool_name].path\n\t\texcept KeyError:\n\t\t\treturn ''", "def lookup(self, queue, project=None):\n\n try:\n pool_id = self._pool_id(queue, project)\n except errors.QueueNotMapped as ex:\n LOG.debug(ex)\n\n return self.get_default_pool(use_listing=False)\n\n return self.get_driver(pool_id)", "def get_by_url(self, url, pool_name=None):\n\t\tif not pool_name:\n\t\t\treturn self.pool[url]\n\t\treturn getattr(self, pool_name)[url]", "def driver(self):\n return self.rpc.call(MsfRpcMethod.DbDriver, [{}])['driver']", "def get_default_pool():\n return 'tank'", "def driver_from_file(input_file):\n file_ext = os.path.splitext(input_file)[1].split(\".\")[1]\n try:\n driver = _file_ext_to_driver()[file_ext]\n except KeyError:\n raise errors.MapcheteDriverError(\n \"no driver could be found for file extension %s\" % file_ext)\n if len(driver) == 1:\n return driver[0]\n else:\n raise errors.MapcheteDriverError(\n \"error determining read 
driver from file %s\" % input_file)", "def find_module(self, name):\n if name in self.pool:\n return self.pool[name]\n else:\n return None", "def get_storage(path=None, options=None):\n path = path or settings.STORAGE\n option = options or {}\n options = options or settings.STORAGE_OPTIONS\n if not path:\n raise ImproperlyConfigured('You must specify a storage class using '\n 'DBBACKUP_STORAGE settings.')\n storage_module = import_module(path)\n return storage_module.Storage(**options)", "def get_plugin_loader(name):\n try:\n mgr = stevedore.DriverManager(namespace=PLUGIN_NAMESPACE,\n invoke_on_load=True,\n name=name)\n except RuntimeError:\n raise exceptions.NoMatchingPlugin(name)\n\n return mgr.driver", "def get_pool(self):\n try:\n return self._pool\n except AttributeError:\n db_url = getattr(settings, self.name)\n self._pool = PostgresConnectionPool.for_url(db_url)\n return self._pool", "def load_backend(name, options=None):\n if name is None:\n assert options is None\n return get_default()\n if options is None:\n options = {}\n if name not in _backends:\n raise UnknownBackend(name)\n try:\n res = _backends[name]()(**options)\n except Exception as e:\n raise LoadingError(name) from e\n return res", "def get_global_storage(self, name: str) -> Any:\n return self.global_storage[name]", "def get_driver(self, **kwargs) -> Driver:\n from squirrel.framework.plugins.plugin_manager import squirrel_plugin_manager\n\n plugins: list[list[type[Driver]]] = squirrel_plugin_manager.hook.squirrel_drivers()\n for plugin in plugins:\n for driver_cls in plugin:\n if driver_cls.name == self.driver_name:\n # Problem: If users provide \"storage_options\" in the `kwargs` and the `self.driver_kwargs`\n # already defines \"storage_options\", then vanilla dict merging\n # (i.e., {**self.driver_kwargs, **kwargs}) will overwrite the \"storage_options\" in\n # `self.driver_kwargs` entirely. 
This is undesired, since important information like\n # bucket configurations (e.g., \"requester_pays\") may be stored in the `self.driver_kwargs`\n # \"storage_options\", which users don't want to provide again using `kwargs`.\n # Solution: The below mechanism merges the \"storage_options\" in `kwargs` with the existing\n # \"storage_options\" in `self.driver_kwargs` (while the newly passed \"storage_options\"\n # in `kwargs` take precendence).\n kwargs[\"storage_options\"] = {\n **self.driver_kwargs.get(\"storage_options\", {}),\n **kwargs.get(\"storage_options\", {}),\n }\n return driver_cls(catalog=self._catalog, **{**self.driver_kwargs, **kwargs})\n\n raise ValueError(f\"driver {self.driver_name} not found\")", "def driver(self):\n\n if not self._driver_cache:\n self._driver_cache = self._driver(self)\n\n return self._driver_cache", "def get_backend(self, name):\n if name == DATABASE_TYPE_MYSQL:\n ret = 2\n elif name == DATABASE_TYPE_POSTGRESQL:\n ret = 3\n elif name == DATABASE_TYPE_SQLITE:\n ret = 4\n # sqlcoder: this assignment fixes unicode problems for me with sqlite (windows, cp1252)\n # feel free to remove or improve this if you understand the problems\n # better than me (not hard!)\n Charset.not_needed1, Charset.not_needed2, Charset.not_needed3 = True, True, True\n else:\n raise ValueError('Unsupported database backend: %s' % self.supported_databases[name].db_server)\n\n return ret", "def find_driver_class(self, scheme_or_url: str) -> Optional[Type[Driver]]:\n index = scheme_or_url.find(\":\")\n if index > 0:\n scheme = scheme_or_url[0:index]\n else:\n scheme = scheme_or_url\n\n return self.drivers.get(scheme.lower())", "def get_device_pool(arn=None):\n pass", "def find_backend(cls) -> IBackend:\n cls.Lock.acquire()\n try:\n return cls._load_backend()\n finally:\n cls.Lock.release()", "def get_cache(url='memory://'):\n\n parsed = parse.urlparse(url)\n backend = parsed.scheme\n\n query = parsed.query\n # NOTE(flaper87): We need the following hack\n # for python versions < 2.7.5. Previous versions\n # of python parsed query params just for 'known'\n # schemes. This was changed in this patch:\n # http://hg.python.org/cpython/rev/79e6ff3d9afd\n if not query and '?' 
in parsed.path:\n query = parsed.path.split('?', 1)[-1]\n parameters = parse.parse_qsl(query)\n kwargs = {'options': dict(parameters)}\n\n mgr = driver.DriverManager('neutron.openstack.common.cache.backends', backend,\n invoke_on_load=True,\n invoke_args=[parsed],\n invoke_kwds=kwargs)\n return mgr.driver", "def get_pool(self, name, dc, cluster):\n cluster_obj = self.get_cluster(cluster, dc)\n for rp in cluster_obj.resourcePool.resourcePool:\n if rp.name == name:\n return rp", "def backend_pool_type(self) -> Optional[pulumi.Input[Union[str, 'BackendPoolType']]]:\n return pulumi.get(self, \"backend_pool_type\")", "def driver_load(self, name):\r\n return AbstractServiceManager.service_load(self, name)", "def storage_backend_get_by_id(context, id, inactive=False):\n return _find_storage_backend(context, dict(id = id), True, None, inactive=inactive)", "def by_name(cls, name):\n if name in cls._registry:\n result = cls._registry[name]\n else:\n result = cls._registry[name] = cls(bind=Session._datastores.get(name))\n return result", "def get_storage(self, schema, storage, path, params=None):\n return self.storages[storage](schema, path, params)", "def get_pool():\n app = get_app()\n return app['pool']", "def _get_driver():\n return etcd_driver.get_driver()", "def _load_driver(backend, **kargs):\n bk_module = importlib.import_module('backend', __package__)\n driver_cls = getattr(bk_module, str.capitalize(backend) + 'Backend')\n return driver_cls(**kargs)", "def _determine_storage_repo(session, resource_pool, vm_):\n storage_repo = \"\"\n if \"storage_repo\" in vm_.keys():\n storage_repo = _get_sr(vm_[\"storage_repo\"], session)\n else:\n storage_repo = None\n if resource_pool:\n default_sr = session.xenapi.pool.get_default_SR(resource_pool)\n sr_record = session.xenapi.SR.get_record(default_sr)\n log.debug(\"storage repository: %s\", sr_record[\"name_label\"])\n storage_repo = default_sr\n else:\n storage_repo = None\n log.debug(\"storage repository: %s\", storage_repo)\n return storage_repo", "def _get_backend_module(name):\n if name == \"numpy\":\n import numpy as np\n\n return np\n if name == \"numpy.ma\":\n import numpy as np\n\n return np.ma\n if name == \"torch\":\n import torch\n\n return torch\n if name == \"jax\":\n import jax\n import jax.numpy as jnp\n\n _JAX_KEY = jax.random.PRNGKey(0)\n return jnp\n if name == \"tensorflow\":\n import tensorflow as tf\n\n return tf", "def get_backend_class(backend):\n # NOTE(sirp): avoiding circular import\n from glance.store.http import HTTPBackend\n from glance.store.s3 import S3Backend\n from glance.store.swift import SwiftBackend\n from glance.store.filesystem import FilesystemBackend\n\n BACKENDS = {\n \"file\": FilesystemBackend,\n \"http\": HTTPBackend,\n \"https\": HTTPBackend,\n \"swift\": SwiftBackend,\n \"s3\": S3Backend}\n\n try:\n return BACKENDS[backend]\n except KeyError:\n raise UnsupportedBackend(\"No backend found for '%s'\" % backend)", "def open_storage(data_source_name, db_type=\"dbm\", mode=None):\n try:\n klass, supports_mode = _storage_types[db_type]\n except KeyError:\n raise NoSuchClassifierError(db_type)\n try:\n if supports_mode and mode is not None:\n return klass(data_source_name, mode)\n else:\n return klass(data_source_name)\n except dbmstorage.error, e:\n if str(e) == \"No dbm modules available!\":\n print >> sys.stderr, \"\\nYou do not have a dbm module available \" \\\n \"to use. 
You need to either use a pickle (see the FAQ)\" \\\n \", use Python 2.3 (or above), or install a dbm module \" \\\n \"such as bsddb (see http://sf.net/projects/pybsddb).\"\n sys.exit()", "def get_from_cache(cls, file_name):\n random.shuffle(cls.CACHE_BACKENDS)\n fname = None\n for cb in cls.CACHE_BACKENDS:\n if not cb.health_check():\n continue\n fname = cb.get_from_cache(file_name)\n if fname:\n break\n return fname", "def _instantiate_backend_from_name(name, options):\r\n # Parse backend name\r\n\r\n try:\r\n parts = name.split('.')\r\n module_name = '.'.join(parts[:-1])\r\n class_name = parts[-1]\r\n except IndexError:\r\n raise ValueError('Invalid event track backend %s' % name)\r\n\r\n # Get and verify the backend class\r\n\r\n try:\r\n module = import_module(module_name)\r\n cls = getattr(module, class_name)\r\n if not inspect.isclass(cls) or not issubclass(cls, BaseBackend):\r\n raise TypeError\r\n except (ValueError, AttributeError, TypeError, ImportError):\r\n raise ValueError('Cannot find event track backend %s' % name)\r\n\r\n backend = cls(**options)\r\n\r\n return backend", "def get_store(store_name: str):\n return store_handler.get_store(store_name)", "def get_storage(local_path=None, redis_index=None):\n from config import STORAGE\n if STORAGE[\"Method\"] == \"local\":\n return LocalStorage(path=local_path or STORAGE.get(\"LocalPath\"))\n elif STORAGE[\"Method\"] == \"redis\":\n return RedisStorage(\n index=redis_index or STORAGE.get(\"RedisIndex\"),\n redis_url=STORAGE.get(\"RedisURL\")\n )\n else:\n raise ValueError(\"Invalid storage method\")", "def get_backend():\n\n return sys.modules[__name__]", "def get_backend():\n\n return sys.modules[__name__]", "def get_backend():\n return sys.modules[__name__]", "def get_backend():\n return sys.modules[__name__]", "def get_storage_engine(settings=None):\n if not settings:\n settings = global_settings\n\n return _setup_engine(settings.STORAGE[\"engine\"], settings.STORAGE[\"params\"])", "def by_name(cls, name):\n datastore = Session._datastores.get(name)\n if datastore is None:\n return None\n\n for odmsession in cls._session_registry.values():\n if odmsession.bind is datastore:\n return odmsession\n else:\n return ThreadLocalODMSession(bind=datastore)", "def _init_driver(self, pool_id, pool_conf=None):\n if pool_id is not None:\n pool = self._pools_ctrl.get(pool_id, detailed=True)\n else:\n pool = pool_conf\n conf = utils.dynamic_conf(pool['uri'], pool['options'],\n conf=self._conf)\n storage = utils.load_storage_driver(conf,\n self._cache,\n control_driver=self.control)\n return pipeline.DataDriver(conf, storage, self.control)", "def get_driver_filename(self, os_name=None):\n raise NotImplementedError", "def get_driver(secret_key=config.DEFAULT_SECRET_KEY, userid=config.DEFAULT_USERID,\n provider=config.DEFAULT_PROVIDER):\n\n if hasattr(config, 'get_driver'):\n logger.debug('get_driver %s' % config.get_driver)\n return config.get_driver()\n else:\n logger.debug('get_driver {0}@{1}'.format(userid, provider))\n return libcloud.compute.providers.get_driver(\n config.PROVIDERS[provider])(userid, secret_key)", "def get_pool(self, pool_name=None, pool_id=None):\n\n id_or_name = pool_id if pool_id else pool_name\n errormsg = \"Failed to get the pool {0} with error {1}\"\n\n try:\n obj_pool = self.unity_conn.get_pool(name=pool_name, _id=pool_id)\n\n if pool_id and obj_pool.existed:\n LOG.info(\"Successfully got the pool object %s\",\n obj_pool)\n return obj_pool\n if pool_name:\n LOG.info(\"Successfully got pool %s\", obj_pool)\n return 
obj_pool\n else:\n msg = \"Failed to get the pool with {0}\".format(\n id_or_name)\n LOG.error(msg)\n self.module.fail_json(msg=msg)\n\n except Exception as e:\n msg = errormsg.format(id_or_name, str(e))\n LOG.error(msg)\n self.module.fail_json(msg=msg)", "def disk_driver(self, disk_id):\n try:\n driver = self.disk(disk_id).find(\"DRIVER\").text\n except AttributeError:\n return None", "def get_default_pool(con):\n try:\n return con.floating_ip_pool_read(fq_name=conf.get('default_pool', 'UNEXPECTED_VALUE'))\n except NoIdError:\n log.debug('Unable to find pool.')\n return None", "def backend_name(self) -> str:\n return self._db_data.backend", "def find_backend(path, backends):\n for backend in backends:\n if backend.path == path:\n return backend\n\n return None", "def find_backend(path, backends):\n for backend in backends:\n if backend.path == path:\n return backend\n\n return None", "def detect_backend():\n try:\n from termpixels.unix import UnixBackend\n return UnixBackend()\n except:\n try:\n from termpixels.win32_vt import Win32VtBackend\n return Win32VtBackend()\n except Exception as e:\n raise e\n from termpixels.win32 import Win32Backend\n return Win32Backend()", "def get_driver(self):\n\t\treturn self.driver", "def get_cls(dataset_name):\n return find_dataset_using_name(dataset_name)", "def storage_class(self) -> Optional[Sequence['outputs.CSIPowerStoreSpecDriverStorageClass']]:\n return pulumi.get(self, \"storage_class\")", "def get_instance():\r\n try:\r\n module_instance = importlib.import_module(\r\n f\"{__name__}.{SETTINGS.db_type_ccgp_crawler.lower()}\")\r\n except ImportError as error:\r\n LOG.error(error)\r\n return module_instance.CCGPBidInfoStorage", "def driver_from_extension(file_extension: str) -> str:\n file_extension = file_extension.lstrip(\".\")\n all_drivers_extensions = {}\n for v in drivers:\n driver = v.load()\n try:\n driver_extensions = driver.METADATA.get(\"file_extensions\", []).copy()\n all_drivers_extensions[driver.METADATA[\"driver_name\"]] = driver_extensions\n if driver_extensions and file_extension in driver_extensions:\n return driver.METADATA[\"driver_name\"]\n except AttributeError: # pragma: no cover\n pass\n else:\n raise ValueError(\n f\"driver name for file extension {file_extension} could not be found: {all_drivers_extensions}\"\n )", "def get_storage(store: Optional[StorageEngine] = None) -> StorageEngine:\n if store is not None:\n return store\n else:\n if _storage_stack.top is not None:\n out: StorageEngine = _storage_stack.top\n return out\n else:\n raise RuntimeError(\"No Storage instance available.\")", "def driver_name(self):\n return self._driver_name", "def storage_class(self) -> Optional[Sequence['outputs.CSIVXFlexOSSpecDriverStorageClass']]:\n return pulumi.get(self, \"storage_class\")", "def _get_backend(args):\n if args.backend == 'gatttool':\n backend = GatttoolBackend\n elif args.backend == 'bluepy':\n backend = BluepyBackend\n elif args.backend == 'pygatt':\n backend = PygattBackend\n else:\n raise Exception('unknown backend: {}'.format(args.backend))\n return backend", "def create_pool(self, device, tier, poolname):\n print \"Adding pool %s...\" % poolname\n pool = device.findRemoteStoragePool(StoragePoolPredicates.name(poolname))\n pool.setTier(tier)\n pool.save()\n return pool", "def get_driver(url='neo4j', neo4j_auth='neo4j/neo4j'):\n from neo4j import GraphDatabase\n\n auth_parts = neo4j_auth.split('/')\n if len(auth_parts) == 2:\n driver = GraphDatabase.driver('bolt://' + url + ':7687',\n auth=(auth_parts[0], 
auth_parts[1]))\n else:\n driver = GraphDatabase.driver('bolt://' + url + ':7687')\n\n return driver", "def get_backend(\n self,\n backend_id: str,\n ) -> Optional[Type[BaseCertificateStorageBackend]]:\n return self.get('backend_id', backend_id)", "def get(self, name, default=None):\n return self._storage.get(name, default)", "def get_backend():\n return Connection()", "def storage_class(self) -> Optional[Sequence['outputs.CSIPowerMaxSpecDriverStorageClass']]:\n return pulumi.get(self, \"storage_class\")", "def getStorageObject(implementation, the_element):\n module=__import__(implementation)\n for i in implementation.split(\".\")[1:]:\n module = getattr(module, i)\n if module:\n cls=None\n for key in module.__dict__.keys():\n import inspect\n if inspect.isclass(getattr(module, key)) and inspect.getclasstree([getattr(module, key)], True)[0][0] == Storage:\n cls=getattr(module, key)\n break\n if cls:\n try:\n inst=object.__new__(cls)\n Storage.log.debug(\"class is %s\" %(cls))\n inst.__init__(element=the_element)\n connname=inst.getConnectionName()\n if not StorageConnections.has_key(connname):\n Storage.log.debug(\"Creating new storage connection %s %s\" %(connname, StorageConnections.keys()))\n StorageConnections[connname]=inst\n return inst\n else:\n Storage.log.debug(\"Returning already established storage connection %s\" %(connname))\n return StorageConnections[connname]\n except:\n import traceback\n traceback.print_exc()\n raise IncompatibleObjectException(cls, Storage)\n else:\n raise IncompatibleObjectException(getattr(module, key), Storage)\n else:\n raise ModuleNotFoundException(implementation)", "def storage_class(self) -> Optional[Sequence['outputs.CSIUnitySpecDriverStorageClass']]:\n return pulumi.get(self, \"storage_class\")", "def get_driver(self):\n return self.driver", "def for_provider(provider_name):\n try:\n cls = _instance.providers_cls[provider_name]\n return StorageBuilder(provider_name, cls)\n except KeyError:\n raise ValueError('No provider implementation registered for name: %s' % provider_name)", "def get_store_from_store_identifier(store_identifier):\n scheme_map = {}\n enabled_backends = CONF.enabled_backends\n try:\n scheme = enabled_backends[store_identifier]\n except KeyError:\n msg = _(\"Store for identifier %s not found\") % store_identifier\n raise exceptions.UnknownScheme(msg)\n\n if scheme not in location.SCHEME_TO_CLS_BACKEND_MAP:\n raise exceptions.UnknownScheme(scheme=scheme)\n\n scheme_info = location.SCHEME_TO_CLS_BACKEND_MAP[scheme][store_identifier]\n store = scheme_info['store']\n\n if not store.is_capable(capabilities.BitMasks.DRIVER_REUSABLE):\n # Driver instance isn't stateless so it can't\n # be reused safely and need recreation.\n store_entry = scheme_info['store_entry']\n store = _load_multi_store(store.conf, store_entry, invoke_load=True,\n backend=store_identifier)\n store.configure()\n try:\n loc_cls = store.get_store_location_class()\n for new_scheme in store.get_schemes():\n if new_scheme not in scheme_map:\n scheme_map[new_scheme] = {}\n\n scheme_map[new_scheme][store_identifier] = {\n 'store': store,\n 'location_class': loc_cls,\n 'store_entry': store_entry\n }\n location.register_scheme_backend_map(scheme_map)\n except NotImplementedError:\n scheme_info['store'] = store\n\n return store", "def new_driver(name=\"chrome\"):\n if not name in DRIVERS:\n raise Exception(\"No driver support for '%s'\" % name)\n return DRIVERS[name]()", "def get_backend():\n from cryptography.hazmat.backends import default_backend\n return 
default_backend()", "def get_driver_filename(self, os_name=None):\n if os_name is None:\n os_name = platform.system()\n if os_name == \"Windows\":\n return \"operadriver.exe\"\n else:\n return \"operadriver\"", "def _init_driver(self, shard_id):\n shard = self._shards_ctrl.get(shard_id, detailed=True)\n conf = utils.dynamic_conf(shard['uri'], shard['options'])\n return utils.load_storage_driver(conf, self._cache)", "def pool(self):\n return self._properties.get('pool')", "def dataset_by_name(name):\n return _datasets[name.lower()]", "def get_by_backend_name(cls, backend_name):\r\n cls._check_configured()\r\n for enabled in cls._ENABLED.values():\r\n if enabled.BACKEND_CLASS.name == backend_name:\r\n return enabled", "def get_dbapi_module(name):\n return import_module(name)", "def get_nic_driver(pci_id):\n driverlist = dict(zip(NICS.values(), DRIVERS.keys()))\n try:\n driver = DRIVERS[driverlist[pci_id]]\n except Exception as e:\n driver = None\n return driver" ]
[ "0.6834399", "0.68316096", "0.6733757", "0.67019266", "0.66133344", "0.6580576", "0.65020525", "0.63906044", "0.6277041", "0.6253371", "0.6227523", "0.6190082", "0.6147456", "0.61158353", "0.60346544", "0.60328054", "0.60253716", "0.6005569", "0.5980574", "0.5942194", "0.5914757", "0.585105", "0.5825521", "0.5786914", "0.5762829", "0.5761035", "0.5738884", "0.5709518", "0.5702852", "0.57024425", "0.5635209", "0.56323373", "0.5628557", "0.5590597", "0.5586431", "0.5561974", "0.5559952", "0.55588394", "0.55333066", "0.5528252", "0.5526499", "0.5524993", "0.5508844", "0.54991597", "0.54952186", "0.5491723", "0.54661477", "0.5463851", "0.5457377", "0.54535496", "0.54294676", "0.5425603", "0.5424789", "0.54175365", "0.54026973", "0.53883016", "0.53883016", "0.5351938", "0.5351938", "0.53498894", "0.5334141", "0.5332859", "0.5331702", "0.5325422", "0.5320128", "0.5316971", "0.53151804", "0.52889603", "0.52856046", "0.52856046", "0.52845895", "0.5280519", "0.5279146", "0.527339", "0.5273282", "0.52730805", "0.5272682", "0.5260566", "0.52599454", "0.5252669", "0.5232944", "0.5225961", "0.5225599", "0.5199945", "0.51885414", "0.51761955", "0.51585037", "0.5157284", "0.5143318", "0.5122966", "0.51211023", "0.5116392", "0.5107412", "0.51061535", "0.510195", "0.5090322", "0.50888616", "0.507966", "0.5078465", "0.50783724" ]
0.7380291
0
Open the URL for the given DOI in the default browser
def open_doi(doi): webbrowser.open_new_tab(DOI_URL % doi)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_in_browser(self):\n webbrowser.open(self.url)", "def open(url):\r\n webbrowser.open(url)", "def browser_open(story_id, arguments):\r\n\r\n story = load_story(story_id, arguments)\r\n\r\n webbrowser.open(story.url)", "def open_url(name):\n url = localReadConfig.get_webServer(name)\n browser = open_browser()\n browser.get(url)\n return browser", "def open_browser(url):\n import webbrowser\n webbrowser.open_new(url)", "def openurl(url):\n\n # Open the URL\n webbrowser.open(url)", "def open_url(self):\n QDesktopServices.openUrl(self.url)", "def run(self, url=''):\n if url:\n webbrowser.open(url)", "def browse( self ):\n webbrowser.open(self.url())", "def open_webpage(browser, url, case, version, package):\n browser_obj = Browser(browser, version, case, package, url)\n if browser == \"firefox\":\n firefox(browser_obj)\n elif browser == \"opera\":\n opera(browser_obj)\n elif package == \"chromium\":\n chromium(browser_obj)\n elif browser == \"ie\":\n iexplorer(browser_obj)\n elif browser == \"edge\":\n edge(browser_obj)", "def openDocsUrl(self):\r\n url = QUrl(\"http://freeseer.readthedocs.org\")\r\n QDesktopServices.openUrl(url)", "def open_doc(self, widget, data=None):\n\t\twebbrowser.open('file://'+os.path.abspath(\"./doc/build/html/index.html\"))", "def web_view(self):\n try:\n webbrowser.open(\"https://editor.openeo.org/?server={}\".format(self.backend.url))\n except:\n pass\n # QWebEngineView, QWebView...", "def _on_articles_open_doi(self, evt=None):\n \n # get selected articles\n articles = self._articles_view.GetSelectedArticles()\n if not articles:\n return\n \n # open web\n for article in articles:\n if article.doi:\n link = \"https://dx.doi.org/%s\" % article.doi\n try: webbrowser.open(link, autoraise=1)\n except: pass", "def openSite(url):\n\timport webbrowser\n\twebbrowser.open('http://www.' 
+ url + '.com', 2)", "def open(self, event=None, url=None):\n url = url or self.server.url\n try:\n import webbrowser\n webbrowser.open(url)\n except ImportError: # pre-webbrowser.py compatibility\n if sys.platform == 'win32':\n os.system('start \"%s\"' % url)\n elif sys.platform == 'mac':\n try:\n import ic\n ic.launchurl(url)\n except ImportError: pass\n else:\n rc = os.system('netscape -remote \"openURL(%s)\" &' % url)\n if rc: os.system('netscape \"%s\" &' % url)", "def open_web_browser(url: str):\n Popen(web_browser + [url], stdout=DEVNULL, stderr=DEVNULL)", "def on_OpenExplorer_clicked(self):\n # TODO: not implemented yet\n #raise NotImplementedError\n\n url=\"http://kfc.matrix.io\"\n\n self.browser.openurl(url)\n self.OnlyDisplay(f\"start {url}\")\n #MATRIXWebutil.open_new(url)\n #MATRIXWebutil.open_new_tab(url)", "def _open_browser(self, single_doc_html):\n url = os.path.join(\"file://\", DOC_PATH, \"build\", \"html\", single_doc_html)\n webbrowser.open(url, new=2)", "def _open_browser(self, single_doc_html):\n url = os.path.join(\n \"file://\", DOC_PATH, \"build\", \"html\", single_doc_html\n )\n webbrowser.open(url, new=2)", "def _open_project(project):\n api_segment = '/_apis/'\n pos = project.url.find(api_segment)\n if pos >= 0:\n url = project.url[:pos + 1] + uri_quote(project.name)\n logger.debug('Opening web page: %s', url)\n webbrowser.open_new(url=url)\n else:\n raise CLIError(\"Failed to open web browser, due to unrecognized url in response.\")", "def open_manual() -> None:\n\n path = os.path.abspath(\n os.path.join(\"docs\", \"_build\", \"html\", \"index.html\"))\n webbrowser.open(\"file:{}\".format(pathname2url(path)))", "def open_url(self, url: str):\n self.driver.get(url)", "def browser_open(url):\n FNULL = open(os.devnull, 'w')\n subprocess.Popen([udata.browser, url], stdout=FNULL, stderr=subprocess.STDOUT )", "def test_open(self):\n page, resources = self.ghost.open(base_url)\n self.assertEqual(page.url, base_url)\n \n self.ghost.click(\"#run\")", "def open_url(self, url):\n if url is None:\n return\n QDesktopServices.openUrl(QUrl(url))", "def open_link(self) -> None:\n\n webbrowser.open_new(self.link)", "def open_browser(self):\n\n webbrowser.open(self.trailer_youtube_url)", "def open_url(self, url):\n\n self.driver.get(url)", "def open_in_explorer(file):\n webbrowser.open(os.path.dirname(p['paths'][file]))", "def open_link(self):\n try:\n # webbrowser.open(self.url) # if you are on Windows OS\n webbrowser.get('safari').open_new_tab(self.url) # if you are on Mac OS\n except(AttributeError):\n self.ids.label.text = self.error_msg", "def open(self):\n self.browser = self._browser()\n\n return self.browser", "def openOnlineHelp(self):\r\n url = QUrl(\"http://freeseer.readthedocs.org\")\r\n QDesktopServices.openUrl(url)", "def browse(notebook):\n nb = select_notebook(notebook)\n click.launch('http://localhost:{0}/{1}/'.format(conf.PORT, nb.path.rel))", "def _docs():\n url = \"https://vanheeringen-lab.github.io/seq2science\"\n if not webbrowser.open(url):\n print(url)", "def open_in_browser(filename):\n subprocess.call([\"firefox\", filename])", "def open_in_web(self):\n self._client_api._open_in_web(url=self.platform_url)", "def open_website(url):\n browser = webdriver.Firefox()\n browser.get(url)\n return browser", "def open_top():\n _open_url_path('')", "def browse():\n rino.browse.open()", "def preview():\n url = \"http://{}:{}\".format(_hostname, _port)\n webbrowser.open(url)", "def open(alias):\n s = db.Series.alias_lookup(alias)\n click.launch(s.url)", "def 
open_docs(host: str, category: str, package: str):\n url = _build_pkg_url(\n host=host,\n category=category, \n pkg_name=package\n )\n webbrowser.open_new_tab(url=url)", "def open(self, index):\n\n index = int(index.strip())\n index -= 1\n section = self.program.state.last_viewed\n storyid = getattr(self.program.state, section)[index]\n data = self.program.state.stories[storyid]\n webbrowser.open(data['url'])", "def onAboutLeoUrl(self,event=None):\n \n try:\n import webbrowser\n webbrowser.open(self.url)\n except:\n g.es(\"not found: \" + self.url)", "def cli(repo, milestone):\n webbrowser.open_new(repo.milestone(milestone).data[\"html_url\"])", "def open_browser():\n def _open_browser():\n if AIPS_WEBSERVER_HOST == \"localhost\":\n webbrowser.open(WEBSERVER_URL + '/%s' % FILE)\n thread = threading.Timer(0.5, _open_browser)\n thread.start()", "def go_to_url(self, url):\n if self.browser is not None:\n self.browser.get(url)\n else:\n print('Browser is not running')", "def argI():\n sentenceID = args.i[0]\n webbrowser.open('https://tatoeba.org/eng/sentences/show/' + sentenceID, new=2)", "def open_url(self):\n self.window = QMainWindow()\n self.ui_linkreader = Ui_LinkReader()\n self.ui_linkreader.setup_ui(self.window)\n self.ui_linkreader.pushButton.setDefault(True)\n self.window.setFixedSize(self.window.width(), self.window.height())\n self.ui_linkreader.pushButton.clicked.connect(self.process_url)\n self.window.show()", "def open_link(self):\n try:\n webbrowser.open(self.url)\n except:\n self.ids.link.text=self.link_message", "def resolveDoi(doi):\n logging.debug('Resolving DOI %s' % doi)\n doiUrl = 'https://doi.org/' + urllib.quote(doi.encode('utf8'))\n page = httpGetDelay(doiUrl)\n trgUrl = page['url']\n logging.debug('DOI %s redirects to %s' % (doi, trgUrl))\n return trgUrl", "def openurl(device, url):\n command = 'openurl \"%s\" \"%s\"' % (device.udid, url)\n _run_command(command)", "def on_axuda_activate(self, widget):\n\n os.system('pydoc -p 1234')\n webbrowser.open_new('http://localhost:1234')", "def openNewIssueUrl(self):\r\n url = QUrl(\"https://github.com/Freeseer/freeseer/issues/new\")\r\n QDesktopServices.openUrl(url)", "def open_web_crawler_window(self, event):\n self.gui.open_web_crawler_window(self.root)", "def open_url(url):\n logger.debug('Opening %s', url)\n _stderr = os.dup(2)\n os.close(2)\n _stdout = os.dup(1)\n os.close(1)\n fd = os.open(os.devnull, os.O_RDWR)\n os.dup2(fd, 2)\n os.dup2(fd, 1)\n try:\n webbrowser.open(url)\n finally:\n os.close(fd)\n os.dup2(_stderr, 2)\n os.dup2(_stdout, 1)", "def open_url(self, url):\n try:\n if url != \"\":\n self.driver.maximize_window()\n self.driver.get(url)\n print(url + \" : url is opened\")\n else:\n print(\"Please enter valid url\")\n except Exception as e:\n print(str(e))", "def open_browser_window(url):\n logger.debug('about to open url \"{url}\" in browser \"{browser}\"'.format(url=url, browser=BROWSER_NAME))\n browser = webbrowser.get(BROWSER_NAME)\n browser.open(url, new=(1 if BROWSER_NEW_WINDOW else 2))", "def followlink(self, event):\n webbrowser.open(self.url)", "def gotoWeb(self,page:str)->None:\n if page=='repo':\n webbrowser.open('http://github.com/ivan866/readTobiiGlasses')\n elif page=='wiki':\n webbrowser.open('http://github.com/ivan866/readTobiiGlasses/wiki')\n elif page=='glasses2API':\n webbrowser.open('http://tobiipro.com/product-listing/tobii-pro-glasses-2-sdk/')\n elif page=='coordSys':\n webbrowser.open('http://developer.tobiipro.com/commonconcepts.html')", "def open_browser(url):\n # Treat Windows 
separately because:\n # 1. /dev/null doesn't exist.\n # 2. subprocess.Popen(['start', url]) doesn't actually pop up the\n # browser even though 'start url' works from the command prompt.\n # Fun!\n # Also, use webbrowser if we are on Linux and xdg-open is not installed.\n #\n # We don't use the webbrowser module on Linux and Mac because some browsers\n # (ahem... Chrome) always print \"Opening in existing browser session\" to\n # the terminal, which is spammy and annoying. So instead we start the\n # browser ourselves and send all its output to /dev/null.\n\n if env_util.IS_WINDOWS:\n _open_browser_with_webbrowser(url)\n return\n if env_util.IS_LINUX_OR_BSD:\n if env_util.is_executable_in_path(\"xdg-open\"):\n _open_browser_with_command(\"xdg-open\", url)\n return\n _open_browser_with_webbrowser(url)\n return\n if env_util.IS_DARWIN:\n _open_browser_with_command(\"open\", url)\n return\n\n import platform\n\n raise Error('Cannot open browser in platform \"%s\"' % platform.system())", "def i_open_the_browser(context, alias):\n def eval_value(value):\n if value.startswith('`') and value.endswith('`'):\n return ast.literal_eval(value[1:-1])\n else:\n return value\n\n context.table.require_column(\"name\")\n context.table.require_column(\"value\")\n\n attrs = {row[\"name\"]: eval_value(row[\"value\"])\n for row in context.table\n if row[\"value\"] != \"---\"}\n\n command_executor = attrs.pop(\"executor\")\n desired_capabilities = attrs\n\n browser = Browser(command_executor, desired_capabilities)\n\n context.selenium_browsers[alias] = browser\n context.selenium_exitstack.enter_context(browser)", "def open_in_web(self, model=None, model_id=None):\n if model is not None:\n model.open_in_web()\n elif model_id is not None:\n self._client_api._open_in_web(url=self.platform_url + '/' + str(model_id) + '/main')\n else:\n self._client_api._open_in_web(url=self.platform_url)", "def explorative_manual(self):\n try:\n webbrowser.open(\"https://openeo.org/documentation/1.0/qgis/#exploring-a-backend\")\n except:\n pass", "def smartx_cmd(ctx):\n webbrowser.open(\"https://smartx.ont.io/\")", "def on_OpenExplorerAccount_clicked(self):\n # TODO: not implemented yet\n #raise NotImplementedError\n url = f\"http://kfc.matrix.io/{self.a0_Address}\"\n\n self.browser.openurl(url)\n self.OnlyDisplay(f\"start {url}\")", "def open_browser():\n def _open_browser():\n webbrowser.open('http://localhost:%s/%s' % (PORT, FILE))\n pass\n thread = threading.Timer(0.5, _open_browser)\n thread.start()", "def show(self):\n webopen(str(self))", "def _about_dialogue(self):\n webbrowser.open('https://github.com/ldrumm/yubikey-totp-gui')", "def button1_press(self):\n\n ext = nuke_link(str(self.lineEdit.text()))\n url = 'https://learn.foundry.com/nuke/developers/70/pythonreference/{}'.format(ext)\n webbrowser.open(url)", "def open(self):\n if self.id_ is None:\n raise MqValueError('DataGrid must be created or saved before opening.')\n domain = GsSession.current.domain.replace(\".web\", \"\")\n if domain == 'https://api.gs.com':\n domain = 'https://marquee.gs.com'\n url = f'{domain}/s/markets/grids/{self.id_}'\n webbrowser.open(url)", "def _help_dialogue(self):\n webbrowser.open('https://github.com/ldrumm/yubikey-totp-gui/wiki')", "def test_open_mainpage(open_browser, url_param):\n open_browser.get(url_param)\n assert open_browser.current_url == url_param\n open_browser.close()", "def open_web_browser(whac_config: WhacConfig) -> None:\n if whac_config.open_web_browser:\n browser = webbrowser.get('chrome')\n 
browser.open('http://localhost:' + str(whac_config.host_port), new=2, autoraise=True)", "def newwindow(url):\n\n # Open the URL\n webbrowser.open_new(url)", "def open(self):\n self.driver.get('{}/submit'.format(self.config.get('Test', 'url')))\n return self", "def open_file_browser(path):\n if sys.platform == 'win32':\n #subprocess.Popen(['start', path], shell=True)\n os.startfile(path)\n\n elif sys.platform == 'darwin':\n subprocess.Popen(['open', path])\n\n else:\n try:\n subprocess.Popen(['xdg-open', path])\n except OSError:\n logger.error(\"Presumably *nix system xdg-open failed for path: {}\".format(path))", "def open(url, *args, **kwargs):\n\tscheme = DEFAULT_SCHEME\n\tif '://' in url:\n\t\tscheme, url = url.split('://')\n\treturn DRIVERS[scheme](url, *args, **kwargs)", "def simple_browser(self, naviguator: str, urlSearch: str):\n\n BrowserNaviguate(\n naviguator,\n 0\n ).browse_with(\n SimpleStarterclient(),\n ExampleUrl(urlSearch)\n )\n\n\n return self", "def _make_doi_clickable(link):\n return f\"https://doi.org/{link}\"", "def open(webpage_url):\n\twith youtube_dl.YoutubeDL(dict(forceurl=True)) as ydl:\n\t\tr = ydl.extract_info(webpage_url, download=False)\n\t\tmedia_url = r['formats'][-1]['url']\n\twebbrowser.open('googlechromes://' + media_url[8:] )", "def open_news_url(self, url):\n\n try:\n if not webbrowser.open_new_tab(url):\n raise webbrowser.Error\n except webbrowser.Error:\n print('Unable to open a web browser, try accessing this URL manually instead:\\n{0}'.format(url))", "def launch_gs_app(name, browser, url):\n print('Opening {} in {}...'.format(name, browser), end=' ', flush=True)\n _gs_web_doc.set_uri(url)\n _gs_web_doc.launch(browser)\n print('Done')", "def show_help():\n\n url = (\n r\"https://agcloud.sharepoint.com/:p:/r/sites/\"\n r\"O365-UG-2HEngineeringSoftware/Shared%20Documents/2H%20Datalab/\"\n r\"DataLab%20Guidance.pptx?d=wcabe347939784784b8d7270cdf7938e7&csf=1&e=9LJsCD\"\n )\n webbrowser.open(url)", "def open_firefox():\r\n driver = install_firefox_proxy(LOCALHOST, PROXY_PORT_NUMBER)\r\n driver.get(STARTING_WEBSITE)", "def get(self, url):\n self.browser.get(url)", "def i_am_on_the_zoo_website():\n driver.get(\"http://www.thetestroom.com/webapp/\")", "def newtab(url):\n\n # Open the URL\n webbrowser.open_new_tab(url)", "def open_pastes(all_pastes):\n #Open all links in browser\n for paste in all_pastes:\n #print(\"[+] URL Opened >> \", all_pastes[paste])\n write_paste_log(all_pastes[paste])\n webbrowser.open(all_pastes[paste])", "def _open_workspace(self, name, workspace):\n workspace_url = self.environment_driver.extract_workspace_url(\n name, workspace)\n result = webbrowser.open(workspace_url, new=2)\n\n return result", "def get_url_from_doi(doi):\n\n try:\n r = requests.head(f\"https://doi.org/{doi}\", allow_redirects=True)\n except requests.exceptions.ConnectionError:\n return None\n\n return r.url", "def open_dataset_page(dataset, year='2017-2018'):\n year_code = get_nhanes_year_code_dict()[year]\n url = 'https://wwwn.cdc.gov/Nchs/Nhanes/%s/%s_%s.htm' % (year, dataset, year_code)\n webbrowser.open(url)", "def go(self, url):\n self.driver.get(url)", "def browser(browser):\n browser.url = settings[\"threescale\"][\"admin\"][\"url\"]\n return browser", "def open_site(site, debug=False):\n\n openercmd = get_browser_command()\n if openercmd:\n fullcmd = ' '.join([openercmd, '\"{}\"'.format(site)])\n if debug:\n print('Found browser opener: {}'.format(openercmd))\n print('Running: {}'.format(fullcmd))\n retcode = ossystem(fullcmd)\n if retcode:\n # opener 
errored\n print('Error opening site: {}'.format(fullcmd))\n return retcode\n else:\n print('No valid command found to open that site: {}'.format(site))\n return 1", "def openemail(event):\n import webbrowser\n webbrowser.open(emailurl)\n close(event)", "def _open_magnet(ctx, link):\n\n system = platform.system().lower()\n if system == 'darwin':\n webbrowser.open(link)\n elif system == 'linux':\n subprocess.Popen(['xdg-open', link]).wait()\n else:\n os.startfile(link)", "def open_file_browser(path: str):\n call(file_browser + [path])", "def open(self):\n self._command = \"open\"" ]
[ "0.7175303", "0.70528334", "0.6925984", "0.68447846", "0.66982275", "0.6694972", "0.654901", "0.65459543", "0.64876926", "0.6484498", "0.6445093", "0.64057773", "0.63870794", "0.6381969", "0.63739616", "0.6357972", "0.6353263", "0.62887174", "0.62686133", "0.6265236", "0.62349075", "0.62276495", "0.62122947", "0.6192641", "0.6192392", "0.6187522", "0.6183133", "0.6181441", "0.6176669", "0.61599994", "0.61454713", "0.61432064", "0.61346906", "0.6111855", "0.6098031", "0.6081805", "0.6064603", "0.6019327", "0.6014426", "0.597341", "0.5926799", "0.5918444", "0.5916501", "0.5915391", "0.5892825", "0.58884686", "0.5883753", "0.58752733", "0.58306086", "0.5830435", "0.5819908", "0.58192307", "0.5816553", "0.5800203", "0.5796929", "0.5769874", "0.5763208", "0.57453895", "0.57293135", "0.57212037", "0.57210916", "0.5707435", "0.56969005", "0.56926924", "0.56821126", "0.5679393", "0.56641364", "0.56607085", "0.5657824", "0.56574166", "0.5645106", "0.56439954", "0.56374526", "0.5628941", "0.5621738", "0.5618889", "0.5608997", "0.56001055", "0.55829865", "0.5580352", "0.554812", "0.5546795", "0.55462146", "0.5530479", "0.5511428", "0.55010474", "0.54902935", "0.5486779", "0.54820144", "0.54788333", "0.54722446", "0.5460957", "0.54544634", "0.54520845", "0.54440403", "0.54058725", "0.5401825", "0.54000413", "0.5397414", "0.53923064" ]
0.82702404
0
Given a FireBrowser object will then check to see if there are notifications then will go through the list and respond appropriately to them.
def run(browser: FireBrowser): midline("STATUS UPDATING") if browser.find_tab("Portal") is not -1: browser.switch_tab("Portal") elif not browser.tab_names: login.run(browser) else: login.run(browser) file = FileHandle("status") data = file.read_to_data() browser.remind_me_later() if browser.check_selector(data['notify_check']): write("Looking at Notifications...") while delete_notifications(browser, data): browser.click(data['notify_dropdown_click']) while __find_updateable_notification(browser, data): """Check to see if asurion or if appointment.""" browser.click(data['lead_textarea']) headings = browser.get_elements_text(data['lead_type_heading']) if contain_list("Appointment", headings): browser.click(data['status_select']) browser.click(data['status_select_awaiting']) browser.send_text(data['appointment_text'], 'lead_textarea') elif contain_list("Asurion", headings): browser.send_text(data['asurion_text'], 'lead_textarea')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def notifications(self):\r\n return notifications.Notifications(self)", "def notifications(self):\r\n return notifications.Notifications(self)", "def get_user_notifications(self, login):", "async def update_cache_from_notification(self) -> List[Notification]:\n new_notifications = []\n try:\n notifications = await self.get_user_notifications()\n\n if not notifications:\n return new_notifications\n\n new_notifications = self.get_new_notifications()\n for notification in new_notifications:\n await self.__manage_notification_posts(notification)\n except Exception as e:\n if self.verbose:\n print(f\"Failed to update Weverse Cache - {e}\")\n return new_notifications", "def sendAllNotifications():\n delta = prefs.getDaysToNotifyMinistriesQuestionsPendingResponse()\n date = datetime.date.today()\n sendNotificationToMinistry(date)\n sendNotificationToClerksOffice(date)\n sendNotificationToMP(date)", "def notifications(id):\n return core.query(schema.notify, id)", "def send_notification(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n m1 = Members(\"Richard\", \"Blackmore\", \"14-04-1945\", \"Weston\")\n s1.send_notification(\"Please return book\")\n self.assertEqual(m1.get_notifications(), None)\n s1.add_resource(b1)\n s1.lending_process(b1, m1)\n s1.send_notification(\"Please return book\")\n self.assertEqual(m1.get_notifications(), \"-Please return boo- \")", "def _notify_handlers(self):\n\n # Notify all handlers \n for handler_callback in self._registered_handlers:\n try:\n handler_callback(self._balloon_position)\n except Exception as e:\n # A receiver failed, catch and move on\n pass", "def notification_listener(self, interval=60):\n while True:\n for notification in self.get_notifications():\n yield notification\n time.sleep(interval)", "def handleNotification(self, notification):\n pass", "def wait_for_notification(page):\r\n def _is_saving():\r\n num_notifications = len(page.q(css='.wrapper-notification-mini.is-shown'))\r\n return (num_notifications == 1, num_notifications)\r\n\r\n def _is_saving_done():\r\n num_notifications = len(page.q(css='.wrapper-notification-mini.is-hiding'))\r\n return (num_notifications == 1, num_notifications)\r\n\r\n Promise(_is_saving, 'Notification showing.').fulfill()\r\n Promise(_is_saving_done, 'Notification hidden.').fulfill()", "def test_notification(self, mock):\n mock.register_uri(\n CONST_HTTP_METHOD_POST,\n pyflume.constants.URL_OAUTH_TOKEN,\n text=load_fixture(CONST_TOKEN_FILE),\n )\n mock.register_uri(\n \"get\",\n pyflume.constants.API_NOTIFICATIONS_URL.format(user_id=CONST_USER_ID),\n text=load_fixture(\"notification.json\"),\n )\n flume_auth = pyflume.FlumeAuth(\n CONST_USERNAME,\n CONST_PASSWORD,\n CONST_CLIENT_ID,\n CONST_CLIENT_SECRET,\n CONST_FLUME_TOKEN,\n )\n\n flume_notifications = pyflume.FlumeNotificationList(flume_auth)\n notifications = flume_notifications.get_notifications()\n assert len(notifications) == 1 # noqa: S101\n assert notifications[0][CONST_USER_ID] == 1111 # noqa: S101,WPS432\n assert flume_notifications.has_next # noqa: S101\n\n mock.register_uri(\n \"get\",\n flume_notifications.next_page,\n text=load_fixture(\"notification_next.json\"),\n )\n\n notifications_next = flume_notifications.get_next_notifications()\n assert len(notifications_next) == 1 # noqa: S101\n assert notifications_next[0][CONST_USER_ID] == 1111 # noqa: S101,WPS432\n assert flume_notifications.has_next is False # noqa: S101\n\n mock.register_uri(\n \"get\",\n 
pyflume.constants.API_NOTIFICATIONS_URL.format(user_id=CONST_USER_ID),\n text=load_fixture(\"notification_nopage.json\"),\n )\n\n notifications_nopage = flume_notifications.get_notifications()\n assert len(notifications_nopage) == 1 # noqa: S101\n assert notifications_nopage[0][CONST_USER_ID] == 1111 # noqa: S101,WPS432\n assert flume_notifications.has_next is False # noqa: S101", "def open_notifications(self) -> 'WebDriver':\n ext_name = 'mobile: openNotifications'\n try:\n self.assert_extension_exists(ext_name).execute_script(ext_name)\n except UnknownMethodException:\n # TODO: Remove the fallback\n self.mark_extension_absence(ext_name).execute(Command.OPEN_NOTIFICATIONS, {})\n return cast('WebDriver', self)", "def cmd_notification_all(client, args):\n notifications_all = client.get_notifications(args.new)\n notifications_all['messages'] = [message.__dict__ for message in\n notifications_all['messages']]\n formatted_replies = []\n for reply in notifications_all['replies']:\n formatted_reply = reply.__dict__\n formatted_reply['content'] = format_comment_tree(formatted_reply['content'])\n formatted_replies.append(formatted_reply)\n notifications_all['replies'] = formatted_replies\n generate_output({'notifications_all': notifications_all}, args.output_file)", "async def check_notify(self) -> None:\n async with self.lock:\n # We loop through a list of keys because we are going to\n # mutate the dictionary as we loop through it.\n for message_id in copy.copy(list(self.upcoming_events.keys())):\n upcoming_event = self.upcoming_events[message_id]\n if not upcoming_event.time_to_notify():\n continue\n\n # Delete upcoming event if it's a member event\n if isinstance(upcoming_event, MemberEvent):\n # Delete upcoming if it's a member event\n await self.delete_upcoming_event(message_id)\n\n # Prepare message from the queue if it's recurring\n stop_notifying = False\n if isinstance(upcoming_event, RecurringEvent):\n stop_notifying = (\n upcoming_event.event_cancelled\n or upcoming_event.notified\n )\n\n if not stop_notifying:\n # Send ongoing event message\n ongoing_message = await upcoming_event.send_ongoing_message(\n notif_message=self.ongoing_template,\n channel=self.calendar_channel\n )\n\n # Distribute DM\n await upcoming_event.distribute_dm(\n self.dm_template,\n self.organizer_dm_template\n )\n\n # Create new ongoing event\n ongoing_event = OngoingEvent(\n countdown_time=upcoming_event.start_time,\n timeout_length=self.event_timeout,\n organizer_id=upcoming_event.organizer.id,\n message_text=ongoing_message.content,\n message_embed=ongoing_message.embeds[0]\n )\n\n self.ongoing_events[ongoing_message.id] = ongoing_event", "def queue(self):\n return self._notifications", "def list_notifications():\n token = request.args.get('token')\n user = User.query.filter_by(token=token).first()\n\n if user is None:\n return jsonify({\"error\": \"Access Denied!\"})\n\n # Filter Posts so the user doesn't have to filter it\n notifications = Notifications.query.filter_by(user_id=user.id).order_by(desc('created'))\n result = notification_schema.dump(notifications)\n\n # Notifications have been read delete them\n toDelete = Notifications.query.filter_by(user_id=user.id)\n toDelete.delete()\n\n return jsonify({\n \"notifications\": result\n })", "async def notify_users():\n if not app['websockets']:\n return\n\n message = build_user_message()\n await wait([user.send(message) for user in app['websockets']])", "async def get_user_notifications(self):\n self._old_notifications = self.user_notifications # important 
for keeping track of what is new.\n\n async with self.web_session.get(self._api_notifications_url, headers=self._headers) as resp:\n if self.check_status(resp.status, self._api_notifications_url):\n data = await resp.json()\n self.user_notifications = create_notification_objects(data.get('notifications'))\n for user_notification in self.user_notifications:\n self.all_notifications[user_notification.id] = user_notification\n return self.user_notifications", "async def check_retrieve(self) -> None:\n async with self.lock:\n for upcoming_event in self.upcoming_events.values():\n if not isinstance(upcoming_event, RecurringEvent):\n continue\n\n if not upcoming_event.time_to_notify():\n continue\n\n if isinstance(upcoming_event, RecurringEvent):\n try:\n await upcoming_event.retrieve_content()\n except NoMoreItems:\n continue", "def process_notifications():\n notification_processed= 0\n for notification in EventNotification.objects.filter(awaiting=True):\n if notification.action == 'active':\n # Process the notification of an element become 'active'.\n is_active= False\n try:\n is_active= notification.item.content_object.active\n except:\n pass\n if is_active:\n if send_notification(notification):\n notification.awaiting= False\n notification.save()\n notification_processed += 1\n else:\n print >> sys.stderr, '[%s] %s' % (datetime.now().isoformat(),\n AppMessage('NotificFailed').message % notification.__unicode__())\n return \"Completed processing notifications: %d sent.\" % notification_processed", "def open_notifications(self):\n self.android_device_driver.adb.exec_adb_cmd(\n \"shell cmd statusbar expand-notifications\").wait()", "def get_notifications(self):\n return self.ws.events['notifications']", "def test_registered_no_notifications(self):\n msg = self._send(self.reg_conn, '1')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.no_reminders)", "def test_registered_no_notifications(self):\n msg = self._send(self.reg_conn, '1')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.no_reminders)", "def test_retrieve_notifications_list(client):\n create_user_response = create_user(client, TEST_USER_NAME, TEST_USER_PASS)\n assert create_user_response.status_code == HttpStatus.created_201.value\n\n new_notification_message_one = 'The winners will be announced in 1 minute'\n new_notification_category_one = 'Information'\n post_response = create_notification(\n client,\n new_notification_message_one,\n 15,\n new_notification_category_one)\n assert post_response.status_code == HttpStatus.created_201.value\n assert Notification.query.count() == 1\n\n new_notification_message_two = 'There is a problem with one score'\n new_notification_category_two = 'Error'\n post_response = create_notification(\n client,\n new_notification_message_two,\n 10,\n new_notification_category_two)\n assert post_response.status_code == HttpStatus.created_201.value\n assert Notification.query.count() == 2\n\n get_first_page_url = url_for('service.notificationlistresource', _external=True)\n get_first_page_response = client.get(\n get_first_page_url,\n headers=get_authentication_headers(TEST_USER_NAME, TEST_USER_PASS))\n assert get_first_page_response.status_code == HttpStatus.ok_200.value\n\n get_first_page_response_data = json.loads(\n get_first_page_response.get_data(as_text=True))\n assert get_first_page_response_data['count'] == 2\n assert get_first_page_response_data['previous'] is None\n assert get_first_page_response_data['next'] 
is None\n assert get_first_page_response_data['results'] is not None\n assert len(get_first_page_response_data['results']) == 2\n assert get_first_page_response_data['results'][0]['message'] == \\\n new_notification_message_one\n assert get_first_page_response_data['results'][1]['message'] == \\\n new_notification_message_two\n\n get_second_page_url = url_for('service.notificationlistresource', page=2)\n get_second_page_response = client.get(\n get_second_page_url,\n headers=get_authentication_headers(TEST_USER_NAME, TEST_USER_PASS))\n assert get_second_page_response.status_code == HttpStatus.ok_200.value\n\n get_second_page_response_data = json.loads(\n get_second_page_response.get_data(as_text=True))\n assert get_second_page_response_data['previous'] is not None\n assert get_second_page_response_data['previous'] == url_for(\n 'service.notificationlistresource', page=1)\n assert get_second_page_response_data['next'] is None\n assert get_second_page_response_data['results'] is not None\n assert len(get_second_page_response_data['results']) == 0", "def accept_notifications(self):\n return self._accept_notifications", "def _parse_notification(self, node):\n\n if not node:\n return []\n\n messages = []\n notify_node = node.find('div', class_='notify')\n if notify_node:\n for p in notify_node.select('p.notification'):\n messages.append(p.get_text())\n\n return messages", "def get_notifications() -> INotifications:\n notifiers = {\"django\": DjangoNotifier, \"govuk-notify\": GovUKNotifyEmail}\n notifier = getattr(settings, \"NOTIFIER\", \"django\")\n notifier_class = notifiers[notifier]\n return notifier_class()", "def test_notify_users(self):\n foo = Foo.objects.create(name='foo', description='foo object')\n notify_users(User.objects.all(), foo, notification_type='foo')\n self.assertEqual(len(mail.outbox), 2)", "def notify(*values):\r\n data = {\"value\"+str(i+1): value for i, value in enumerate(values[:3])}\r\n\r\n response = requests.request(\"POST\", notification_url, data=data)\r\n response.raise_for_status()", "def notifies(self) -> Iterator[Notify]:\n while 1:\n with self.lock:\n ns = self.wait(notifies(self.pgconn))\n enc = self.client_encoding\n for pgn in ns:\n n = Notify(\n pgn.relname.decode(enc), pgn.extra.decode(enc), pgn.be_pid\n )\n yield n", "async def object_event_notify(event):\n try:\n sitemanager = getSiteManager()\n except ComponentLookupError:\n # Oh blast, no site manager. 
This should *never* happen!\n return []\n\n return await sitemanager.adapters.asubscribers((event.object, event), None)", "def notification():\n # pop-up notification\n notifies = NotifyModel.get_notify(current_user.get_id())\n return jsonify(notifications=notifies)", "def get_notifications(self):\n res = self.get_object(\"/integrationServices/v3/notification\")\n return res.get(\"notifications\", [])", "async def plaguenotify(self, ctx):\n notifications = await self.config.user(ctx.author).notifications()\n if notifications != False:\n await self.config.user(ctx.author).notifications.set(False)\n message = \"You will no longer be sent Plague Game notifications.\"\n else:\n await self.config.user(ctx.author).notifications.set(True)\n message = \"You will now be sent Plague Game notifications.\"\n\n await ctx.send(message)", "def command_list(self):\n # Get buckets\n project_bucket_mappings = {\n 'all-of-us-rdr-prod': PUBSUB_NOTIFICATION_BUCKETS_PROD,\n 'all-of-us-rdr-stable': PUBSUB_NOTIFICATION_BUCKETS_STABLE,\n 'all-of-us-rdr-sandbox': PUBSUB_NOTIFICATION_BUCKETS_SANDBOX,\n }\n\n bucket_list = [self.args.bucket] if self.args.bucket else project_bucket_mappings[self.gcp_env.project]\n\n notifications_dict = {\n \"notifications\": []\n }\n\n for bucket_name in bucket_list:\n # call storage api\n client = storage.Client()\n bucket = client.get_bucket(bucket_name)\n notifications = bucket.list_notifications(client)\n\n for notification in notifications:\n # Skip the default topic notification (which won't have an integer ID\"\n try:\n id_int = int(notification.notification_id)\n except ValueError:\n continue\n\n if self.args.id and self.args.id != id_int:\n continue\n\n output_dict = dict()\n\n try:\n output_dict['bucket'] = bucket_name\n output_dict['id'] = notification.notification_id\n output_dict['topic_name'] = notification.topic_name\n output_dict['topic_project'] = notification.topic_project\n output_dict['payload_format'] = notification.payload_format\n output_dict['object_name_prefix'] = notification._properties['object_name_prefix']\n output_dict['event_types'] = notification.event_types\n except KeyError:\n pass\n\n notifications_dict['notifications'].append(output_dict)\n\n pprint(notifications_dict)\n\n return 0", "def check_for_updates(context):\n browser = create_browser()\n url = URL_BASE + URL_NEWS\n try:\n browser.open(url)\n assert browser.page.select(\"div.logout-button\")\n except AssertionError:\n logger.debug(\"Session stale, force cookie update\")\n browser.close()\n browser = create_browser(True)\n browser.open(url)\n\n entry = browser.page.select(\"div.ui.form\")[0]\n entry_id = entry.attrs.get(\"data-element\")\n entry_title_full = entry.select(\"div.title.alf-click-acctitle\")[0]\n entry_date = entry_title_full.select(\"div.ui.label\")[0].text.strip()\n entry_title = entry_title_full.text.strip()\n entry_title = re.sub(pattern, \" \", entry_title)\n entry_title = entry_title.replace(entry_date, \"\").strip()\n entry_doc = entry.select(\"a.item.alf-file-show\")[0].attrs.get(\"href\")\n entry_doc = remove_id(entry_doc)\n\n res = {\n \"entry_id\": html.escape(str(entry_id)),\n \"entry_date\": html.escape(str(entry_date)),\n \"entry_title\": html.escape(str(entry_title)),\n \"entry_doc\": html.escape(str(entry_doc)),\n }\n\n # Create ENTRY_FILE if not exist\n if not os.path.isfile(ENTRY_FILE):\n with open(ENTRY_FILE, \"w\") as f:\n json.dump(res, f)\n notify_users(context, res)\n\n with open(ENTRY_FILE, \"r+\") as f:\n last_sent_entry = json.load(f)\n if res != 
last_sent_entry:\n f.seek(0)\n json.dump(res, f)\n f.truncate()\n notify_users(context, res)\n\n logger.debug(res)\n browser.close()", "def test_notify_user(self):\n foo = Foo.objects.create(name='foo', description='foo object')\n notify_users([self.user_a], foo, notification_type='foo')\n self.assertEqual(len(mail.outbox), 1)", "def getInfo(notification):", "def notify(self) -> None:\n for s in self.subscribers:\n s()", "def get_notifications(self, new=True):\n url = (\"https://api.imgur.com/3/account/{0}/\"\n \"notifications\".format(self.name))\n resp = self._imgur._send_request(url, params=locals(), needs_auth=True)\n msgs = [Message(msg_dict, self._imgur, has_fetched=True) for msg_dict\n in resp['messages']]\n replies = [Comment(msg_dict, self._imgur, has_fetched=True) for\n com_dict in resp['replies']]\n return {'messages': msgs, 'replies': replies}", "def list_webhooks(self):\n response = requests.get(\n '%spreferences/notifications' % self._url,\n **self._auth\n )\n\n if response.status_code == 401:\n raise MoipAuthorizationException(response.json())\n else:\n pretty_print(response.json())\n return response.json()", "def get_notifications(self: object, *args, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/GetNotificationsV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"GetNotificationsV1\",\n keywords=kwargs,\n params=handle_single_argument(args, parameters, \"ids\")\n )", "def delete_notification():\r\n name = request.args.get('notif')\r\n logging.info(\"Notification deleted in delete_notification(): \" + name)\r\n for notif in notifications:\r\n if notif['title'] == name:\r\n notifications.remove(notif)", "def on_notify(self, name):\r\n pass", "def notify(self) -> None:\n pass", "def notify(self) -> None:\n pass", "def notify_users(title, message, currentusers=[]):\n if args.test_mode:\n send_pushover_notification(config['pushoverusers'][args.test_mode], \"[TEST] \" + title, message)\n else:\n for u in config['pushoverusers'].keys(): # list of names for those with pushover\n if u not in currentusers:\n send_pushover_notification(config['pushoverusers'][u], title, message)\n time.sleep(0.5) # be nice to the api", "def checkClassChanges():\n try:\n newStatus = checkClasses()\n for className in newStatus:\n for _id in newStatus[className]:\n if (className not in currentStatus or _id not in currentStatus[className] or newStatus[className][_id] != currentStatus[className][_id]): #Notification not sent before \n print(\"Sending notification...\")\n f = urllib.request.urlopen(WEBHOOK_URL + \"value1=%s&value2=%s\" %(_id, newStatus[className][_id])) #Run the request\n f.read() #download data\n return newStatus\n except:\n print(\"Error while checking classes... 
Trying again\")\n return currentStatus #Default back to current status", "def watch_for_discovery_messages(self):\n while True:\n message = self.socket_manager.get_discovery_message()\n if message.disconnect == \"1\":\n self.handle_disconnect(message)\n elif message.direction == \"0\":\n self.respond_to_discovery_message(message)\n elif message.direction == \"1\":\n serialized_directory = message.get_payload()\n self.directory.merge_serialized_directory(serialized_directory)\n self.initiate_rtt_calculation()", "def test_send_notification(self):\n management.call_command('send_first_report_notification', [], {})\n eq_(len(mail.outbox), 4)", "def handle_notification(self, type, notification):\n print \"Notified ! %s\"%type\n if type != \"contentInstances\":\n return super(NGSI_10, self).handle_notification(type, notification)\n\n if not notification[\"currentNrOfInstances\"]:\n return False\n\n container_id = notification[\"subscriptionsReference\"].rsplit(\"/\", 2)[0].rpartition(\"/\")[-1]\n app_id = notification[\"subscriptionsReference\"].rsplit(\"/\", 4)[0].rpartition(\"/\")[-1]\n\n app_type, app_id = self.split_app_id(app_id)\n subscriptions = self.db.find_container_subscriptions(app_type, app_id, attribute=container_id)\n if subscriptions:\n data = self._get_context_attribute_value(notification)\n \"\"\"\n notify_request = NotifyContextRequest(\n contextResponseList = [\n ContextElementResponse(\n statusCode = 200,\n contextElement = ContextElement(\n entityId = EntityId(type = app_type, id = app_id, isPattern = False),\n contextAttributeList = [\n ContextAttribute(\n name = data[\"name\"] or container_id,\n contextValue = data[\"contextValue\"],\n metadata = data[\"metadata\"],\n type = data[\"type\"]\n )\n ]\n )\n )\n ]\n )\n \"\"\"\n update_request = UpdateContextRequest(\n contextElementList=[\n ContextElement(\n entityId=EntityId(type=app_type, id=app_id, isPattern=False),\n contextAttributeList=[\n ContextAttribute(\n name=data[\"name\"] or container_id,\n contextValue=str(data[\"contextValue\"]),\n metadata=data[\"metadata\"],\n type=data[\"type\"]\n )\n ]\n )\n ],\n updateAction=\"UPDATE\"\n )\n\n self._send_notifications(subscriptions, update_request)\n\n #raise Exception(latest, container_id, app_id)\n\n return True", "def _notifications(\n self,\n all: bool = False,\n participating: bool = False,\n since: Optional[datetime] = None,\n before: Optional[datetime] = None,\n page: int = 1,\n per_page: int = 10,\n ) -> str:\n headers = {\n \"Authorization\": \"token {}\".format(self.auth_token),\n \"accept\": \"application/vnd.github.v3+json\",\n }\n params = {\n \"all\": \"true\" if all else \"false\",\n \"participating\": \"true\" if participating else \"false\",\n \"page\": page,\n \"per_page\": per_page,\n }\n if since is not None:\n params[\"since\"] = since.isoformat()\n if before is not None:\n params[\"before\"] = before.isoformat()\n if per_page > 100:\n raise Exception(\n \"Github API support maximum 100 notifications per page for api calls\"\n )\n res = request(\"GET\", self.NOTIFICATIONS_URL, headers=headers, params=params)\n return res.text", "def _notify_all(self, event_data):\n for subs in self._subscribers:\n subs.notify(event_data)", "def send_notifications():\n due_notifications = Notification.query.filter(Notification.delivery_date <= datetime.now(timezone.utc))\n for notification in due_notifications:\n send_notification.delay(notification.id)", "def _notify():\n for observer in Bots._observers:\n observer.update(Bots.BOT_UPDATE)", "def 
retrieve_pushes(self):\n\n self.listener = Listener(account=self.pushbullet, on_push=self.on_push)\n _LOGGER.debug(\"Getting pushes\")\n try:\n self.listener.run_forever()\n finally:\n self.listener.close()", "async def watchlist(self, ctx):\r\n channel_list = await self.config.guild(ctx.guild).watching()\r\n msg = \"Bad gifs will be removed in:\\n\"\r\n for channel in channel_list:\r\n channel_obj = self.bot.get_channel(channel)\r\n if channel_obj is None: # Catch deleted/unexisting channels\r\n continue\r\n msg += f\"{channel_obj.mention}\\n\"\r\n await ctx.send(msg)", "async def __manage_notification_posts(self, notification: Notification):\n notification_type = self.determine_notification_type(notification.message)\n community = self.get_community_by_id(notification.community_id)\n if notification_type == 'comment':\n artist_comments = await self.fetch_artist_comments(notification.community_id, notification.contents_id)\n if artist_comments:\n comment = artist_comments[0]\n comment.post = self.get_post_by_id(comment.post_id)\n if comment.post:\n if comment.post.artist_comments:\n comment.post.artist_comments.insert(0, comment)\n else:\n comment.post.artist_comments = [comment]\n self.all_comments[comment.id] = comment\n elif notification_type in [\"tofans\", \"post\"]:\n post = await self.create_post(community, notification.contents_id)\n if post:\n self.all_posts[post.id] = post\n elif notification_type == 'media':\n media = await self.fetch_media(community.id, notification.contents_id)\n if media:\n self.all_media[media.id] = media\n elif notification_type == 'announcement':\n announcement = await self.fetch_announcement(community.id, notification.contents_id)\n if announcement:\n self.all_announcements[announcement.id] = announcement", "def test_notify_following(self):\n # We'll use users[0] as the \"followed\"\n\n self.users[1].follow(self.users[0], \"test\")\n self.users[2].follow(self.users[0], \"test\")\n self.users[3].follow(self.users[0], \"test\")\n\n notify_following(self.users[0], \"test\", CancelledTicketNotification,\n {\"ticket\": \"test\", \"event\": {\"id\": \"1\"},\n \"event_name\": \"test\"},\n ignore=[self.users[1]])\n\n for user in self.staff + self.admins + [self.users[0],\n self.users[1],\n self.users[4]]:\n self.assertEquals(Notification.objects.get_for_user(\n user).count(), 0)\n self.assertEquals(Notification.objects.get_for_user(\n self.users[2]).count(), 1)\n self.assertEquals(Notification.objects.get_for_user(\n self.users[3]).count(), 1)", "def notification_interface():\n return render_template(\"notifications.html\")", "def notify_users_of_reminders():\n\n #Get current date into dd/mm/YYYY format.\n now = datetime.datetime.now()\n todays_date = now.strftime(\"%d/%m/%Y\")\n\n #Get current time and convert it to hh:mm.\n todays_time = now.strftime(\"%H:%M\")\n print(todays_time)\n\n #Select all notifications from the database based on that date and time.\n notifications_query = \"\"\"SELECT user, reminder_msg FROM reminders WHERE (date=%s AND time=%s);\"\"\"\n\n #Setup our parameters\n notifications_params = (todays_date, todays_time)\n\n #TODO: Add in cursor.\n #TODO: Run query and get reminder data.\n #TODO: Loop over returned rows, and notify users with send_message_to_irc()", "def map_from_app_notifications(self, app):\n if 'log_notifications' in app and len(app['log_notifications']) > 0:\n empty_fieldlist(self.log_notifications)\n for log_notification in app.get('log_notifications', []):\n self.log_notifications.append_entry()\n form_log_notification = 
self.log_notifications.entries[-1].form\n form_log_notification.map_from_app(log_notification)", "def _notifies(self):\r\n\r\n # starts the counter that is going to be used to count\r\n # the number of processed notifications, start at zero\r\n count = 0\r\n\r\n # iterates while there are pending notifications to be\r\n # processed, the complete set of bind callables will be\r\n # called for each of the notifications\r\n while self._notified:\r\n event, data = self._notified.pop(0)\r\n binds = self._events.pop(event, [])\r\n for callable in binds: callable(data)\r\n count += 1\r\n\r\n # returns the number of processed notifications to the\r\n # the caller method\r\n return count", "def send_notifications(self):\n self.logger.debug(\"{}.send_notifications - Notifications: {} \\nTelemetry Entries: {}\"\n .format(LOG_TAG, self.notifications, self.telemetry_entries))\n\n if not self.notifications and not self.telemetry_entries:\n return\n\n _id = self.request.id\n context = self.request.context\n experience_cloud = self.request.experience_cloud\n\n notifications = self.notifications if self.notifications else None\n telemetry = Telemetry(entries=self.telemetry_entries) if self.telemetry_entries else None\n request = DeliveryRequest(id=_id, context=context, experience_cloud=experience_cloud,\n notifications=notifications, telemetry=telemetry)\n send_notification_opts = {\n \"request\": request,\n \"visitor\": self.visitor\n }\n\n async_send = threading.Thread(target=self.send_notification_func, args=(send_notification_opts,))\n async_send.start()\n self.notifications = []\n self.telemetry_entries = []", "def listen_for_new_updates(event):\n\n if event.retval:\n news_indicator.create_and_update_menu(event.retval)\n if NewsIndicator.notifications:\n show_notifications(event.scheduled_run_time)\n Gtk.main()", "def setServiceAddedNotificationsOn(self, b: bool) -> None:\n ...", "def run(self):\n most_recent = self.__most_recent\n while True:\n emails = self.__get_emails()\n\n if most_recent != emails[0]:\n print(f'{self.__source} New messsage recieved')\n\n # Dispatch event for new email\n self.__email_event()\n\n # Reset most recent\n most_recent = self.__get_emails()[0]\n\n else:\n time.sleep(0.3)", "def write(self, notification):", "def notified(self, notified):\n\n self._notified = notified", "def query_notifications(self: object, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/QueryNotificationsV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"QueryNotificationsV1\",\n keywords=kwargs,\n params=parameters\n )", "def notifyObservers(self):", "def check_for_new_data():\n SCOPES = ['https://mail.google.com/']\n creds = None\n # The file token.json stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.json'):\n creds = Credentials.from_authorized_user_file('token.json', SCOPES)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('creds_4.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.json', 'w') as token:\n token.write(creds.to_json())\n\n service = build('gmail', 'v1', credentials=creds)\n stamp = 
int(time.time()) - 3600\n # Call the Gmail API\n results = service.users().messages().list(userId='me',q=f\"from:notify@google.com after:{stamp}\").execute()\n if results[\"resultSizeEstimate\"] > 0:\n populate_database()", "def get_notification(limit=10, username=None):\n if username is not None:\n user_code = get_user_code(username)\n if user_code is None:\n return make_response(jsonify({'error': 'User does not exists'}), 400)\n u = models.Log.query.filter_by(user_code=user_code).order_by(models.Log.timestamp.desc()).limit(limit).all()\n else:\n u = models.Log.query.order_by(models.Log.timestamp.desc()).limit(limit).all()\n notifications = []\n for i in u:\n parameters = extract_parameters(i.parameters)\n item_name = get_item_name(i.item_code)\n username = get_username(i.user_code)\n if i.action == 'U':\n notifications.append(\"%s: %s %s %s of item %s(code:%s)\" %(i.timestamp, username, \"Updated\", parameters, item_name, i.item_code))\n elif i.action == 'D':\n notifications.append(\"%s: %s %s %s of item %s(code:%s)\" %(i.timestamp, username, \"Deleted\", \"entry\", item_name, i.item_code))\n elif i.action == 'IV':\n notifications.append(\"%s: %s %s %s of item %s(code:%s)\" %(i.timestamp, username, \"Inserted\", \"new variants\", item_name, i.item_code))\n else:\n notifications.append(\"%s: %s %s %s of item %s(code:%s)\" %(i.timestamp, username, \"Inserted\", \"new entry\", item_name, i.item_code))\n return make_response(jsonify(notifications))", "def action_show():\n try:\n notification = read_notification()\n except IOError:\n raise HTTPResponse(body=\"Error reading notification IO\", status=400)\n except:\n raise HTTPResponse(body=\"Unexpected error\", status=400)\n \n if notification is not None:\n return dict(msg=\"\", notification=notification.to_json())\n else:\n return dict(msg=\"No notification\")", "def notify(self):\n\n def remind():\n \"\"\"\n this function shows a pop-up using windows notification\n \"\"\"\n ntftion.notify('reminder', f\"{self.notification}:\\n{self.work_name}\\n{self.work_datetime.hour}: \"\n f\"{self.work_datetime.minute} \", app_icon='reminder.ico', timeout=3)\n\n self.eisenhower_priority()\n if self.priority:\n while dt.now().day <= self.time_ntf.day and self.status != \"done\":\n if self.priority == 1 and dt.now().time() >= self.time_ntf.time():\n remind()\n time.sleep(5*60)\n\n elif (self.priority == 2) and ((dt.now().hour == self.time_ntf.hour)\n and (dt.now().time().minute == self.time_ntf.time().minute)):\n remind()\n break\n elif self.priority == 3 and dt.now().time().hour == 18:\n remind()\n time.sleep(24 * 3600)\n elif self.priority == 4 and dt.now().weekday() == 6:\n remind()\n time.sleep(7 * 24 * 3600)\n else:\n pass", "def test_notify(self):\n disco = create_disco()\n messages = [object(), NodeActive(create_node(\"hello\"))]\n result = []\n disco.notify(result.append)\n for m in messages:\n disco.onMessage(None, m)\n self.assertEqual(messages, result)", "async def check_new_user_notifications(self) -> bool:\n async with self.web_session.get(self._api_new_notifications_url, headers=self._headers) as resp:\n if self.check_status(resp.status, self._api_new_notifications_url):\n data = await resp.json()\n has_new = data.get('has_new')\n if has_new:\n # update cache\n # Not that cache_loaded necessarily matters here,\n # but just in case other checks are happening concurrently.\n self.cache_loaded = False\n await self.update_cache_from_notification()\n self.cache_loaded = True\n return has_new", "def get(self):\n 
SearchNotifications.__init__(self)\n kwargs = self.parser.parse_args()\n search = '*' + kwargs['search'] + '*'\n query = self.notifications_db.construct_lucene_complex_query([\n ('target_role', {'value': self.role}),\n ('targets', {'value': self.username, 'join_operator': 'OR'}),\n ('action_objects', {'value': search, 'join_operator': 'AND', 'open_parenthesis': True}),\n ('title', {'value': search, 'join_operator': 'OR', 'close_parenthesis': True})])\n notifications = self.notifications_db.full_text_search('search', query, page=kwargs['page'], limit=10, sort=\"\\_id\")\n self.set_seen(notifications)\n self.logger.info(\"Searched text %s in notifications\" % search)\n return notifications", "def notify(message):\n # TODO: clean up this ugly mess\n\n global notify_flag\n\n if not notify_flag:\n notify_flag = True\n message.reply(\":gear: Started expiration checking process; users will now \"\n \"be notified if their access is about to expire.\")\n else:\n message.reply(\"Cannot have more than one running instance of the notify \"\n \"function.\")\n return\n\n flag = \"tenmins\"\n while True:\n if flag is \"deleted\":\n info = sql.notify_users(\"hour\")\n flag = \"hour\"\n elif flag is \"hour\":\n info = sql.notify_users(\"tenmins\")\n flag = \"tenmins\"\n elif flag is \"tenmins\":\n info = sql.notify_users(\"deleted\")\n flag = \"deleted\"\n\n for person in info:\n if len(info[person]) == 0:\n continue\n try:\n users = hf.get_users()\n for user in users:\n if user[\"name\"] == person:\n dbs = []\n servers = []\n for grant in info[person]:\n dbs.append(grant[\"db\"])\n servers.append(grant[\"server\"])\n chan = hf.find_channel(message._client.channels, user[\"id\"])\n\n if flag is \"hour\":\n message._client.send_message(chan,\n Strings['NOTIFY_EXPIRE_HOUR'].format(\", \".join(dbs)) + \"\\n\"\n \"\" + Strings[\"NOTIFY_EXPIRE_INFO\"])\n for db, server in zip(dbs, servers):\n logging.info(\"{} reason=[NOTIFIED OF DATABASE ACCESS EXPIRING IN AN HOUR]\\n\".format(user[\"name\"]), server, db, \"notifyhour\")\n elif flag is \"tenmins\":\n message._client.send_message(chan,\n Strings['NOTIFY_EXPIRE_TENMINS'].format(\", \".join(dbs)) + \"\\n\"\n \"\" + Strings[\"NOTIFY_EXPIRE_INFO\"])\n for db, server in zip(dbs, servers):\n logging.info(\"{} reason=[NOTIFIED OF DATABASE ACCESS EXPIRING IN TEN MINUTES]\\n\".format(user[\"name\"]), server, db, \"notifyten\")\n elif flag is \"deleted\":\n message._client.send_message(chan,\n Strings['EXPIRE'].format(\", \".join(dbs)))\n message._client.send_message(public_channel,\n Strings[\"EXPIRE_PING\"].format(user[\"name\"],\n \", \".join(dbs)))\n for db, server in zip(dbs, servers):\n logging.info(\"{} reason=[NOTIFIED OF DATABASE ACCESS EXPIRING]\\n\".format(user[\"name\"]), server, db, \"notifyexpire\")\n\n except Exception as e:\n message._client.send_message(errors_channel, \"```{}```\".format(e))\n\n with open(\"data/jobs.json\") as f:\n jobs = json.load(f)\n\n new_jobs = []\n if len(jobs) > 0:\n for job in jobs:\n if not job.endswith(\"DONE\"):\n job_string = job.replace(\"10.132.140.160\", \"SQLCLUSTER02\").replace(\"10.132.140.150\", \"SQLCLUSTER01\")\n message._client.send_message(public_channel,\n Strings[\"LOGOUT_PLEASE\"].format(job_string.split(\":\")[0],\n job_string.split(\":\")[1]))\n new_jobs.append(job + \":DONE\")\n else:\n new_jobs.append(job)\n\n with open(\"data/jobs.json\", \"w\") as f:\n json.dump(new_jobs, f)\n\n # For use with Datadog\n with open(\"/opt/opsbot35/data/status.txt\", \"w\") as f:\n f.write(str(datetime.now()))\n\n 
time.sleep(5)", "def get_notifications(ceilometer, base_id):\n\n _filter = [{\"field\": \"base_id\", \"op\": \"eq\", \"value\": base_id}]\n # limit is hardcoded in this code state. Later that will be changed via\n # connection string usage\n return [n.to_dict()\n for n in ceilometer.events.list(_filter, limit=100000)]", "def get_notifications(\n self,\n all: bool = False,\n participating: bool = False,\n since: Optional[datetime] = None,\n before: Optional[datetime] = None,\n per_page: int = 10,\n page: int = 1,\n ) -> List[Notification]:\n raw_res = self._notifications(\n all=all,\n participating=participating,\n since=since,\n before=before,\n per_page=per_page,\n page=page,\n )\n return Notification.load_from_json_str(raw_res)", "def notification_list(request):\n try:\n validator = NotificationListValidator(request.GET)\n valid = validator.validate() # Validate the request\n if valid:\n current_user_id = request.user_id\n page_limit = int(request.GET['page_limit'])\n page_offset = int(request.GET['page_offset'])\n\n # notification listing\n notification_list = Notification.objects.filter(user_id=current_user_id).all().order_by('-created_on')[page_offset:page_limit+page_offset]\n serializer = NoitifcationListSerializer(notification_list, many=True)\n\n # set is_read = 1\n Notification.objects.filter(user_id=current_user_id).update(\n is_read=1\n )\n \n return Response({'data':serializer.data}, status=status.HTTP_200_OK)\n else:\n return Response({'error':requestErrorMessagesFormate(validator.get_message())}, status=status.HTTP_200_OK)\n except Exception as exception:\n logerror('notifications/views.py/notification_list', str(exception))\n return Response({'error':str(exception)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def get_notification():\n condition.acquire()\n if not notifications:\n ret = condition.wait(2)\n if not ret:\n condition.release()\n raise TimeoutError(\"Timed out while waiting for notification\")\n\n notice = notifications.pop(0)\n condition.release()\n return notice", "def __process_notification_events(self, notification_event):\n if notification_event is not None:\n if isinstance(notification_event, NotificationEvent):\n print(notification_event.message)", "def test_registered_with_notification(self):\n now = datetime.datetime.now()\n notification = reminders.Notification.objects.create(num_days=1,\n time_of_day=now)\n reminders.SentNotification.objects.create(notification=notification,\n recipient=self.contact,\n status='sent',\n message='abc',\n appt_date=now,\n date_to_send=now,\n date_queued=now)\n msg = self._send(self.reg_conn, '1')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.thank_you)\n sent_notif = reminders.SentNotification.objects.all()\n self.assertEqual(sent_notif.count(), 1)\n self.assertEqual(sent_notif[0].status, 'confirmed')", "def test_registered_with_notification(self):\n now = datetime.datetime.now()\n notification = reminders.Notification.objects.create(num_days=1,\n time_of_day=now)\n reminders.SentNotification.objects.create(notification=notification,\n recipient=self.contact,\n status='sent',\n message='abc',\n appt_date=now,\n date_to_send=now)\n msg = self._send(self.reg_conn, '1')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.thank_you)\n sent_notif = reminders.SentNotification.objects.all()\n self.assertEqual(sent_notif.count(), 1)\n self.assertEqual(sent_notif[0].status, 'confirmed')", "def listen():\n if request.method == 'GET':\n 
print request\n return verify_webhook(request)\n\n if request.method == 'POST':\n payload = request.json\n event = payload['entry'][0]['messaging']\n for x in event:\n if is_user_message(x):\n text = x['message']['text']\n sender_id = x['sender']['id']\n respond(sender_id, text)\n\n return \"ok\"", "def wait_for_multiple_notification(self, device_id, expected_notifications, timeout=30, assert_errors=False):\n item_list = []\n for _ in range(timeout):\n notifications = self.get_notifications()\n for item in notifications:\n if item['ep'] == device_id:\n # Check if received notification contains any combinations defined in expected_notifications.\n # If found, append item to item_list. If as many items are found as are expected, return list.\n if [expect_item for expect_item in expected_notifications if item['path'] in expect_item.keys()\n and base64.b64decode(item['payload']).decode('utf8') in expect_item.values()]:\n item_list.append(item)\n if len(item_list) == len(expected_notifications):\n return item_list\n sleep(1)\n log.debug('Expected {}, found only {}!'.format(expected_notifications, item_list))\n if assert_errors:\n assert False, 'Failed to receive notifications'\n return False", "def notifications_processor(request):\r\n\r\n now = datetime.now()\r\n today = date.today()\r\n\r\n # DISABLED--seems confusing to have different behavior\r\n # On Fridays, get notified for the weekend and next Monday\r\n #\r\n # weekday = today.weekday()\r\n # if weekday == 4:\r\n # days_diff = 4\r\n # else:\r\n\r\n # Get notified for classes on the next day\r\n days_diff = 2\r\n\r\n end_day = today + timedelta(days=days_diff)\r\n end_datetime = datetime.combine(end_day, time(0, 0))\r\n \r\n if request.user.is_authenticated:\r\n lessons = Lesson.objects.filter(teacher=request.user, notified=False, start_at__gte=now, start_at__lt=end_datetime)\r\n\r\n # Combine all classes into one message\r\n messages = \"\"\r\n\r\n for lesson in lessons:\r\n lesson.notified = True\r\n lesson.save()\r\n lesson_start_at = datetime.strftime(lesson.start_at, \"%a, %b. 
%d, %I:%M %p\")\r\n messages += f\"{lesson.student.name}'s class on {lesson_start_at}<br>\"\r\n\r\n if messages != \"\":\r\n Notification.objects.create(teacher=request.user,\r\n message=messages,\r\n due_at=end_datetime)\r\n \r\n notifications = Notification.objects.filter(teacher=request.user, is_new=True)\r\n confirmations = Confirmation.objects.filter(teacher=request.user, is_new=True)\r\n\r\n for c in confirmations:\r\n c.is_new = False\r\n c.save()\r\n\r\n confirmations = confirmations[:1]\r\n \r\n return {'notifications': notifications,\r\n 'confirmations': confirmations}\r\n \r\n return {'notifications': [],\r\n 'confirmations': []}", "def register_notifications(self, hosts): \n grp = GrowlRegistrationPacket()\n for n in self.avail_sources:\n grp.addNotification(n, n in self.sources)\n gs = GrowlSender(self.env)\n gs.notify(hosts, grp)", "def receive_notification(self, *args, **kwargs):\n\t\tprint(f\"{self.__location} is now hearing \\\"{args[0]}\\\" on {args[1]}\")", "def on_new_notification(data, index, *args):\n print(data)", "def test_messaging_list(self):\n response = self.client.get(reverse('messaging'))\n self.assertEqual(response.status_code, 200)", "def check_notifications(func):\n\n def _check_notifications(ctx, *args, **kwargs):\n config = None\n client = None\n # protect against early cli failures\n if ctx.obj and 'config' in ctx.obj and 'client' in ctx.obj:\n config = ctx.obj['config']\n client = ctx.obj['client']\n\n res = func(ctx, *args, **kwargs)\n\n if client and config:\n notifications_resp = client.get_notifications(config.username)\n notification_json = notifications_resp.json()\n urgent_notifications = notification_json[\"urgent_count\"]\n if urgent_notifications > 0:\n logger.info(uxstring.UxString.unread_notifications.format(urgent_notifications))\n\n return res\n\n return functools.update_wrapper(_check_notifications, func)", "def notify_searchers(search_id):\n\n searchers = Searcher.objects.filter(search=search_id)\n\n for searcher in searchers:\n pass # Notify via email about clasification results", "def test_multiple_notifications_unconfirmed(self):\n appt_date = datetime.date.today()\n self.test_patient.next_visit = appt_date\n self.test_patient.save()\n notified = self.create_unconfirmed_notification(self.test_patient, appt_date)\n notified_again = self.create_unconfirmed_notification(self.test_patient, appt_date)\n qs = reminders.Patient.objects.unconfirmed_for_date(appt_date)\n logging.debug(qs)\n self.assertTrue(self.test_patient in qs)\n self.assertTrue(qs.count(), 1)", "def test_notifications(self):\n program = RsyncSystemBackup(destination='/backups/system')\n # Right now we just make sure the Python code doesn't contain any silly\n # mistakes. It would be nice to have a more thorough test though, e.g.\n # make sure that `notify-send' is called and make sure that we don't\n # fail when `notify-send' does fail.\n program.notify_starting()\n program.notify_finished(Timer())\n program.notify_failed(Timer())", "def _get_notification_data(\n self, current_notification, last_notification\n ): # pylint: disable=unused-argument\n return {}" ]
[ "0.5953895", "0.5953895", "0.5901647", "0.584553", "0.5815878", "0.58042645", "0.5801946", "0.5765564", "0.57637656", "0.5749498", "0.5722716", "0.5661563", "0.5660522", "0.56066173", "0.5565932", "0.55315346", "0.55289805", "0.5522268", "0.5481762", "0.54630154", "0.5443189", "0.5437607", "0.5435906", "0.5406709", "0.5406709", "0.5395858", "0.53760695", "0.53640187", "0.5363538", "0.5353103", "0.5331184", "0.53260815", "0.5319196", "0.5307621", "0.52707154", "0.52452713", "0.5211944", "0.520764", "0.51957196", "0.51945305", "0.51830274", "0.51766175", "0.51662165", "0.51640606", "0.51607436", "0.51569873", "0.51531005", "0.51531005", "0.5148517", "0.5135323", "0.5129643", "0.51178044", "0.5115818", "0.5111995", "0.5102855", "0.50999296", "0.5097962", "0.5082577", "0.50814027", "0.5076276", "0.50671285", "0.50638676", "0.5058658", "0.5055009", "0.5045198", "0.50433755", "0.5032222", "0.50308985", "0.5026752", "0.50203997", "0.5007563", "0.50015986", "0.499964", "0.49950922", "0.4990629", "0.49880886", "0.49871895", "0.49766764", "0.49709415", "0.49704447", "0.49650174", "0.49589676", "0.49563232", "0.49546686", "0.49488023", "0.4931395", "0.4927822", "0.4927351", "0.4922443", "0.4921849", "0.49198908", "0.49196607", "0.49170825", "0.4912414", "0.49096066", "0.49067494", "0.49026352", "0.49023542", "0.4900091", "0.48951226" ]
0.6035755
0
figure out filename (or eventually URI) of pre-generated NEMS-format recording for a given cell/batch/loader string. Very baphy-specific. Needs to be coordinated with loader processing in nems0.xform_helper
def generate_recording_uri(cellid, batch, loader):
    options = {}
    if loader in ["ozgf100ch18", "ozgf100ch18n"]:
        options = {'rasterfs': 100, 'includeprestim': True,
                   'stimfmt': 'ozgf', 'chancount': 18}
    elif loader in ["ozgf100ch18pup", "ozgf100ch18npup"]:
        options = {'rasterfs': 100, 'stimfmt': 'ozgf', 'chancount': 18,
                   'pupil': True, 'stim': True,
                   'pupil_deblink': True, 'pupil_median': 2}
    elif (loader.startswith("nostim200pup") or
          loader.startswith("psth200pup") or
          loader.startswith("psths200pup")):
        options = {'rasterfs': 200, 'stimfmt': 'parm', 'chancount': 0,
                   'pupil': True, 'stim': False,
                   'pupil_deblink': 1, 'pupil_median': 0.5}
    elif loader.startswith("nostim10pup") or loader.startswith("psth10pup"):
        options = {'rasterfs': 10, 'stimfmt': 'parm', 'chancount': 0,
                   'pupil': True, 'stim': False,
                   'pupil_deblink': True, 'pupil_median': 2}
    elif (loader.startswith("nostim20pup") or
          loader.startswith("psth20pup") or
          loader.startswith("psths20pup") or
          loader.startswith("evt20pup")):
        options = {'rasterfs': 20, 'stimfmt': 'parm', 'chancount': 0,
                   'pupil': True, 'stim': False,
                   'pupil_deblink': 1, 'pupil_median': 0.5}
    elif (loader.startswith("nostim20") or
          loader.startswith("psth20") or
          loader.startswith("psthm20") or
          loader.startswith("psths20")):
        options = {'rasterfs': 20, 'stimfmt': 'parm', 'chancount': 0,
                   'pupil': False, 'stim': False}
    elif (loader.startswith("env100") or loader.startswith("envm100")):
        options = {'rasterfs': 100, 'stimfmt': 'envelope', 'chancount': 0}
    elif loader.startswith("env200"):
        options = {'rasterfs': 200, 'stimfmt': 'envelope', 'chancount': 0}
    else:
        raise ValueError('unknown loader string')

    recording_uri = get_recording_file(cellid, batch, options)

    return recording_uri
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getWaveformFileName(self):\n return self.waveform_info.split(\":\")[1][:20]", "def _get_output_filename(dataset_dir, split_name):\n return '%s/fer_%s.tfrecord' % (dataset_dir, split_name)", "def parse_rarefaction_fname(name_string):\r\n\r\n root, ext = os.path.splitext(name_string)\r\n root_list = root.split(\"_\")\r\n iters = int(root_list.pop())\r\n seqs_per_sam = int(root_list.pop())\r\n base_name = \"_\".join(root_list)\r\n return base_name, seqs_per_sam, iters, ext", "def which_band_is_file(filename):\n if not is_galex_file(filename):\n return None\n return \"fuv\" if \"-fd-\" in filename else \"nuv\" if \"-nd-\" in filename \\\n else \"unknown\"", "def filename(N, Dr, g, launch):\n\n return 'N%s_R%s_G%s_E%s.datR' % tuple(map(float_to_letters,\n (N, Dr, g, launch)))", "def getTrackRecordingFileName(*args, **kwargs):\n pass", "def construct_basename(self, row, obstime=None):\n _obstime = self.construct_obstime(row) if obstime is None else obstime\n tiso = time.Time(_obstime, format='isot')\n dtime = datetime.datetime.strptime(tiso.value, '%Y-%m-%dT%H:%M:%S.%f')\n return '{0}-{1}_{2}_{3}{4}'.format(self['filename'][row].split('.fits')[0],\n self['target'][row].replace(\" \", \"\"),\n self.spectrograph.camera,\n datetime.datetime.strftime(dtime, '%Y%m%dT'),\n tiso.value.split(\"T\")[1].replace(':',''))", "def extractFileName(fileType, modelName, modelVersion, modelState):\n fileName = '{}_{}_{}'.format(modelName, modelVersion, fileType) if modelState == 'national' else '{}_{}_{}_{}'.format(modelName, modelVersion, modelState, fileType)\n return fileName", "def getGenomeName(recordID, filename):\n global FILENAMES\n global GENOMENAMES\n recordID = str(recordID)\n\n\n #Look for lcl followed by the possible genome name\n if re.search('lcl\\|([\\w-]*)', recordID):\n match = re.search('lcl\\|([\\w-]*)', recordID)\n match = str(match.group())\n genome_name = match.split('|')[1]\n\n #Look for a possible genome name at the beginning of the record ID\n elif re.search('(^[a-zA-Z][a-zA-Z]\\w{6}\\.\\d)',recordID):\n match = re.search('(\\w{8}\\.\\d)',recordID)\n genome_name = str(match.group())\n\n #Look for ref, gb, emb or dbj followed by the possible genome name\n elif re.search('(ref\\|\\w{2}_\\w{6}|gb\\|\\w{8}|emb\\|\\w{8}|dbj\\|\\w{8})',recordID):\n match = re.search('(ref\\|\\w{2}_\\w{6}|gb\\|\\w{8}|emb\\|\\w{8}|dbj\\|\\w{8})',recordID)\n match = str(match.group())\n genome_name = match.split('|')[1]\n\n #Look for gi followed by the possible genome name\n elif re.search('gi\\|\\d{8}', recordID):\n match = re.search('gi\\|\\d{8}', recordID)\n match = str(match.group())\n genome_name = match.split('|')[1]\n #Assign the file name as genome name\n else:\n genome_name = filename\n\n if recordID not in FILENAMES:\n FILENAMES[recordID] = filename\n if filename not in GENOMENAMES:\n GENOMENAMES[filename] = recordID\n\n return genome_name", "def _get_datafile_name(self, field_name, saveformat, timestep):\n # These formats produce a new file each time\n counted_formats = ('xml', 'xml.gz')\n\n metadata = {}\n\n # Make filename, with or without save count in name\n if saveformat in counted_formats:\n filename = \"%s%d.%s\" % (field_name, timestep, saveformat)\n # If we have a new filename each time, store the name in metadata\n #metadata = [('filename', filename)]\n metadata['filename'] = filename\n elif saveformat == \"shelve\":\n filename = \"%s.%s\" % (field_name, \"db\")\n else:\n filename = \"%s.%s\" % (field_name, saveformat)\n if saveformat == 'hdf5':\n metadata['dataset'] = 
field_name+str(timestep)\n\n savedir = self.get_savedir(field_name)\n fullname = os.path.join(savedir, filename)\n return fullname, metadata", "def parse_modelname(string,labellist,ensemblesfolder):\n ## We need to account for two different prefixes now. \n split_ens_temp = ensemble_template.split(\"{f}\")\n template_prefix = split_ens_temp[0]\n\n template_seedind = split_ens_temp[1].split(\"{s}\")[0]\n if string.startswith(template_prefix): ## TODO or other prefix\n frames,seedext = string.split(template_prefix)[-1].split(template_seedind)\n seed=seedext.split(\"results.json\")[0]\n return {\"name\":string,\n \"frames\":int(frames),\n \"seed\":int(seed),\n \"template\":ensemble_template,\n \"outliers\":determine_outliers(labellist,int(seed),int(frames)),\n }", "def _generate_raw_file_name(self, well, channel, desc):\n \n return \"bPLATE_w\" + well + \"_\" + desc + \"_c\" + channel + \".png\"", "def whisper_filename(self):\r\n source_name = self.source_id and self.source.name or ''\r\n return get_valid_filename(\"{0}__{1}.wsp\".format(source_name,\r\n self.name))", "def __extractFileName(self, line):\n f = line.split(None, 1)[1]\n f = f.rsplit(None, 6)[0]\n if f == \"/dev/null\":\n f = \"__NULL__\"\n else:\n f = f.split(\"/\", 1)[1]\n return f", "def bandname(self):\n if self._properties['bandname'] is None:\n self._properties['bandname'] = \"fuv\" if \"-fd-\" in self.filename else \"nuv\" if \"-nd-\" in self.filename \\\n else \"unknown\"\n return self._properties['bandname']", "def parse_filename(cls, filename):\n #from nose.tools import set_trace; set_trace()\n m = re.match(cls._pattern, os.path.basename(filename))\n basename = m.group(1)\n bandname = cls._bandmap.get(m.group(2), m.group(2))\n return basename, bandname", "def _get_output_filename(dataset_dir, split_name):\n return '%s/%s*.tfrecord' % (dataset_dir, split_name)", "def frame_string(path):\n filename = os.path.split(path)[1]\n return os.path.splitext(filename)[0]", "def _get_seq_filename(self):\n fnd = self._get_session_dir()\n self.seq_number += 1\n fn = os.path.join(fnd, 'S%4.4d.tif' % self.seq_number)\n return fn", "def _get_output_filename(dataset_dir, split_name):\n return '%s/cifar100_%s.tfrecord' % (dataset_dir, split_name)", "def getNoteFileName(self, show, sequence, id):\n idPadded = self.__getPaddedId(id)\n fileName = Mode(show, sequence).get(\"[noteBaseName]\", {\"id\":idPadded})\n\n# log(\"getNoteFileName id: %s fileName: %s\" % (id, fileName))\n\n return fileName", "def parse_path(mode, image_type, is_label):\n suffix = \"labels\" if is_label else (\"images\" if image_type == ImageType.DIGIT else \"\")\n if image_type == ImageType.DIGIT:\n return f\"data/digitdata/{mode.path_infix}{suffix}\"\n if mode == Mode.TRAINING:\n return f\"data/facedata/facedatatrain{suffix}\"\n return f\"data/facedata/facedata{mode.path_infix}{suffix}\"", "def __get_parsed_video_file_path(season: int, episode: int) -> str:\n return rootpath.detect() + \"/\" + SAVE_FOLDER + \"s\" + str(season) + \"e\" + str(episode) + \".data\"", "def sas_file(self):\n\n return os.path.normpath(self.path +'\\\\'+ cfg_dict['format_pgm'])", "def _getfilename(self):\n pass", "def get_res_img_fname(base_fname, t_ref, t_float, trsf_types):\n if isinstance(trsf_types, str):\n trsf_types = [trsf_types]\n base_fname, ext = splitext_zip(base_fname)\n if ext == \"\":\n ext = '.inr'\n else:\n if ext==\".lsm\" or ext not in POSS_EXT:\n ext = '.inr'\n compo_trsf = '_o_'.join(trsf_types)\n return base_fname + \"-T{}_on_T{}-{}{}\".format(t_float, t_ref, compo_trsf, 
ext)", "def ConvertFileName(cls,infile,band):\r\n try:\r\n import os\r\n except:\r\n raise ImportError(\"Can not find module os\")\r\n try:\r\n base = str.split(infile,\"_metadata.xml\")[0]\r\n print base\r\n ext=\"_band\"+str(band)+\".ntf\"\r\n outfile=base+ext\r\n return outfile\r\n except:\r\n raise ImportError(\"Can not covert file names\")", "def Fragment_a_spin_file_name(self):\n return self.Fragment_a_spin_file", "def imagefile(self):\n if self.__filetype==\"flatWarp\" :\n ext = \".fw\"\n elif self.__filetype==\"camWarp\" :\n ext = \".camWarp\"\n elif self.__filetype==\"raw\" :\n ext = \".Data.dat\"\n else :\n raise ValueError(f\"requested file type {self.__filetype} not recognized\")\n\n return self.__imagefolder/self.file.replace(\".im3\", ext)", "def name_woext(self):\n return os.path.splitext(self._job)[0]", "def get_filename(self) -> str:\n fname = self.url.split(\"/\")[-1]\n if \",\" in fname:\n _fname, _i = fname.split(\",\")\n _split_fname = _fname.split(\".\")\n _name = _split_fname[0]\n _extension = _split_fname[-1]\n return _name + _i + \".\" + _extension\n else:\n return fname", "def get_nomenclature_segmentation_name(czi_fname, nomenclature_file, channel_name='PI', ext='.inr.gz'):\n # - Read NOMENCLATURE file defining naming conventions:\n return get_nomenclature_channel_fname(czi_fname, nomenclature_file, channel_name+'_segmented', ext)", "def parse_path_experiment(proto_path, instance, model_path):\n\n # split the file name into parts\n name_parts = proto_path.split('_')\n # get the base path for the current model\n base_path = eval('instance.request.user.profile.'+model_path)\n # get the different parameters from the model\n # get the date\n date = datetime.datetime.strptime('_'.join(name_parts[:6]), '%m_%d_%Y_%H_%M_%S')\n date = date.replace(tzinfo=pytz.UTC)\n # get whether there was imaging\n if not (any(el in proto_path for el in ['nomini', 'nofluo'])):\n if 'WF' in proto_path:\n imaging = 'wirefree'\n elif 'UC3' in proto_path:\n imaging = 'uc3'\n elif 'UC4' in proto_path:\n imaging = 'uc4'\n else:\n imaging = 'doric'\n else:\n imaging = 'no'\n # initialize list for second animal coordinates\n is_animal2 = False\n # set the position counter\n animal_last = 8\n # define the rig\n if name_parts[6] in ['miniscope', 'social', 'other', 'VPrey', 'VScreen',\n 'ARPrey', 'VTuning', 'VWheel', 'VTuningWF', 'VWheelWF']:\n # set the rig variable\n rig = name_parts[6]\n # if the rig is social, set the second animal flag to true\n if rig == 'social':\n is_animal2 = True\n # increase the counter\n animal_last += 1\n else:\n rig = 'VR'\n\n # if imaging is detected, create the paths for fluo and tif\n if imaging != 'no':\n # define the calcium data path\n fluo_path = join(base_path, proto_path + '_calcium.hdf5')\n tif_path = join(base_path, proto_path + '.tif')\n else:\n fluo_path = ''\n tif_path = ''\n # get the animal\n animal = Mouse.objects.get(mouse_name='_'.join(name_parts[animal_last-2:animal_last+1]))\n # get the second animal if present\n if is_animal2:\n animal2 = Mouse.objects.get(mouse_name='_'.join(name_parts[animal_last+1:animal_last + 3]))\n animal_last += 3\n else:\n animal2 = ''\n # increase the counter\n animal_last += 1\n # get the result\n result = name_parts[animal_last]\n # increase the counter\n animal_last += 1\n # check if there is a lighting condition\n if (len(name_parts) > animal_last) and (name_parts[animal_last] in ['dark']):\n lighting = name_parts[animal_last]\n # increase the counter\n animal_last += 1\n else:\n lighting = 'normal'\n\n # add any 
extra info as notes\n if len(name_parts) > animal_last:\n notes = '_'.join((name_parts[animal_last:]))\n else:\n # if not, add blank to keep it searchable\n notes = 'BLANK'\n\n # define the path for the bonsai file\n bonsai_path = join(base_path, proto_path + '.csv')\n # define the path for the avi file\n avi_path = join(base_path, proto_path + '.avi')\n # define the path for the tracking and sync file depending on date\n track_path = join(base_path, proto_path + '.txt')\n # define the path for the h5 file from unity\n screen_path = join(base_path, proto_path + '.h5')\n # define the path for the sync file\n sync_path = proto_path\n # define the path for the dlc file\n dlc_path = join(base_path, proto_path + '_dlc.h5')\n if rig == 'miniscope':\n sync_path = sync_path.replace('_miniscope_', '_syncMini_')\n elif rig == 'VPrey':\n sync_path = sync_path.replace('_VPrey_', '_syncVPrey_')\n elif rig == 'VScreen':\n sync_path = sync_path.replace('_VScreen_', '_syncVScreen_')\n elif rig == 'ARPrey':\n sync_path = sync_path.replace('ARPrey', '_syncARPrey_')\n elif rig == 'VTuning':\n sync_path = sync_path.replace('_VTuning_', '_syncVTuning_')\n elif rig == 'VWheel':\n sync_path = sync_path.replace('_VWheel_', '_syncVWheel_')\n elif rig == 'VTuningWF':\n sync_path = sync_path.replace('_VTuningWF_', '_syncVTuningWF_')\n elif rig == 'VWheelWF':\n sync_path = sync_path.replace('_VWheelWF_', '_syncVWheelWF_')\n else:\n sync_path = sync_path[:19] + '_syncVR' + sync_path[19:]\n sync_path = join(base_path, sync_path) + '.csv'\n\n new_entry = {'owner': instance.request.user,\n 'mouse': animal,\n 'date': date,\n 'result': result,\n 'lighting': lighting,\n 'rig': rig,\n 'imaging': imaging,\n 'bonsai_path': bonsai_path,\n 'avi_path': avi_path,\n 'track_path': track_path,\n 'sync_path': sync_path,\n 'fluo_path': fluo_path,\n 'tif_path': tif_path,\n 'screen_path': screen_path,\n 'dlc_path': dlc_path,\n 'notes': notes,\n 'animal2': animal2}\n pprint(new_entry)\n return new_entry", "def get_file_format(file):\n flag = None\n with open(file) as f:\n for line in f.readlines():\n MAT, MF, MT = read_control(line)[:3]\n if MF == 1 and MT == 451:\n i = 0\n C, i = read_cont([line], i)\n flag = C.N1\n break\n if flag is None:\n ftype = None\n elif flag == -11 or flag == -12:\n ftype = \"errorr\"\n elif flag == -1:\n ftype = \"gendf\"\n else:\n if C.L1 == 2:\n ftype = \"pendf\"\n else:\n ftype = \"endf6\"\n return ftype", "def band_url(scene, band):\n\n img = scene + '_B' + str(band) + '.TIF'\n url_components = scene.split('_')\n sensor, level, path, row = url_components[0], url_components[5], url_components[2][:3], url_components[2][3:]\n \n return GOOGLE_STORAGE + sensor + '/' + level + '/' + path + '/' + row + '/' + scene + '/' + img", "def Fragment_b_file(self):\n return self.Fragment_b_file_name", "def create_filename (self):\n\t\tassert self.__patient_name and self.__location_name, \"New filename could not be determined, one or more needed arguments is empty!\"\n\t\t_patient_name = self.__patient_name.split(' ')\n\t\t_patient_name.reverse()\n\t\t\n\t\treturn os.path.join(os.path.dirname(self.file._path), \"%s MR %s%s\" % (self.__location_name, ', '.join(_patient_name).upper(), self._file.extension))", "def _get_output_filename(dataset_dir):\n return os.path.join(dataset_dir, 'pokemon.tfrecord')", "def name(self):\n\t\tnam = super( textureFile, self ).name\n\t\tif self.hasUdim:\n\t\t\ttry:\n\t\t\t\treturn nam[:nam.rindex( '.' 
)]\n\t\t\texcept:\n\t\t\t\treturn nam\n\t\treturn nam", "def _get_abgp_file_basename(OPTIONS):\n if OPTIONS.target:\n try:\n num_loci = OPTIONS.selected_num_loci\n except:\n num_loci = len(OPTIONS.loci) + len(OPTIONS.dnafiles)\n return \"%s.%s%sSL.\" % (ABFGP_VERSION,OPTIONS.target,num_loci)\n else:\n return \"%s.\" % ABGP_VERSION", "def get_filename(self):\n name, ext = self.fkit.filename.rsplit('.', 1)\n if self._field.extension():\n ext = self._field.extension()\n return '.'.join((name, ext))", "def _file_name(self, dtype_out_time, extension='nc'):\n out_lbl = utils.io.data_out_label(self.intvl_out, dtype_out_time,\n dtype_vert=self.dtype_out_vert)\n in_lbl = utils.io.data_in_label(self.intvl_in, self.dtype_in_time,\n self.dtype_in_vert)\n ens_lbl = utils.io.ens_label(self.ens_mem)\n yr_lbl = utils.io.yr_label((self.start_date.year, self.end_date.year))\n return '.'.join(\n [self.name, out_lbl, in_lbl, self.model.name,\n self.run.name, ens_lbl, yr_lbl, extension]\n ).replace('..', '.')", "def dna_fref(self):\n return os.path.basename( self.dirname )", "def get_bf_name(qcowf, backing_file_offset_start, backing_file_size):\n if int(backing_file_offset_start) == 0:\n return -1 #if backing missed\n else:\n int_bf_offset = int(backing_file_offset_start)\n int_bf_size = int(backing_file_size)\n\n qcowf.seek(int_bf_offset)\n info = qcowf.read(int_bf_size) #read all backing file bytes\n info = struct.unpack(str(int_bf_size)+'s', info) #unpack bf info\n return str(info[0])", "def Fragment_b_spin_file_name(self):\n return self.Fragment_b_spin_file", "def _recordIsWGS(recordStr) :\n lines = recordStr.split(\"\\n\")\n WGS_lines = [x for x in lines if x.startswith(\"WGS \")]\n if len(WGS_lines) == 1 :\n return WGS_lines[0]\n elif len(WGS_lines) == 0 :\n return False\n else :\n raise Exception(\"Several lines starting with \\\"WGS \\\" in a GenBank record\")", "def _retrosheet_filename(game_id, data_root):\n # game id is TTTYYYYMMDDN.\n team = game_id[:3]\n year = game_id[3:7]\n file_pattern = year + team + \".EV*\"\n file_path = os.path.join(data_root, \"retrosheet\", year, file_pattern)\n file_matches = glob.glob(file_path)\n return file_matches[0] if len(file_matches) else None", "def get_chip_fname_fmt(ibs=None, suffix=None):\n if suffix is None:\n chip_cfg = ibs.cfg.chip_cfg\n chip_cfgstr = chip_cfg.get_cfgstr() # algo settings cfgstr\n chip_cfgfmt = chip_cfg['chipfmt'] # png / jpeg (BUGS WILL BE INTRODUCED IF THIS CHANGES)\n suffix = chip_cfgstr + chip_cfgfmt\n # Chip filenames are a function of annotation_rowid and cfgstr\n _cfname_fmt = ('aid_%d' + suffix)\n return _cfname_fmt", "def get_validation_file_name(self):\n name = self.test_name + \" (T\" + str(self.test_index) + \"_P\" + str(self.parameters_common_index) + \".\" + \\\n str(self.parameters_fs_index) + \".\" + \\\n str(self.parameters_helper_index) + \".\" + \\\n str(self.parameters_incremental_index)\n\n if self.replay_source is not None:\n name = name + \"_\"+ self.replay_source\n\n if self.helper_decoders_one_class:\n name = name + \"_1\"\n\n name = name + \")\"\n\n return name", "def get_record_name(extension, presentation=None, filename=None, path=\".\"):\r\n if presentation is not None:\r\n recordname = make_record_name(presentation)\r\n elif filename is not None:\r\n recordname = filename\r\n else:\r\n return None\r\n\r\n count = 0\r\n tempname = recordname\r\n\r\n # Add a number to the end of a duplicate record name so we don't\r\n # overwrite existing files\r\n while(os.path.exists(os.path.join(path, \"%s.%s\" % (tempname, 
extension)))):\r\n tempname = \"{0}-{1}\".format(recordname, count)\r\n count += 1\r\n\r\n recordname = \"%s.%s\" % (tempname, extension)\r\n\r\n return recordname", "def get_filename_from_stage(stage: str, device: TorchDevice) ->str:\n if stage not in [PREPROCESSOR, PREDICTOR, POSTPROCESSOR]:\n raise ValueError(f'Invalid stage: {stage}.')\n if stage == PREDICTOR:\n return f'inference_{stage}-{device}.pt'\n else:\n return f'inference_{stage}.pt'", "def hap_filename(self, filetype):\n if filetype == 'events':\n return self.folder('events') / 'run_{:07d}_{}_eventlist.fits'.format(self.obs_id, self.hap_config)\n # return self.folder('events') / 'events_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'aeff':\n return self.folder('irfs') / 'aeff_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'edisp':\n return self.folder('irfs') / 'edisp_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'psf_3gauss':\n return self.folder('irfs') / 'psf_3gauss_{:06d}.fits.gz'.format(self.obs_id)\n else:\n raise ValueError('Invalid {} {}'.format(filetype))", "def hap_filename(self, filetype):\n if filetype == 'events':\n return self.folder('events') / 'run_{:07d}_{}_eventlist.fits'.format(self.obs_id, self.hap_config)\n # return self.folder('events') / 'events_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'aeff':\n return self.folder('irfs') / 'aeff_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'edisp':\n return self.folder('irfs') / 'edisp_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'psf_3gauss':\n return self.folder('irfs') / 'psf_3gauss_{:06d}.fits.gz'.format(self.obs_id)\n else:\n raise ValueError('Invalid {} {}'.format(filetype))", "def hap_filename(self, filetype):\n if filetype == 'events':\n return self.folder('events') / 'run_{:07d}_{}_eventlist.fits'.format(self.obs_id, self.hap_config)\n # return self.folder('events') / 'events_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'aeff':\n return self.folder('irfs') / 'aeff_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'edisp':\n return self.folder('irfs') / 'edisp_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'psf_3gauss':\n return self.folder('irfs') / 'psf_3gauss_{:06d}.fits.gz'.format(self.obs_id)\n else:\n raise ValueError('Invalid {} {}'.format(filetype))", "def filename(self):\n return os.path.basename(self._spatial_filename)", "def GetFileName(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMF2_GetFileName(self)", "def t9_loadSMFile(self):\n print \"spectrumFile\"\n filename = tkFileDialog.askopenfilename()\n filename = \"data/\" + self.filenameparser(filename)\n orgin_file = self.filenameparser(filename).split('.')[0]\n self.t9_smfilename = filename\n self.t9_orginfilename = orgin_file", "def get_format(fstr):\n fstr = fstr.lower() # support uppercase letters\n if os.sep in fstr:\n fstr = fstr.split(os.sep)[-1]\n try:\n fname, ext = fstr.split(\".\", 1)\n except:\n fname, ext = (\"\", \"\")\n\n if ext.startswith(\"bm\"):\n return FORMAT_BMP\n elif ext == \"txt\":\n return FORMAT_CAR\n elif ext == \"ta.csv\":\n return FORMAT_TA_CSV\n elif ext == \"fin\":\n return FORMAT_FIN\n elif ext == \"hul\":\n return FORMAT_HUL\n elif ext in [\"ncp\"]:\n return FORMAT_NCP\n elif ext in [\"prm\", \"m\"]:\n return FORMAT_PRM\n elif ext == \"rim\":\n return FORMAT_RIM\n elif ext == \"w\":\n return FORMAT_W\n else:\n return FORMAT_UNK", "def _get_station_filename():\n output_dir = os.path.join(output, state, station)\n if not os.path.isdir(output_dir):\n 
logger.debug(\"Creating directory %s\", output_dir)\n os.makedirs(output_dir)\n return os.path.join(output_dir, \"%s.%s\" % (c_time, format))", "def file(self):\n if photos_settings.FORMATED_PHOTO_FILENAME is not None:\n return photos_settings.FORMATED_PHOTO_FILENAME(self)\n source_file = path.split(self.photo.image.name)\n return path.join(source_file[0], str(self.format.id) + '-' + source_file[1])", "def filename(self):\n return '%s%s' % (self.identifier, self.extension)", "def _parse_ext(raw_fname, verbose=False):\n fname, ext = os.path.splitext(raw_fname)\n # BTi data is the only file format that does not have a file extension\n if ext == '' or 'c,rf' in fname:\n if verbose is True:\n print('Found no extension for raw file, assuming \"BTi\" format and '\n 'appending extension .pdf')\n ext = '.pdf'\n # If ending on .gz, check whether it is an .nii.gz file\n elif ext == '.gz' and raw_fname.endswith('.nii.gz'):\n ext = '.nii.gz'\n fname = fname[:-4] # cut off the .nii\n return fname, ext", "def get_filename(key):\n filename = str(key)\n filename = filename.replace('/', '_')\n filename = filename.replace('InceptionResnetV2_', '')\n\n # remove \"Repeat\" scope from filename\n filename = re_repeat.sub('B', filename)\n\n if re_block8.match(filename):\n # the last block8 has different name with the previous 9 occurrences\n filename = filename.replace('Block8', 'Block8_10')\n elif filename.startswith('Logits'):\n # remove duplicate \"Logits\" scope\n filename = filename.replace('Logits_', '', 1)\n\n # from TF to Keras naming\n filename = filename.replace('_weights', '_kernel')\n filename = filename.replace('_biases', '_bias')\n\n return filename + '.npy'", "def dataset_part_filename(dataset_part, num_data):\n if num_data >= 0:\n return '{}_data_{}.npz'.format(dataset_part, str(num_data))\n return '{}_data.npz'.format(dataset_part)", "def GetFileName(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMF3_GetFileName(self)", "def title(self):\n if self.file_name is None:\n return None\n else:\n fname = os.path.split(self.file_name)[-1]\n fname, *ext = fname.rsplit('.', 1)\n procgen = ext and ext[0] in ('json', 'yaml')\n if procgen and self._seed and self._seed.spawn_key:\n # Append the spawn key as the episode number\n fname += '-e' + str(self._seed.spawn_key[-1])\n return fname", "def _get_file_path(self, epoch, logs):\n # pylint: disable=protected-access\n if not self.model._in_multi_worker_mode(\n ) or multi_worker_util.should_save_checkpoint():\n try:\n # `filepath` may contain placeholders such as `{epoch:02d}` and\n # `{mape:.2f}`. A mismatch between logged metrics and the path's\n # placeholders can cause formatting to fail.\n return self.filepath.format(epoch=epoch + 1, **logs)\n except KeyError as e:\n raise KeyError('Failed to format this callback filepath: \"{}\". '\n 'Reason: {}'.format(self.filepath, e))\n else:\n # If this is multi-worker training, and this worker should not\n # save checkpoint, we use a temp filepath to store a dummy checkpoint, so\n # it writes to a file that will be removed at the end of `_save_model()`\n # call. 
This is because the SyncOnReadVariable needs to be synced across\n # all the workers in order to be read, and all workers need to initiate\n # that.\n self._temp_file_dir = tempfile.mkdtemp()\n extension = os.path.splitext(self.filepath)[1]\n return os.path.join(self._temp_file_dir, 'temp' + extension)", "def data_fname(self):\n # NOTE: second argument of rsplit() is 'maxsplit'\n return self.fname.rsplit('.', 1)[0] + '.dat'", "def epoch_filepath(epoch, source):\n full_source = full_source_labels[source]\n filename = f'{full_source}_{epoch}.dat'\n return os.path.join(OBS_DATA_PATH, source, filename)", "def _get_available_wav_basename(label, basedir):\n cont = 0\n label = os.path.join(basedir, label)\n wav_name = label + \".wav\"\n if os.path.exists(wav_name):\n while True: # search an inexistent name for new gmm\n wav_name = label + \"\" + str(cont) + \".wav\"\n if not os.path.exists(wav_name):\n break\n cont = cont + 1\n else:\n open(label+\".wav\",'w').close()\n return label\n open(label+str(cont)+\".wav\",'w').close()\n return label + str(cont)\n #end _get_available_wav_basename", "def reFileName(str_):\n rv = 'None', str_\n m = re.match(r'((?:[a-zA-Z0-9-]){4,})_(.*)$', str_)\n if m:\n rv = m.group(1), m.group(2)\n else:\n m = re.match(r'(\\d+-\\d+)\\.-\\.(.*)$', str_)\n if m:\n rv = m.group(1), m.group(2)\n return rv", "def get_url_from_era_def(_era,is_signal,maod_version): \n var_format_val=10\n\n GD_File='config/GoogleDocLink'+maod_version+'.txt'\n if is_signal:\n GD_File='config/GoogleDocLinkSignal'+maod_version+'.txt'\n\n GD_File_READ = open (GD_File,\"r\")\n\n for line in GD_File_READ:\n if len(line.split()) ==2 :\n if line.split()[0] == _era:\n GD_File_READ.close()\n return line.split()[1]\n print ('Error in assigning GD page from era')\n return '-11111'", "def getSlavename():", "def get_filename(label):\n return op.splitext(op.splitext(op.basename(label))[0])[0]", "def get_run(fname, full=False):\n basename = path.basename(fname)\n run = \"\"\n if \"GA_\" in basename:\n if \"_S_\" in basename:\n # example: GA_za05to35_8_1743990to1745989_S_wr_2.root\n if \"_S_\" in basename:\n run = basename.split(\"_\")[3]\n # GA_M1_za35to50_8_1740993_Y_wr.root\n else:\n run = basename.split(\"_\")[4]\n elif (\"_Y_\" in basename) or (\"_I_\" in basename):\n # 20161008_M1_05057440.005_Y_CrabNebula-W0.40+215.root or\n # eventfiltered_20161008_05057440.005_Y_CrabNebula-W0.40+215.root\n runid = basename.split(\"_\")[2]\n if full is False:\n run = runid.split(\".\")[0]\n else:\n run = runid\n elif (\"_S_\" in basename) or (\"_Q_\" in basename):\n run = basename.split(\"_\")[1]\n else:\n raise NotImplementedError(f\"Wrong filename: {fname}, runname retrieval is undefined\")\n return run", "def get_file_name(x, feature_name, ext='npy'):\n # this is kind-of standard\n name = '.'.join(x.split('.')[:-1])\n filename = '{}.{}.{}'.format(name, feature_name, ext)\n return filename", "def filename(self):\n _, tail = os.path.split(self.url)\n return self.folder + '/' + tail[:-4] + '/' + tail[:-3] + 'shp'", "def which_band_is_file(filename):\n if not is_wise_file(filename):\n return None\n if \"-w1-\" in filename: return \"wisew1\"\n elif \"-w2-\" in filename: return \"wisew2\"\n elif \"-w3-\" in filename: return \"wisew3\"\n elif \"-w4-\" in filename: return \"wisew4\"\n else: return \"unknown\"", "def filename(self, age, metal, imf=None):\n imf = 1.3 if imf is None else imf\n msign = \"p\" if metal >= 0. else \"m\"\n azero = \"0\" if age < 10. 
else \"\"\n fname = \"Ebi{0:.2f}Z{1}{2:.2f}T{3}{4:02.4f}_iTp0.00_baseFe.fits\".format(\n imf, msign, abs(metal), azero, age)\n return os.path.join(self.data_dir, fname)", "def Fragment_a_file(self):\n return self.Fragment_a_file_name", "def get_filename(\n self,\n name,\n ext=\".npz\",\n map_tag=None,\n iter_index=None,\n extra_tag=None,\n bp_opts=False,\n ):\n if self.output_root is None:\n return None\n\n if bp_opts:\n if self.ensemble_mean:\n name = \"{}_mean\".format(name)\n elif self.ensemble_median:\n name = \"{}_median\".format(name)\n elif self.sim_index is not None:\n name = \"{}_sim{:04d}\".format(name, self.sim_index)\n if self.signal_type_sim:\n name = \"{}_{}\".format(name, self.signal_type_sim)\n if self.noise_type_sim:\n name = \"{}_{}\".format(name, self.noise_type_sim)\n else:\n if self.data_type != \"raw\":\n name = \"{}_{}\".format(name, self.data_type)\n if getattr(self, \"template_cleaned\", False):\n name = \"{}_clean_{}\".format(name, self.template_type)\n if getattr(self, \"planck_sub\", False):\n name = \"{}_planck_sub\".format(name)\n if self.weighted_bins:\n name = \"{}_wbins\".format(name)\n if getattr(self, \"return_cls\", False):\n name = \"{}_cl\".format(name)\n\n if map_tag is not None:\n name = \"{}_map_{}\".format(name, map_tag)\n if iter_index is not None:\n name = \"{}_iter{:03d}\".format(name, iter_index)\n if extra_tag is not None:\n name = \"{}_{}\".format(name, extra_tag)\n\n tag = \"_{}\".format(self.output_tag) if self.output_tag else \"\"\n if not ext.startswith(\".\"):\n ext = \".{}\".format(ext)\n return os.path.join(self.output_root, \"{}{}{}\".format(name, tag, ext))", "def filename_type(filename):\n import re\n\n nii_re = re.compile(\".+(nii.gz)$|.+(nii)$\")\n npy_re = re.compile(\".+(npy)$|.+(npz)$\")\n\n\n if len(nii_re.findall(filename)):\n return 'nii'\n elif len(npy_re.findall(filename)):\n return 'npy'\n return None", "def get_flowgram_ali_exe():\r\n return \"FlowgramAli_4frame\"", "def get_nomenclature_channel_fname(czi_fname, nomenclature_file, channel_name, ext='.inr.gz'):\n # - Read NOMENCLATURE file defining naming conventions:\n n_names = get_nomenclature_name(nomenclature_file)\n return n_names[czi_fname]+\"/\", n_names[czi_fname] + \"_\" + channel_name + ext", "def generate_file_name(well, channel, desc):\n \n return \"bPLATE_w\" + well + \"_\" + desc + \"_c\" + channel + \".png\"", "def mk_filename(pattern, queryresponserow, resp, url):\n name = None\n if resp:\n cdheader = resp.headers.get(\"Content-Disposition\", None)\n if cdheader:\n _, params = parse_header(cdheader)\n name = params.get('filename', \"\")\n # Work around https://github.com/sunpy/sunpy/issues/3372\n if name.count('\"') >= 2:\n name = name.split('\"')[1]\n\n # This is a hack to to prevent IRIS data from being labelled as XML files\n if name is None and \"VOEvent_IRIS\" not in queryresponserow['fileid']:\n # Advice from the VSO is to fallback to providerid + fileid for a filename\n # As it's possible multiple providers give the same fileid.\n # However, I haven't implemented this yet as it would be a breaking\n # change to the filenames we expect.\n fileid = queryresponserow['fileid']\n\n # Some providers make fileid a path\n # Some also don't specify a file extension, but not a lot we can do\n # about that.\n name = fileid.split(\"/\")[-1]\n\n # If somehow we have got this far with an empty string, fallback to url segment\n if not name:\n name = url.split('/')[-1]\n\n # Remove any not-filename appropriate characters\n name = slugify(name)\n\n # If absolutely 
everything else fails make a filename based on download time\n if not name:\n name = f\"vso_file_{datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')}\"\n\n fname = pattern.format(file=name,\n **queryresponserow.response_block_map)\n\n return fname", "def GetFileName(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMD2_GetFileName(self)", "def filename(self, *args) -> \"PyObject *\":\n return _ida_fpro.qfile_t_filename(self, *args)", "def eval(self):\n if self.magic:\n return self.magic\n if not self.filename:\n return file_pattern.format(self.alias)\n return self.path", "def filename(self, p: Position) -> Optional[str]:\n kind = self.kind\n h = p.h.rstrip()\n if kind == 'adoc':\n m = self.adoc_pattern.match(h)\n if m:\n prefix = m.group(1)\n return h[1 + len(prefix) :].strip()\n return None\n if kind in ('pandoc', 'sphinx'):\n prefix = f\"@{kind}\"\n if g.match_word(h, 0, prefix):\n return h[len(prefix) :].strip()\n return None\n g.trace('BAD KIND', kind)\n return None", "def getFileName(self, date, ext='nc4'):\n\n if self.collection is None:\n raise Exception('Invalid collection, check data exists!')\n if ext[0] == '.':\n ext = ext[1:]\n \n datefmt = '%Y%m'\n if self._F in (1, 3, 6, 'D'): # For all frequencies less than or equal to one (1) day\n datefmt += '%d'\n\n dstr = date.strftime( datefmt )\n tmp = [self.collection, dstr, ext]\n if self._type == 'M2':\n tmp = [ getMerraStream(date) ] + tmp \n return '.'.join( tmp )", "def _get_filename_from_url(self) -> Optional[str]:\n file_name_portion = None\n\n right_portion = self.url.rsplit(\"/\", 1)\n if len(right_portion) == 2:\n # split any potential query params - these start with \"?\"\"\n file_name_portion = right_portion[1].split(\"?\")[0].strip()\n\n if len(file_name_portion) == 0:\n file_name_portion = None\n\n return file_name_portion", "def _filename(self, corotid):\n from datasource import DataSource\n self.corotid = corotid\n self.corot = DataSource(database='corot', user='sro', host='pina.aip.de')\n \n query = \"\"\"SELECT run_code, hlfccdid, win_id \n FROM corot \n WHERE corotid = %d;\"\"\" % self.corotid\n result = self.corot.query(query)\n \n par = {'run': result[0][0],\n 'half': result[0][1].rstrip('RL'), \n 'win': result[0][2]}\n filename = '/work2/jwe/CoRoT/%(run)s/data/%(run)s_%(half)s_%(win)04d.fits' % par\n logger.info('%d = %s' % (corotid,filename))\n return filename", "def out_filename(self, filetype, format='old', dir=Location.OUT_DIR):\n filename = self.filename(filetype=filetype, format=format)\n #return Path(dir) / filename\n return filename", "def file(self):\n\n dlos_filename = super(DlosPhotoz, self).file()\n\n photoz_str = 'DLOS_photoz_'\n \n file_name = photoz_str.join( \n dlos_filename.split('DLOS_')\n ) \n\n return file_name", "def _get_job_name(path, beam):\n file_parts = os.path.splitext(os.path.basename(MADX_TEMPLATE))\n out_parts = [file_parts[0].replace(\"template\", \"job\"),\n \"b{:d}\".format(beam),\n file_parts[1].strip(\".\")]\n return os.path.join(path, \".\".join(out_parts))", "def _detect_format(directory):\n if os.path.exists(os.path.join(directory, 'solution.nc')):\n return 'netcdf'\n else:\n return 'csv'", "def ez_filename(self, ez):\n return ez.index + '_' + ez['TAXPER'] + '_990EZ'", "def GetModelName(filename, model):\n\n is_srn_model = translator.IsSrnModel(model)\n if(is_srn_model):\n model_name = filename + \"SrnModel\"\n else:\n model_name = filename + \"CellCycleModel\"\n\n return model_name", "def GetFileName(self) -> \"char const *\":\n return 
_itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMD3_GetFileName(self)" ]
[ "0.58643293", "0.57457685", "0.5731537", "0.5680883", "0.5600236", "0.55516845", "0.55441797", "0.55333227", "0.5529272", "0.55155426", "0.54681367", "0.5426146", "0.541352", "0.5407", "0.54030865", "0.537808", "0.5370207", "0.53593916", "0.53360903", "0.5330803", "0.5321981", "0.53100234", "0.5309468", "0.5305212", "0.52963525", "0.527246", "0.52604026", "0.5250942", "0.5226428", "0.5224549", "0.52125424", "0.51932955", "0.51880443", "0.518767", "0.51755035", "0.5171913", "0.5168858", "0.51662296", "0.51620865", "0.515999", "0.5151132", "0.5131518", "0.5125167", "0.5111184", "0.510613", "0.50992435", "0.50867003", "0.50866896", "0.5084397", "0.5080228", "0.50753427", "0.5070971", "0.5070971", "0.5070971", "0.5053173", "0.504666", "0.5038532", "0.5033107", "0.5032305", "0.50292903", "0.5026008", "0.50255346", "0.5024544", "0.5018986", "0.5015994", "0.5012667", "0.50041115", "0.5002144", "0.5000751", "0.49951795", "0.49888742", "0.4988823", "0.49825296", "0.497752", "0.49764904", "0.49738005", "0.49726954", "0.4966619", "0.49649405", "0.49603638", "0.4953223", "0.49513504", "0.49458227", "0.4936939", "0.4923021", "0.49216336", "0.4916135", "0.49159968", "0.4913425", "0.49117437", "0.4900672", "0.48960835", "0.4892797", "0.48916903", "0.48809606", "0.4878049", "0.48759288", "0.48723742", "0.4867585", "0.48585546" ]
0.6297846
0
Pipeline for process of scraping new data Each step in the pipeline has corresponding directory of plugins. Plugins are dynamically loaded based on files in the corresponding dir.
def scrape_pipeline(args): kickoff = args.kickoff fname = args.fname d = DbHelper() s = Scraper() c = Crawler(20) if fname is not None: app_names = pd.read_csv(fname)['packageName'].tolist() apps = [list(a) for a in zip(app_names, d.app_names_to_uuids(app_names))] else: apps = None # start by updating top apps if not args.skip_top: logger.info("getting top apps...") new_top_list = c.get_top_apps_list() logger.info("scraping top apps not in DB...") s.scrape_missing(new_top_list, compare_top=True) logger.info("updating top apps...") d.update_top_apps(new_top_list) if kickoff == True: s = None if fname is None: # use crawler to get list of package names logger.error("Crawler for package names not implemented yet") return else: # use specified file of package names s = Scraper(input_file=fname) # use scraper logger.info("Starting efficient scrape...") s.efficient_scrape() logger.info("...efficient scrape done") else: # use updater logger.info("Starting updater...") if fname is None: u = Updater() else: u = Updater(input_file=fname) u.update_apps() logger.info("...update done") # crawl privacy policies c.crawl_app_privacy_policies(app_list=apps) if args.no_decompile: # download only logger.info("Starting download...") downloader = Downloader() if apps is None: downloader.download_all_from_db(top=True) else: downloader.download(apps) logger.info("...done") else: # download/decompile logger.info("Starting download and decompile...") download_decompile_all() logger.info("...download and decompile done") logger.info("run analysis pipeline now")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collectPlugins(self):\n\t\tself.locatePlugins()\n\t\tself.loadPlugins()", "async def load_plugins(self):\n for plug in os.listdir('plugins'):\n if plug.startswith('.'):\n continue\n if not os.path.isdir('plugins/%s' % plug) or not os.path.isfile('plugins/%s/hook.py' % plug):\n self.log.error('Problem locating the \"%s\" plugin. Ensure CALDERA was cloned recursively.' % plug)\n exit(0)\n plugin = Plugin(name=plug)\n if await plugin.load():\n await self.get_service('data_svc').store(plugin)\n if plugin.name in self.config['plugins']:\n plugin.enabled = True\n for plugin in self.config['plugins']:\n plug = await self._services.get('data_svc').locate('plugins', match=dict(name=plugin))\n [await p.enable(self.get_services()) for p in plug]\n self.log.debug('Enabling %s plugin' % plugin)\n\n templates = ['plugins/%s/templates' % p.name.lower()\n for p in await self.get_service('data_svc').locate('plugins')]\n templates.append('templates')\n aiohttp_jinja2.setup(self.application, loader=jinja2.FileSystemLoader(templates))", "def dataComponent_pluginBuild(self, **kwargs):\n str_path = '/plugins'\n self.PT = plugin.Plugin_DS(within = self)\n P = self.PT._pluginTree\n\n for key,val in kwargs.iteritems():\n if key == 'path': str_path = val\n\n s = self.contents\n if s.cd(str_path)['status']:\n s.mknode(['run'])\n s.mkcd('available')\n for d in P.lstr_lsnode('/plugins'):\n s.graft(P, '/plugins/%s' % d)", "def dataComponent_pluginRun(self, **kwargs):\n str_outputPath = '/plugin'\n str_inputPath = '/dataView/files'\n\n str_plugin = 'mri_convert'\n for key,val in kwargs.iteritems():\n if key == 'plugin': str_plugin = val\n if key == 'inputPath': str_inputPath = val\n if key == 'outputPath': str_outputPath = val\n\n s = self.contents\n\n s.cd('/plugins/run')\n rand_date = self.fake.date_time_this_decade()\n str_timestamp = rand_date.isoformat()\n s.mkcd('%s-mri_convert' % str_timestamp)\n\n output = data()\n output.contents_build_1(SeriesFilesCount = 10)\n o = output.contents\n s.graft(o, '/dataView')\n s.graft(o, '/plugins')\n\n # o.tree_metaData_print(False)\n # print(o)\n\n #\n # if s.cd(str_outputPath)['status']:\n # rand_date = self.fake.date_time_this_decade()\n # str_timestamp = rand_date.isoformat()\n # l_run = s.lstr_lsnode()\n # str_newRun = str(len(l_run))\n # s.mkcd(str_newRun)\n # s.touch('timestamp', str_timestamp)\n # s.mknode(['parameters', 'results', 'info'])\n # s.touch('info/detail', {\n # str_plugin: data._dict_plugin[str_plugin]\n # })\n # s.touch('parameters/input', {\n # str_plugin: '<some dictionary of all input parameters>'\n # })\n # s.cd('results')\n # str_outputPath = s.cwd()\n # if str_plugin.lower() != 'pacspull' and str_plugin.lower() != 'mri_convert':\n # self.dataComponent_build(\n # path = s.cwd(),\n # plugin = str_plugin\n # )\n # if str_plugin.lower() == 'mri_convert':\n # inputTree = C_snode.C_stree()\n # inputTree.cd('/')\n # inputTree.graft(s, str_inputPath)\n #\n # self.dataComponent_build(\n # path = str_outputPath,\n # plugin = str_plugin,\n # tree_convertFrom = inputTree,\n # type_convertTo = \"nii\"\n # )", "def request_plugins(self):", "def __load_plugins(self, directory_to_search, plugin_files):\n\n if os.path.abspath(directory_to_search) not in sys.path:\n sys.path.insert(0, os.path.abspath(directory_to_search))\n\n for next_plugin_file in plugin_files:\n next_plugin_module = next_plugin_file[0:-3]\n plugin_class_name = self.__snake_to_camel(next_plugin_module)\n self.__attempt_to_load_plugin(\n next_plugin_module, plugin_class_name, next_plugin_file\n 
)", "def plugins():\n pass", "def setup(self):\n rc = self.rc\n try:\n for plugin in self.plugins:\n plugin.setup(rc)\n except Exception as e:\n self.exit(e)", "def handle_loadall(bot, ievent):\n plugs.loadall(plugin_packages, force=True)\n ievent.done()", "def run_pipeline(directory):\n\n # io = IO(path)\n # df = io.load_cleaned_file(download_always=False)\n # df = add_choke_events(df)\n\n # Add calls to features.Xxx here\n\n #directory = main_directory\n site=os.listdir(directory)\n site_dicom={}\n site_dicom_sub={}\n site_sub_files={}\n i,k,j=0,0,0\n for filename in site:\n site_dicom[i]=directory+'/'+filename+'/DICOM-raw'\n temporary_path=os.listdir(site_dicom[i])\n\n for another_file in temporary_path:\n site_dicom_sub[j]=site_dicom[i]+'/'+another_file+'/scans'\n temporary_path_1 = os.listdir(site_dicom_sub[j])\n for another_file_1 in temporary_path_1:\n site_sub_files[k]=site_dicom_sub[j]+'/'+another_file_1+'/'\n k=k+1\n j = j + 1\n i=i+1\n splitted={}\n output_mif={}\n for i in range (len(site_sub_files)):\n splitted[i]=site_sub_files[i].split('/')\n output_mif[i]=directory+'/'+splitted[i][5]+'/MIF-raw/'+splitted[i][5]+'_'+splitted[i][7]+'_'+splitted[i][9]+'.mif'\n\n\n # save (or return) dataframe here?\n return site_sub_files,output_mif", "def _import_plugins(self) -> None:\n logger.debug('Importing plugins')\n self._pm = pluggy.PluginManager('sirbot')\n self._pm.add_hookspecs(hookspecs)\n\n for plugin in self.config['sirbot']['plugins']:\n try:\n p = importlib.import_module(plugin)\n except (ModuleNotFoundError, ):\n if os.getcwd() not in sys.path:\n sys.path.append(os.getcwd())\n p = importlib.import_module(plugin)\n else:\n raise\n self._pm.register(p)", "def load_plugins(self):\n self.__doing('load_plugins')\n self.__do_if_not_done('bootstrap')\n if self.env.mode in ('dummy', 'unit_test'):\n return\n for package in self.packages:\n self.add_package(package)", "def _package_plugins(ctx):\n print(\"\\n\\n-- Creating Zip Files \\n\")\n\n project_dir = Path(__file__).parent\n plugins_projects = [\n x for x in (project_dir / \"build/build_directory_for_tests/\").iterdir() if x.is_dir()\n ]\n artifacts_dir = project_dir / \"build/artifacts\"\n\n plugins_zip = project_dir / \"build/plugin_zip\"\n if plugins_zip.exists():\n shutil.rmtree(plugins_zip)\n\n plugins_zip.mkdir()\n\n for project in plugins_projects:\n plugins_dirs = [\n x for x in (project / \"plugin\").iterdir() if x.is_dir() and (x / \"assets\").exists()\n ]\n hm_generator = HookManGenerator(\n hook_spec_file_path=project_dir / f\"tests/plugins/{project.name}/hook_specs.py\"\n )\n\n for plugin in plugins_dirs:\n (plugin / \"artifacts\").mkdir()\n if sys.platform == \"win32\":\n shutil.copy2(src=artifacts_dir / f\"{plugin.name}.dll\", dst=plugin / \"artifacts\")\n else:\n shutil.copy2(src=artifacts_dir / f\"lib{plugin.name}.so\", dst=plugin / \"artifacts\")\n\n hm_generator.generate_plugin_package(\n package_name=plugin.name, plugin_dir=plugin, dst_path=plugins_zip\n )", "def install(ctx, plugins, **kwargs):\n global logger\n logger = ctx.logger\n\n for plugin in plugins:\n ctx.logger.info(\"Installing plugin {0}\".format(plugin['name']))\n\n if plugin['name'] == 'default_workflows':\n # special handling for the default workflows plugin, as it does not\n # currently sit on the file server and is not under a blueprint\n # context\n plugin['url'] = '/opt/manager/cloudify-manager-{}/workflows'\\\n .format(manager_branch)\n install_celery_plugin(plugin)\n continue\n\n if \"folder\" in plugin:\n\n # convert the folder into a url 
inside the file server\n\n management_ip = get_cosmo_properties()[\"management_ip\"]\n if management_ip:\n plugin[\"url\"] = \\\n \"http://{0}:53229/blueprints/{1}/plugins/{2}.zip\"\\\n .format(management_ip, ctx.blueprint_id, plugin['folder'])\n\n ctx.logger.info(\"Installing plugin from URL --> {0}\"\n .format(plugin['url']))\n install_celery_plugin(plugin)", "def initPlugins(plugins):\r\n method = moduleName + '.initPlugins'\r\n #Graph.logQ.put( [logType , logLevel.DEBUG , method , \"entering\"])\r\n Graph.logQ.put( [logType , logLevel.INFO , method , 'Starting to intialize the plugins'])\r\n \r\n # For general plugins, there are four types declared in the 'pluginType' attribute:\r\n # EngineService - Plugin class extends threading.Thread. Plugin.run() will run for the lifetime of the server\r\n # Service - Plugin class extends threading.Thread. Plugin.run() and Plugin.join() will be called as needed\r\n # Utility - Plugin is normal callable object. Plugin.execute() returns a value and is called as needed\r\n \r\n Graph.logQ.put( [logType , logLevel.INFO , method , '..Initializing service and utility plugins'])\r\n \r\n for pluginKey in plugins:\r\n plugin = plugins[pluginKey]\r\n pluginType = plugin[\"pluginType\"] \r\n name = plugin[\"name\"]\r\n modName = \"Tioga.Plugins.\" + plugin[\"Module\"]\r\n try:\r\n mod = importlib.import_module(modName)\r\n except Exception as e:\r\n #raise e\r\n #debug\r\n #mod = Fileutils.getModuleFromResolvedPath(modName)\r\n pass\r\n \r\n dtParams = plugin[\"PluginParemeters\"]\r\n plugin['module'] = mod\r\n plugin['moduleName'] = moduleName\r\n plugin['params'] = dtParams\r\n\r\n \r\n Graph.logQ.put( [logType , logLevel.INFO , method , '....%s plugin from module %s cataloged as %s' %(pluginType, modName, name)])\r\n if pluginType == \"EngineService\":\r\n engineServices.append(plugin)\r\n elif pluginType == \"ArchiveService\":\r\n archiveServices.append(plugin)\r\n elif pluginType == \"InitService\":\r\n initServices.append(plugin)\r\n elif pluginType == \"Service\":\r\n services[name] = plugin\r\n elif pluginType == \"InitUtility\":\r\n initUtils.append(plugin)\r\n else:\r\n utilities[name] = plugin", "def run(self):\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)\n for entry in glob.glob(os.path.join(self.data_folder, self.data_expression)):\n f = open(entry)\n text = json.loads(f.read())\n f.close()\n self.create_page_objects(text)", "def download(self):\n opsys, machine = get_platform()\n _platform = f\"{opsys}_{machine}\"\n\n plugin_dir = f\"{self._temp_dir}/terraform-plugins\"\n\n if not os.path.isdir(plugin_dir):\n os.mkdir(plugin_dir)\n for name, details in self._plugins.items():\n uri = get_url(name, details)\n file_name = uri.split(\"/\")[-1]\n\n click.secho(\n f\"getting plugin: {name} version {details['version']} from {uri}\",\n fg=\"yellow\",\n )\n\n with urllib.request.urlopen(uri) as response, open(\n f\"{plugin_dir}/{file_name}\", \"wb\"\n ) as plug_file:\n shutil.copyfileobj(response, plug_file)\n with zipfile.ZipFile(f\"{plugin_dir}/{file_name}\") as zip_file:\n zip_file.extractall(f\"{plugin_dir}/{_platform}\")\n os.remove(f\"{plugin_dir}/{file_name}\")\n\n files = glob.glob(f\"{plugin_dir}/{_platform}/terraform-provider*\")\n for afile in files:\n os.chmod(afile, 0o755)\n filename = os.path.basename(afile)\n if self._tf_version_major >= 13:\n source = PluginSource(name, details)\n host_dir = os.path.join(plugin_dir, source.host)\n namespace_dir = os.path.join(host_dir, source.namespace)\n provider_dir = 
os.path.join(namespace_dir, name)\n version_dir = os.path.join(provider_dir, details[\"version\"])\n platform_dir = os.path.join(version_dir, _platform)\n os.makedirs(platform_dir, exist_ok=True)\n os.rename(afile, os.path.join(platform_dir, filename))\n else:\n os.rename(afile, f\"{plugin_dir}/{filename}\")\n\n click.secho(f\"plugin installed to: {plugin_dir}/{_platform}/\", fg=\"yellow\")", "def setup_plugins(self) -> None:\n load_success = 0\n load_error = 0\n load_disabled = 0\n\n LOGGER.info(\"Loading plugins...\")\n usable_plugins = plugins.get_usable_plugins(self.settings)\n for name, info in usable_plugins.items():\n plugin_handler, is_enabled = info\n if not is_enabled:\n load_disabled = load_disabled + 1\n continue\n\n try:\n plugin_handler.load()\n except Exception as e:\n load_error = load_error + 1\n LOGGER.exception(\"Error loading %s: %s\", name, e)\n except SystemExit:\n load_error = load_error + 1\n LOGGER.exception(\n \"Error loading %s (plugin tried to exit)\", name)\n else:\n try:\n if plugin_handler.has_setup():\n plugin_handler.setup(self)\n plugin_handler.register(self)\n except Exception as e:\n load_error = load_error + 1\n LOGGER.exception(\"Error in %s setup: %s\", name, e)\n else:\n load_success = load_success + 1\n LOGGER.info(\"Plugin loaded: %s\", name)\n\n total = sum([load_success, load_error, load_disabled])\n if total and load_success:\n LOGGER.info(\n \"Registered %d plugins, %d failed, %d disabled\",\n (load_success - 1),\n load_error,\n load_disabled)\n else:\n LOGGER.warning(\"Warning: Couldn't load any plugins\")", "def load_plugins(hook_plugins, command_plugins):\n for file in find_plugins():\n try:\n module_name = os.path.splitext(os.path.basename(file))[0]\n module = importlib.import_module(PLUGINS_DIR + '.' + module_name)\n for entry_name in dir(module):\n entry = getattr(module, entry_name)\n if not inspect.isclass(entry) or inspect.getmodule(entry) != module:\n continue\n if issubclass(entry, Hook):\n hook_plugins.append(entry())\n elif issubclass(entry, Command):\n command_plugins.append(entry())\n except (ImportError, NotImplementedError):\n continue", "def load(self):\r\n info_files = list()\r\n directory_listing = os.listdir(self.path)\r\n for _file in directory_listing:\r\n if _file.endswith(INFO_FILE_EXTENSION):\r\n info_files.append(_file)\r\n\r\n for filename in info_files:\r\n plugin_name = filename[:-len(INFO_FILE_EXTENSION)]\r\n md = self.metadata_for_plugin(plugin_name)\r\n try:\r\n if md['type'].upper() in PLUGIN_TYPES:\r\n for target in md['targets']:\r\n target = target if isinstance(target, str) else tuple(target)\r\n if not target in self._targets:\r\n self._targets[target] = plugin_name\r\n else:\r\n msg = 'Did not register %s for %s. Plugin %s already registered.' % (plugin_name,\r\n target,\r\n self._targets[target])\r\n self._register_plugin_with_error(plugin_name, msg)\r\n self.log.warn(msg)\r\n self._plugins[plugin_name] = md\r\n self._types[md['type'].upper()].append(plugin_name)\r\n self.log.info('Plugin (%s) loaded...' % plugin_name)\r\n except KeyError as e:\r\n msg = 'Invalid metadata for plugin (%s). Missing entry for %s.' 
% (plugin_name, e)\r\n self._register_plugin_with_error(plugin_name, msg)\r\n self.log.exception(msg)\r\n except Exception as e:\r\n msg = 'Loading plugin (%s) failed with exception: %s' % (plugin_name, e)\r\n self._register_plugin_with_error(plugin_name, msg)\r\n self.log.exception(msg)", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( '',)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')", "def load_plugins(self):\n self.log.debug('Loading plugins')\n # Load plugins\n for plugin in self.config['plugins']:\n self.plugins.append({\n 'module': __import__('turbo_hipster.task_plugins.' +\n plugin['name'] + '.task',\n fromlist='turbo_hipster.task_plugins' +\n plugin['name']),\n 'plugin_config': plugin\n })\n self.log.debug('Plugin %s loaded' % plugin['name'])", "def load_plugins():\n\timport imp\n\tglobal plugins\n\n\t# import plugins\n\ti = 1\n\tfor dirname, dirnames, filenames in os.walk(PLUGINS_PATH):\n\t\tfor filename in filenames:\n\t\t\tif filename.startswith(\"plugin-\") and filename.endswith(\".py\"):\n\t\t\t\t# print \"Loading plugin:\", filename\n\t\t\t\tload_path = os.path.join(dirname, filename)\n\t\t\t\tmodule = imp.load_source(\"plugin%s\" % i, load_path)\n\n\t# init each plugin\n\tplugins = LinksProvider.get_plugins()", "def boot():\n\t\tcreate_project_url_dir(Spider.project_name)\n\t\tcreate_url_data(Spider.project_name, Spider.base_url)\n\t\tSpider.queue = file_to_set(Spider.queue_file)\n\t\tSpider.crawled = file_to_set(Spider.crawled_file)", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n out_dataset[1].create_dataset(in_dataset[1])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'SINOGRAM',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')\n\n in_pData[1].plugin_data_setup( 'PROJECTION',)\n out_pData[1].plugin_data_setup( 'PROJECTION','multiple')", "def handle_plugins(plugins, plugins_dir, installation_dir):\n create_plugins_dir(plugins_dir)\n # Install plugins.\n if not isinstance(plugins, dict):\n raise NonRecoverableError(\n 'The plugins value is not valid: {value} '\n 'If you wish to use custom Terraform providers must provide a '\n 'dictionary in the following format: search.path/provider_name.'\n ''\n 'For example:'\n 'plugins: \\n'\n ' registry.terraform.io/hashicorp/template: '\n 'https://releases.hashicorp.com/terraform-provider-template/'\n '2.1.2/'\n 'terraform-provider-template_2.1.2_linux_amd64.zip\\n'.format(\n value=plugins)\n )\n for plugin_name, plugin_url in plugins.items():\n with tempfile.NamedTemporaryFile(\n suffix=\".zip\",\n delete=False,\n dir=installation_dir) as plugin_zip:\n plugin_zip.close()\n ctx.logger.debug('Downloading Terraform plugin: {url}'.format(\n url=plugin_url))\n download_file(plugin_zip.name, plugin_url)\n unzip_path = os.path.join(plugins_dir, plugin_name)\n mkdir_p(os.path.dirname(unzip_path))\n unzip_and_set_permissions(plugin_zip.name, unzip_path)\n os.remove(plugin_zip.name)", "def _loadPlugins(self, plugin_repo_path):\n try:\n os.stat(plugin_repo_path)\n except OSError:\n \n pass\n \n sys.path.append(plugin_repo_path)\n\n dir_name_regexp = re.compile(r\"^[\\d\\w\\-\\_]+$\")\n for name in os.listdir(plugin_repo_path):\n if dir_name_regexp.match(name):\n try:\n module_path = os.path.join(plugin_repo_path, name)\n 
sys.path.append(module_path)\n module_filename = os.path.join(module_path, \"plugin.py\")\n self._plugin_modules[name] = imp.load_source(name, module_filename)\n except Exception:\n msg = \"An error ocurred while loading plugin %s.\\n%s\" % (module_filename, traceback.format_exc())\n getLogger(self).error(msg)\n else:\n pass", "def _insertAllSteps(self):\n \n # Get pointer to input micrographs \n self.particlePickingRun = self.xmippParticlePicking.get()\n \n copyId = self._insertFunctionStep('copyInputFilesStep')\n # Get micrographs to pick\n #self.inputMicrographs.set(self.getInputMicrographs())\n \n deps = []\n for mic in self.getInputMicrographs():\n stepId = self._insertFunctionStep('autopickMicrographStep', mic.getFileName(), prerequisites=[copyId])\n deps.append(stepId)\n \n self._insertFunctionStep('_createOutput',self._getExtraPath(), prerequisites=deps)", "def _get_plugins(self):\n logger.debug('Gathering plugins')\n\n for plugin in plugins.__all__:\n try:\n module = importlib.import_module('plugins.' + plugin)\n\n for plugin_class in dir(module):\n obj = getattr(module, plugin_class)\n\n if inspect.isclass(obj) and issubclass(obj, threading.Thread): # if plugin is subclass of Thread\n try:\n self.plugins.append(PluginInfo(obj))\n logger.info('Plugin found: \\\"{}\\\" with identifier: \\\"{}\\\"'.format(obj.p_name, obj.p_identifier))\n except (AttributeError, ValueError) as err:\n if isinstance(err, AttributeError):\n logger.exception('Plugin: \\\"{}\\\" missing one or more required properties, ignoring...'.format(plugin_class))\n elif isinstance(err, ValueError):\n logger.exception('Plugin: \\\"{}\\\" contains a space in the identifier, ignoring...'.format(plugin_class))\n\n except ImportError:\n logger.error('Could not load plugin: \\\"{}\\\"'.format(plugin))", "def initialize():\n\n # create plugin locations\n for p in (cache_path, config_path, data_path):\n p.mkdir(parents=False, exist_ok=True)", "def _discover_plugins(plugins_dir=config['plugins_dir']):\n plugin_paths = glob(join(plugins_dir, '*'))\n for plugin_path in plugin_paths:\n if not os.path.isdir(plugin_path):\n continue\n extra_path = get_plugin_path_extension(plugin_path)\n plugin_name = os.path.basename(plugin_path)\n plugin_name = plugin_name.replace('-', '_')\n with set_aside(), patch_path(plugin_name, *extra_path), clear_module_cache(plugin_name):\n site_packages = get_plugin_site_packages_directory(plugin_path)\n for module_name in filter_distribute_modules(get_modules(site_packages)):\n if not module_name.startswith(plugin_name):\n continue\n try:\n importlib.import_module(module_name)\n except Exception as e:\n handle_exception(e, plugin_name, module_name)\n ensure_plugin_module_loaded(plugin_name)", "def load_plugins(self, plugin_list):\n for plugin_name in plugin_list:\n self.load_plugin(plugin_name)", "def __import(self):\n configured_plugins = self.config.get('plugins', {}).copy()\n plugin_module_files = self.__find_plugin_modules(configured_plugins)\n self.__initialize_plugins(configured_plugins, plugin_module_files)", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'PROJECTION',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION',multiple)", "def pipeline_test_data(self):\n if self.linearity:\n Detector1Pipeline.call(self.ramp_file, save_results=True, output_dir=self.output_dir, output_use_model=True,\n steps={'ipc': {'skip': True},\n 'rscd': 
{'skip': True},\n 'lastframe': {'save_results': True,\n 'output_dir': self.output_dir},\n 'dark_current': {'save_results': True,\n 'output_dir': self.output_dir},\n #'linearity': {'skip': True},\n 'jump': {'save_results': True,\n 'output_dir': self.output_dir}})\n else:\n Detector1Pipeline.call(self.ramp_file, save_results=True, output_dir=self.output_dir, output_use_model=True,\n steps={'ipc': {'skip': True},\n 'rscd': {'skip': True},\n 'lastframe': {'save_results': True,\n 'output_dir': self.output_dir},\n 'dark_current': {'save_results': True,\n 'output_dir': self.output_dir},\n 'linearity': {'skip': True},\n 'jump': {'save_results': True,\n 'output_dir': self.output_dir}})\n\n self.pre_dark_file = os.path.join(self.output_dir, 'step_lastframe.fits')\n self.post_dark_file = os.path.join(self.output_dir, 'step_dark_current.fits')\n self.jump_file = os.path.join(self.output_dir, 'step_jump.fits')\n self.rate_file = os.path.join(self.output_dir, 'step_rate.fits')", "def initialize(self):\n\n # create plugin locations\n for p in (cache_path, config_path, data_path):\n p.mkdir(parents=False, exist_ok=True)", "def run_plugin():\n config = load_config()\n all_resources = []\n\n try:\n for resource in config.resources:\n all_resources = all_resources + get_resource(resource['url'], resource.get('sha256'), resource.get('patches'))\n except yaml.YAMLError as e:\n c.eprint(\"%s: invalid yaml\" % url)\n if hasattr(e, 'problem_mark'):\n c.eprint(e.problem_mark)\n c.eprint(e.problem)\n if e.context is not None:\n c.eprint(e.context)\n raise e\n except HTTPError as e:\n c.eprint(\"%s: %s %s\" % (url, e.code, e.reason))\n raise e\n except URLError as e:\n c.eprint(\"%s: %s\" % (url, e.reason))\n raise e\n yaml.dump_all(all_resources, sys.stdout, default_flow_style=False)", "def file_loader(self):\n\n for folder in self.config[\"data_folders\"]:\n f = os.path.join(folder, self.data_file)\n yield jsonlist.load_file(f)", "def handle_plugins(self, request):\n \"\"\"\n @api {get} /plugins List plugins\n @apiName GetPlugins\n @apiGroup Node\n @apiVersion 1.0.0\n\n @apiDescription List plugins loaded on the node.\n\n @apiSuccessExample {json} Example response:\n [\n \"configbackup\",\n \"mailer\",\n \"executionsummary\"\n ]\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n plugins = get_plugin_registry().get_plugins()\n return HTTPReply(code = 200, body = json.dumps(plugins), headers = headers)", "def _initialize_plugins(self):\n logger.debug('Initializing plugins')\n plugins = self._pm.hook.plugins(loop=self._loop)\n if plugins:\n for plugin in plugins:\n name = plugin.__name__\n registry_name = plugin.__registry__ or plugin.__name__\n config = self.config.get(name, {})\n\n priority = config.get('priority', 50)\n\n if priority:\n self._plugins[name] = {\n 'plugin': plugin,\n 'config': config,\n 'priority': priority,\n 'factory': registry_name\n }\n\n self._start_priority[priority].append(name)\n else:\n logger.error('No plugins found')", "def contents_build_1(self, **kwargs):\n\n SeriesFilesCount = 10\n for key,val in kwargs.iteritems():\n if key == 'SeriesFilesCount': SeriesFilesCount = val\n\n # First, build a PACS_pull tree\n self.dataComponent_build(\n path = '/',\n plugin = 'PACSPull',\n SeriesFilesCount = SeriesFilesCount\n )\n\n self.dataComponent_pluginBuild(\n path = '/plugins'\n )\n\n\n # Now \"run\" an mri_convert to nifi\n # self.dataComponent_pluginRun(\n # inputPath = '/dataView/files',\n # outputPath = '/plugin/run',\n # plugin = 
'mri_convert'\n # )", "def discover_all_plugins(self):\n for v in pkg_resources.iter_entry_points('dgit.plugins'):\n m = v.load()\n m.setup(self)", "def loadPlugins():\n sys.path.append(basedefs.DIR_PLUGINS)\n fileList = sorted(os.listdir(basedefs.DIR_PLUGINS), cmp=plugin_compare)\n for item in fileList:\n # Looking for files that end with ###.py, example: a_plugin_100.py\n match = re.search(\"^(.+\\_\\d\\d\\d)\\.py$\", item)\n if match:\n try:\n moduleToLoad = match.group(1)\n logging.debug(\"importing module %s, from file %s\", moduleToLoad, item)\n moduleobj = __import__(moduleToLoad)\n moduleobj.__file__ = os.path.join(basedefs.DIR_PLUGINS, item)\n globals()[moduleToLoad] = moduleobj\n checkPlugin(moduleobj)\n controller.addPlugin(moduleobj)\n except:\n logging.error(\"Failed to load plugin from file %s\", item)\n logging.error(traceback.format_exc())\n raise Exception(\"Failed to load plugin from file %s\" % item)", "def _load_plugins(plugin_names, package, plugin_dir, plugin_params=None):\n plugins = []\n for plugin_name in plugin_names:\n for file_name in os.listdir(plugin_dir):\n if file_name.endswith(\".py\"):\n module_name = '.' + file_name.rstrip(\".py\")\n module = importlib.import_module(module_name, package)\n plugin_class = getattr(module, plugin_name, None)\n if plugin_class:\n plugins.append(plugin_class(*plugin_params))\n break # Only one plugin per module.\n return plugins", "def populate_db_with_plugins(agent):\n conn = jenkins.Jenkins(agent.url, agent.user, agent.password)\n plugins = conn.get_plugins()\n\n for plugin in plugins.iteritems():\n insert_plugin_data_into_db(plugin)", "def process(self):\n self.extract()\n self.transform()\n self.load()", "def index_plugins(self, system, partial_root):\n for root, dirs, files in os.walk(self.directory.path):\n relroot = os.path.relpath(root, self.directory.path)\n splitrelroot = relroot.split(os.sep)\n\n # Skips hidden directories\n hiddendir = False\n for d in splitrelroot:\n if d.startswith('.'):\n hiddendir = True\n if hiddendir:\n continue\n\n if PythonPluginStubFile.can_load_directory(fileman.new(root)):\n file_pid = None\n try:\n # Generate PluginID for directory\n file_pid = PluginId.parse(\".\".join([partial_root] + splitrelroot))\n\n logger.debug(\"Indexing plugin with file_pid '{}'\".format(file_pid))\n\n # Generate description object\n plugin_description = PythonPluginStubFile(fileman.new(root))\n\n # Make the stub\n stub = PluginStub(system, plugin_description, file_pid)\n\n # Check platform to make sure the plugin is valid for the target:\n if stub.check_platform(config_target):\n # Add stub to directory (and system)\n self._add_plugin_stub(system, stub)\n else:\n logger.debug(\"Plugin failed platform check '{}'\".format(stub.id))\n except:\n # TODO(Mason): More specific exceptions\n tb_string = \"\\n\\t\".join((\"\".join(traceback.format_exception(*sys.exc_info()))).split(\"\\n\"))\n logger.error(\"Plugin failed to load, ID per directory is '{}':\\n\\t{}\".format(file_pid, tb_string))", "def load_plugins(self, config):\n # 从文件夹中扫描出plugin文件\n plugins_file = []\n try:\n for f in glob.glob(os.path.join(self.directory, '*.py')):\n f = os.path.basename(f)\n if f not in ('__init__.py', 'base.py'):\n plugins_file.append(f[:-3])\n except OSError:\n print(\"Failed to access: %s\" % dir)\n\n # 将文件装置成类对象\n for name in plugins_file:\n path = os.path.relpath(self.directory, os.path.realpath('.'))\n path = path.replace(os.path.sep, '.')\n module = import_module('.%s' % name, path)\n plugin_class = getattr(module, 
getattr(module, \"__className__\"))\n if hasattr(module, \"__type__\"):\n plugins[name] = {'type': module.__type__, 'plugin_class': plugin_class}\n\n # 恢复初始值\n hook.plugins = {}\n\n # 根据配置顺序向hook注册\n for (type, plugin_list) in config.items():\n for plugin_name in plugin_list:\n if plugin_name in plugins:\n plugin = plugins[plugin_name]\n if type in plugin['type']:\n hook.plugins.setdefault(type, []).append({\n 'name': plugin_name,\n 'plugin_class': plugin['plugin_class'],\n 'toggle': True\n })\n else:\n raise PluginNoSupportException('{} no support {}'.format(plugin_name, type))\n else:\n raise NoFoundPluginException('{} no found'.format(plugin_name))\n\n # 追加未开启的插件\n for name, plugin in plugins.items():\n for type in plugin['type']:\n if name not in config[type]:\n # 添加未开启的插件信息\n hook.plugins.setdefault(type, []).append({\n 'name': name,\n 'plugin_class': plugin['plugin_class'],\n 'toggle': False\n })", "def preprocessing_pipeline(self):\n self.__multilabel_processing()\n self.__split_dataset()\n self.__save_datasets()", "def load_plugin_data(self, data):\n return", "def _iter_plugin_files(dirs):\n for plugin_dir in dirs:\n plugin_dir = Path(plugin_dir).expanduser()\n if not plugin_dir.exists(): # pragma: no cover\n continue\n for subdir, dirs, files in os.walk(plugin_dir, followlinks=True):\n subdir = Path(subdir)\n # Skip test folders.\n base = subdir.name\n if 'test' in base or '__' in base or '.git' in str(subdir): # pragma: no cover\n continue\n logger.debug(\"Scanning `%s`.\", subdir)\n for filename in files:\n if (filename.startswith('__') or not filename.endswith('.py')):\n continue # pragma: no cover\n logger.debug(\"Found plugin module `%s`.\", filename)\n yield subdir / filename", "def prepare_addons(self):\n\n for addon in self.options.addons:\n if addon.startswith(\"http\") or addon.startswith(\"ftp\"):\n path = self.download_addon(addon, tempfile.gettempdir())\n self.downloaded_addons.append(path)\n self.addon_list.append(path)\n else:\n self.addon_list.append(addon)", "def handle(self, *args, **options):\n\n # they look strange but are what comes over from wordpress API\n # im giessing there are redirects in place to make this work\n SOURCES = {\n 'sample-page': 'aac',\n 'home-2': 'commissioning',\n 'nhs-england-and-nhs-improvement-corona-virus': 'coronavirus',\n 'greener-nhs': 'greenernhs',\n 'improvement-knowledge-hub': 'improvement-hub',\n 'tbc': 'non-executive-opportunities',\n 'nhs-rightcare': 'rightcare',\n }\n # for BasePage models\n pages = BasePage.objects.all().order_by('-depth')\n\n for page in pages:\n first_published = page.first_published_at\n last_published = page.last_published_at\n latest_revision_created = page.latest_revision_created_at\n\n if page.slug in SOURCES.keys():\n # print(SOURCES[page.wp_slug])\n sys.stdout.write('\\n✅ {} is fixed'.format(SOURCES[page.wp_slug]))\n slug = SOURCES[page.wp_slug]\n page.slug = slug\n \"\"\"\n running save_revision() as it seems like a good idea to not break page paths\n just to be safe...\n try to keep revision dates to match whats in wordpress as our\n revisions reset that at the save()\n \"\"\"\n try:\n rev = page.save_revision()\n page.first_published_at = first_published\n page.last_published_at = last_published\n page.latest_revision_created_at = latest_revision_created\n # probably not the best way to do this but need to update the dates on the page record\n # to keep in sync with wordpress at the import stage\n # futher imports will collect new data and new dates.\n page.save()\n rev.publish()\n except 
ValidationError:\n print('⚠️ {} slug cannot be updated!!!'.format(page))\n time.sleep(2)\n\n # for ComponentsPage models\n # pages = ComponentsPage.objects.all().order_by('-depth')\n\n # for page in pages:\n # first_published = page.first_published_at\n # last_published = page.last_published_at\n # latest_revision_created = page.latest_revision_created_at\n\n # if page.slug in SOURCES.keys():\n # # print(SOURCES[page.wp_slug])\n # sys.stdout.write('\\n✅ {} is fixed'.format(SOURCES[page.wp_slug]))\n # slug = SOURCES[page.wp_slug]\n # page.slug = slug\n # \"\"\"\n # running save_revision() as it seems like a good idea to not break page paths\n # just to be safe...\n # try to keep revision dates to match whats in wordpress as our\n # revisions reset that at the save()\n # \"\"\"\n # try:\n # rev = page.save_revision()\n # page.first_published_at = first_published\n # page.last_published_at = last_published\n # page.latest_revision_created_at = latest_revision_created\n # # probably not the best way to do this but need to update the dates on the page record\n # # to keep in sync with wordpress at the import stage\n # # futher imports will collect new data and new dates.\n # page.save()\n # rev.publish()\n # except ValidationError:\n # print('⚠️ {} slug cannot be updated!!!'.format(page))\n # time.sleep(2)\n\n sys.stdout.write('\\n✅ Done\\n')", "def setUp(self):\n file_dir_path = os.path.dirname(__file__)\n conll_ud_dir = os.path.abspath(\n os.path.join(\n file_dir_path, *([os.pardir] * 4), \"data_samples/conll_ud\"\n )\n )\n pl = Pipeline()\n pl.set_reader(ConllUDReader())\n pl.initialize()\n\n self.data_packs: List[DataPack] = [\n data_pack for data_pack in pl.process_dataset(conll_ud_dir)\n ]\n self.doc_ids = [\n \"weblog-blogspot.com_nominations_20041117172713_ENG_\"\n \"20041117_172713\",\n \"weblog-blogspot.com_nominations_20041117172713_ENG_\"\n \"20041117_172714\",\n ]", "def main():\n os.makedirs(PATH)\n fetch_data()\n convert_to_json(model_list, 'models.json', is_model=True)\n convert_to_json(backend_list, 'backends.json')\n convert_to_json(type_list, 'types.json')\n convert_to_json(featurizer_list, 'featurizers.json')", "def execute(self):\n rc = self.rc\n try:\n for plugin in self.plugins:\n plugin.execute(rc)\n except Exception as e:\n self.exit(e)", "async def pipeline_impl(config):\n await generate_groups_impl(config)\n await merge_singular_plural_impl(config)\n await add_parent_groups_impl(config)\n await prune_single_groups_impl(config)\n await move_inner_items_impl(config)\n await split_large_groups_impl(config)", "def process(self, *args, **kwargs):\n for name in self.plugin:\n if not self.plugin[name].post_inited:\n self.plugin[name].post_init()\n return XMLStream.process(self, *args, **kwargs)", "def pipeline_runner():\n # file_parser() # take raw data file and extract columns of interest. 
remove contaminants.\n entry_parser() # remove duplicates, faulty lines and format the whole thing normally.\n lfq_parser() # replace 0s in lfq reading with random small numbers for t testing purposes\n # open Rstudio and do T testing there\n ROutputFormatter() # reformat R output to something more appealing, add FDR and fold change values", "def load(info):\n # Compute full path to plugin's index.html file.\n ResonantLab._cp_config['tools.staticdir.dir'] = os.path.join(info['pluginRootDir'], 'web_external')\n\n # Read the version number from the plugin specification file.\n config_file = os.path.join(info['pluginRootDir'], 'plugin.yml')\n with open(config_file) as f:\n config = yaml.safe_load(f)\n\n # Read the git hash from the hash file.\n git_sha_file = os.path.join(info['pluginRootDir'], 'git-sha')\n with open(git_sha_file) as f:\n git_sha = f.read().strip()\n\n # Instantiate ResonantLab resource and register the plugin with Girder.\n app = info['apiRoot'].resonantlab = ResonantLab(version=config.get('version'), sha=git_sha)\n registerPluginWebroot(app, info['name'])", "def import_all():\n import sys\n\n # obviously this is a hack for now... What's the right way to learn\n # the directory that holds the plugins directory? I don't want the\n # directory itself, because I *think* we might get name conflicts if we\n # import them directly. (I'm fuzzy about how that works. Can you\n # import \"x\" from one path and \"x\" from another path, and have them both\n # around with the same name? sys.modules suggests no.\n pdir = \"/home/sandro/riftr\"\n sys.path.append(pdir)\n \n dir = \"plugins\"\n ids = {}\n for filename in os.listdir(pdir + \"/\" + dir):\n if filename.endswith(\".py\") and not filename[0] == \"_\":\n local = filename[0:-3]\n module_name = dir + \".\" + local\n #print \n #print module_name\n m = __import__(module_name)\n mm = getattr(m, local)\n #print \"=> \", mm\n for (name, entry) in mm.__dict__.items():\n if getattr(entry, \"__doc__\", False) and getattr(entry, \"id\", False):\n if entry.id.startswith(dir+\".\"):\n # because they used \"__name__\"\n entry.id = entry.id[len(dir+\".\"):]\n if entry.id in ids:\n raise RuntimeError, (\"Duplicate id: %s used in %s and %s\" %\n entry.id, ids[entry.id], filename)\n ids[entry.id] = filename\n #print \"registering\", name, entry\n register(entry)\n \n # I wonder why issubclass doesn't work for me like this.\n #if type(entry).__name__ in [ \"classobj\", \"type\" ]:\n # print \"is type/class\", name, entry\n # print issubclass(entry, object)\n # print issubclass(entry, Plugin)\n # print issubclass(entry, InputPlugin)\n\n\n sys.path.pop(-1)", "def __init__(self, app=None, plugins_base=None, plugins_folder=\"plugins\", **kwargs):\n self.plugins_folder = plugins_folder\n self.plugin_abspath = os.path.join(plugins_base or os.getcwd(), self.plugins_folder)\n\n #: all locally stored plugins\n #:\n #: .. versionadded:: 0.1.4\n self.__plugins = []\n\n #: logging Logger instance\n #:\n #: .. versionadded:: 0.1.9\n self.logger = kwargs.get(\"logger\", logger)\n\n #: Template sorting\n #:\n #: .. versionadded:: 1.2.0\n self.stpl = kwargs.get(\"stpl\", False)\n self.stpl_reverse = kwargs.get(\"stpl_reverse\", False)\n\n #: Simple storage service(s3), currently optional: local or redis.\n #: May increase in the future: memcache.\n #: You can also inherit :class:`~flask_pluginkit.BaseStorage`, custom storage interface.\n #:\n #: .. 
versionadded:: 1.3.0\n self.s3 = kwargs.get(\"s3\")\n self.s3_redis = kwargs.get(\"s3_redis\")\n\n #: Dynamic join point initialization, format::\n #: dict(event=deque())\n #:\n #: .. versionadded:: 2.1.0\n self.dcp_funcs = {}\n\n #: initialize app via a factory\n #:\n #: .. versionadded:: 0.1.4\n if app is not None:\n self.init_app(app)", "def process(self, plugin_data=None, fetch_related_data=False):\n try:\n # Calling pre-processor.\n self.pre_processor()\n\n if plugin_data:\n try:\n # Trying to load the plugin data to JSON.\n plugin_data = json.loads(plugin_data)\n\n # If a valid JSON object, feed it to our plugin and process\n # the data. The ``process_data`` method should be defined\n # in your subclassed plugin class.\n if plugin_data:\n self.load_plugin_data(plugin_data)\n\n self.process_plugin_data(\n fetch_related_data=fetch_related_data\n )\n except Exception as err:\n logger.debug(\n \"Error in class %s. Details: %s\",\n self.__class__.__name__,\n str(err)\n )\n\n # Calling the post processor.\n self.post_processor()\n\n return self\n except Exception as err:\n logger.debug(\n \"Error in class %s. Details: %s\",\n self.__class__.__name__,\n str(err)\n )", "def RunPlugins(cls, platform, file_system, mount_point, knowledge_base):\n # TODO: bootstrap the artifact preprocessor.\n\n searcher = file_system_searcher.FileSystemSearcher(file_system, mount_point)\n\n for weight in cls._GetWeights(cls._plugin_classes, platform):\n for plugin_object in cls._GetPluginsByWeight(\n cls._plugin_classes, platform, weight):\n try:\n plugin_object.Run(searcher, knowledge_base)\n\n except (IOError, errors.PreProcessFail) as exception:\n logging.warning((\n u'Unable to run preprocessor: {0:s} for attribute: {1:s} '\n u'with error: {2:s}').format(\n plugin_object.plugin_name, plugin_object.ATTRIBUTE,\n exception))\n\n # Run the Registry plugins separately so we do not have to open\n # Registry files in every plugin.\n\n path_attributes = None\n if knowledge_base:\n path_attributes = knowledge_base.GetPathAttributes()\n\n registry_file_reader = FileSystemWinRegistryFileReader(\n file_system, mount_point, path_attributes=path_attributes)\n win_registry = dfwinreg_registry.WinRegistry(\n registry_file_reader=registry_file_reader)\n\n for weight in cls._GetWeights(cls._registry_plugin_classes, platform):\n for plugin_object in cls._GetPluginsByWeight(\n cls._registry_plugin_classes, platform, weight):\n\n try:\n plugin_object.Run(win_registry, knowledge_base)\n\n except (IOError, errors.PreProcessFail) as exception:\n logging.warning((\n u'Unable to run preprocessor: {0:s} for attribute: {1:s} '\n u'with error: {2:s}').format(\n plugin_object.plugin_name, plugin_object.ATTRIBUTE,\n exception))", "def load_plugins(self, base, path, pattern=\"*.py\", class_type=None):\n path = os.path.join(path, pattern)\n modules = {}\n for infile in glob.glob(path):\n basename = os.path.basename(infile)\n if basename == \"__init__.py\":\n continue\n plugin_name = basename[:-3]\n plugin_namespace = \"%s.%s\" % (base, plugin_name)\n if plugin_namespace in sys.modules:\n # Already loaded\n plugin = sys.modules[plugin_namespace]\n else:\n plugin = imp.load_source(plugin_namespace, infile)\n caller = getattr(plugin, plugin_name, None)\n if caller is None:\n raise ImportError(\"Class not found:\", plugin_name, plugin)\n if class_type:\n if not inherits_from(caller, class_type):\n raise ImportError(\"Wrong class type:\", plugin_name, plugin)\n modules[plugin_name] = caller\n self.modules = modules", "def go(self):\n 
self.analyse_folder(BASE)\n self.analyse_folder(JS_FOLDER)", "def __init__(self, config, processors):\n source = HackernewsStories()\n source.configure(config)\n\n super(HackernewsCrawlJob, self).__init__(source, processors)", "def get_pipelines() -> Iterable[DataPipeline]:\n for pipeline_name in get_pipeline_names():\n yield DataPipeline.load(pipeline_name)", "def run(self):\n for lof in self.data_files:\n if lof[0]:\n base = getattr(self, 'install_' + lof[0])\n else:\n base = getattr(self, 'install_base')\n dir = convert_path(lof[1])\n if not os.path.isabs(dir):\n dir = os.path.join(base, dir)\n elif self.root:\n dir = change_root(self.root, dir)\n self.mkpath(dir)\n\n files = lof[2]\n if len(files) == 0:\n # If there are no files listed, the user must be\n # trying to create an empty directory, so add the\n # directory to the list of output files.\n self.outfiles.append(dir)\n else:\n # Copy files, adding them to the list of output files.\n for f in files:\n f = convert_path(f)\n (out, _) = self.copy_file(f, dir)\n #print \"DEBUG: \", out # dbg\n self.outfiles.append(out)\n \n\n return self.outfiles", "def _plugin_create(cls, plugin_dir):\n plugin_path = os.path.join(settings.PLUGINS_PATH, plugin_dir,\n 'metadata.yaml')\n try:\n plugin_metadata = cls._parse_yaml_file(plugin_path)\n Plugin.create(plugin_metadata)\n except Exception as e:\n logger.error(\"cannot create plugin {0} from FS. Reason: {1}\"\n .format(plugin_dir, str(e)))", "def setup(self):\n # Call the baseclass setup to resolve any selections\n super().setup()\n\n # Load the requested file\n cont = self._load_file(self.filename)\n\n # Set the done attribute so the pipeline recognizes this task is finished\n self.done = True\n\n return cont", "def test_register_dynamic_plugin(self):\n pass", "def main():\n # Step1: generate htmls\n csv_data_path= \"./frontend/html_template_data/dataset.csv\"\n html_template_path = \"./frontend/html_template_data/template.html\"\n html_save_path = \"./frontend/html_files/\"\n\n generate_htmls(csv_data_path, html_template_path, html_save_path)\n\n # Step2: push htmls to Github\n # push htmls to Github Pages, currently manual.", "def test_several_folders(self):\n spider_path = 'tests/sample_spiders/'\n test_data = [\n ('valid_metadata', 1),\n ('no_metadata', 0),\n ('incomplete_metadata', 0),\n ('two_spiders_one_file', 0),\n ('no_basespider_inheritance', 0)\n ]\n\n m = SpiderManager()\n for spidername, valid_spiders in test_data:\n path = spider_path + spidername\n os.environ['SPIDER_PATH'] = path\n\n m.load(path)\n spiders = m.get_spiders()\n\n self.assertEqual(type(spiders), list)\n self.assertEqual(len(spiders), valid_spiders)", "def main():\n\n browser = initialize()\n process_directory(browser, \"data\")\n browser.close()", "def run(self):\n pipeline = set_pipeline()\n pipeline.fit(self.X_train, self.y_train)\n return pipeline", "def load_parsers_from_plugins(subparser, plugins):\n for plugin_name, plugin_class in plugins.items():\n # create a parser object for the plugin.\n plugin_parser = subparser.add_parser(\n plugin_name,\n description = plugin_class.__doc__,\n )\n\n plugin_parser.add_argument('vpc_name', help='The VPC\\'s Name tag.')\n\n try:\n # Assume class plugin with 'setup_parser' and 'main' staticmethods.\n plugin_class.setup_parser(plugin_parser)\n plugin_parser.set_defaults(func = plugin_class.main)\n except AttributeError:\n # Assume function plugin w/o 'setup_parser' or 'main' staticmethods.\n plugin_parser.set_defaults(func = plugin_class)", "def 
test_register_dynamic_plugin1(self):\n pass", "def process_yamls(folder):\n for item in iglob(folder + \"/*.yaml\"):\n data_file = os.path.join(folder, item)\n data = yaml.load(open(data_file))\n load_data(data)", "def main(base_dir: str, output_dir: str) -> None:\n base_path = pathlib.Path(base_dir)\n output_path = pathlib.Path(output_dir).expanduser()\n\n stage_copy_images(base_path, output_path)\n stage_extract_videos(base_path, output_path)", "def pipeline(self):\n\n self._get_data()\n self._upload_to_raw()", "def main():\n # %%\n CFG.profiles_yamls_path.mkdir(parents=True, exist_ok=True)\n fpaths = list( _Config.raw_profiles_path.glob('*.html') )\n print( f'{len(fpaths)} htmls found' )\n # %%\n fpath = CFG.raw_profiles_path / 'luis-mario-urrea-murillo.html'\n # %%\n fpath = CFG.raw_profiles_path / 'cristian-david-montoya-saldarriaga-09638514a.html'\n # %%\n fpaths = [ CFG.raw_profiles_path / 'ricardo-alarcon-44079b105.html' ]\n # %%\n fpaths = [ Path('/home/teo/_data/talent/linkedin_raw_profiles/israellaguan.html')]\n # %%\n dics = {}\n # %%\n\n for i, fpath in enumerate(fpaths):\n if fpath in dics:\n continue\n\n with fpath.open('rt') as f_in:\n html = f_in.read()\n\n print( f'\\n***{i+1}/{len(fpaths)} {fpath.name}:')\n dic = extract_one( html, fpath )\n dic['linkedin_url'] = f\"https://www.linkedin.com/in/{fpath.name.split('.')[0]}\"\n dic['scraped_at'] = dt.datetime.fromtimestamp( fpath.stat().st_ctime )\n # pprint(dic['work_stats'])\n dics[fpath] = dic\n\n dics_arr = list(dics.values())\n # %%\n del dics\n # %%\n\n with (CFG.profiles_yamls_path / 'all_profiles.json').open('wt') as f_out:\n json.dump( dics_arr, f_out, cls=DateTimeEncoder, indent=4 )\n # %%\n with (CFG.profiles_yamls_path / 'all_profiles.yaml').open('wt') as f_out:\n yaml.safe_dump( dics_arr, f_out )\n # %%\n df = produce_summary_table( dics_arr )\n df.to_excel( CFG.raw_profiles_path.parent / 'mined_ruby_candidates_sample.xlsx',\n index=False)\n # %%", "def main(state=None, overwrite=False):\n \n # Initialize process\n process = initialize_process()\n\n if overwrite:\n # TODO: delete all (and only) files to be regenerated\n pass\n\n # Add a spider instance for each state to be run\n if state:\n add_state_to_process(state, process=process)\n else:\n for s in list(CONFIG.keys()):\n add_state_to_process(s, process=process)\n\n # Run scrape\n process.start()", "def collect_pipeline_runs(self):\n db = self.mongo_client.metalearning\n collection = db.pipeline_runs\n collection_size = collection.count()\n pipeline_cursor = collection.find()\n list_of_experiments = {\"classification\": [], \"regression\": []}\n for index, pipeline_run in enumerate(pipeline_cursor):\n if index % 1000 == 0:\n print(\"At {} out of {} documents\".format(index, collection_size))\n # if index == 2000:\n # # running into memory errors\n # break\n pipeline_run_info = self.get_pipeline_run_info(pipeline_run)\n metafeatures = self.get_metafeature_info(pipeline_run)\n # TODO: get all metafeatures so we don't need this\n if metafeatures != {}:\n experiment_json = dict(pipeline_run_info, **metafeatures)\n list_of_experiments[experiment_json[\"problem_type\"]].append(experiment_json)\n\n for problem_type in list_of_experiments.keys():\n final_data_file = json.dumps(list_of_experiments[problem_type], sort_keys=True, indent=4, default=json_util.default)\n with open(\"data/complete_pipelines_and_metafeatures_test_{}.json\".format(problem_type), \"w\") as file:\n file.write(final_data_file)\n\n return", "def main():\n\n from scrapy.crawler import 
CrawlerProcess\n from scrapy.utils.project import get_project_settings\n\n process = CrawlerProcess(get_project_settings())\n process.crawl(NCBIGeoSpider)\n process.start()", "def get_loading_pipeline(pipeline):\n loading_pipeline = []\n for transform in pipeline:\n is_loading = is_loading_function(transform)\n if is_loading is None: # MultiScaleFlipAug3D\n # extract its inner pipeline\n if isinstance(transform, dict):\n inner_pipeline = transform.get('transforms', [])\n else:\n inner_pipeline = transform.transforms.transforms\n loading_pipeline.extend(get_loading_pipeline(inner_pipeline))\n elif is_loading:\n loading_pipeline.append(transform)\n assert len(loading_pipeline) > 0, \\\n 'The data pipeline in your config file must include ' \\\n 'loading step.'\n return loading_pipeline", "def prepare(self) -> None:\n for name, step, kwargs in self.steps:\n self.stream = step(self.stream, **kwargs)", "def process(self, path):\n\n # Extract filtered content and build source databases to process\n for source in Execute.SOURCES:\n spath = os.path.join(path, source)\n\n # Extract Posts.xml from 7za file\n decompress = Decompress()\n decompress(spath)\n\n posts = os.path.join(spath, \"Posts.xml\")\n filtered = os.path.join(spath, \"Filtered.xml\")\n\n # Filter Posts.xml file for matching questions\n sift = Sift()\n sift(posts, filtered)\n\n dbfile = os.path.join(spath, f\"{source}.db\")\n\n # Convert filtered Posts.xml file to SQLite db file\n xml2db = XML2DB()\n xml2db(filtered, dbfile)\n\n # Get list of all databases to consolidate\n return [\n os.path.join(path, source, f\"{source}.db\") for source in Execute.SOURCES\n ]", "def setup(self):\n Utils.check_dir(os.path.join(expanduser('~'), '.drupdates', 'plugins'))", "def set_pipeline(self):\n feateng_steps = self.kwargs.get('feateng', ['runtime', 'country', 'language',\n 'genre', 'age', 'rated', 'released',\n 'writer', 'director', 'actors', 'production'])\n \n pipe_runtime_features = Pipeline([\n ('runtime', SimpleImputer(strategy='constant', fill_value=\"0\")),\n ('runtime_encoder', CleanRuntimeEncoder()),\n ('runtime_scaler', StandardScaler())])\n \n pipe_country_features = Pipeline([\n ('country', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('country_encoder', CleanCountryEncoder())])\n \n pipe_language_features = Pipeline([\n ('language', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('language_encoder', CleanLanguageEncoder())])\n \n pipe_genre_features = Pipeline([\n ('genre', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('genre_transformer', FunctionTransformer(np.reshape, kw_args={'newshape':-1})), \n ('genre_vectorizer', CountVectorizer(token_pattern='[a-zA-Z][a-z -]+', max_features=10))])\n \n pipe_age_features = Pipeline([\n ('age', SimpleImputer(strategy='median')),\n ('age_enconder', CleanAgeEncoder())])\n \n pipe_rated_features = Pipeline([\n ('rated', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('rated_encoder', CleanRatedEncoder()),\n ('rated_ohe', OneHotEncoder(handle_unknown='ignore'))])\n \n pipe_released_features = Pipeline([\n ('released', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('released_encoder', CleanReleasedEncoder()),\n ('released_ohe', OneHotEncoder(handle_unknown='ignore'))])\n\n pipe_writer_features = Pipeline([\n ('writer', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('writer_transformer', FunctionTransformer(np.reshape, kw_args={'newshape': -1})), \n ('writer_vectorizer', 
CountVectorizer(token_pattern='[a-zA-Z][a-z -]+', max_features=10))])\n \n pipe_director_features = Pipeline([\n ('director', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('director_transformer', FunctionTransformer(np.reshape, kw_args={'newshape': -1})), \n ('director_vectorizer', CountVectorizer(token_pattern='[a-zA-Z][a-z -]+', max_features=10))])\n \n pipe_actors_features = Pipeline([\n ('actors', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('actors_transformer', FunctionTransformer(np.reshape, kw_args={'newshape': -1})), \n ('actors_vectorizer', CountVectorizer(token_pattern='[a-zA-Z][a-z -]+', max_features=10))])\n \n pipe_production_features = Pipeline([\n ('production', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('production_transformer', FunctionTransformer(np.reshape, kw_args={'newshape': -1})), \n ('production_vectorizer', CountVectorizer(token_pattern='[a-zA-Z][a-z -]+', max_features=10))])\n \n # define default feature engineering blocks\n feateng_blocks = [\n ('runtime', pipe_runtime_features, ['Runtime']),\n ('country', pipe_country_features, ['Country']),\n ('genre', pipe_genre_features, ['Genre']),\n ('age', pipe_age_features, ['Year']),\n ('rated', pipe_rated_features, ['Rated']),\n ('released', pipe_released_features, ['Released']),\n ('writer', pipe_writer_features, ['Writer']),\n ('director', pipe_director_features, ['Director']),\n ('actors', pipe_actors_features, ['Actors']),\n ('language', pipe_language_features, ['Language']),\n ('production', pipe_production_features, ['Production'])]\n \n # filter out some blocks according to input parameters\n for block in feateng_blocks:\n if block[0] not in feateng_steps:\n feateng_blocks.remove(block)\n\n features_encoder = ColumnTransformer(feateng_blocks,\n n_jobs=None,\n remainder='drop')\n\n self.pipeline = Pipeline(steps=[\n ('features', features_encoder),\n ('rgs', self.get_estimator())])", "def run(known_args, pipeline_args):\n\n pipeline_args.extend([\n '--runner=DataflowRunner',\n '--project=wikidetox-viz',\n '--staging_location=gs://wikidetox-viz-dataflow/staging',\n '--temp_location=gs://wikidetox-viz-dataflow/tmp',\n '--job_name=yiqing-ingest-job-truncated-content-run-in-batch',\n '--num_workers=90',\n ])\n\n pipeline_options = PipelineOptions(pipeline_args)\n pipeline_options.view_as(SetupOptions).save_main_session = True\n if not(known_args.batchno == None):\n known_args.input = 'gs://wikidetox-viz-dataflow/input_lists/7z_file_list_batched_%d'%(known_args.batchno)\n known_args.table = 'wikidetox-viz:wikidetox_conversations.ingested_conversations_batch_%d'%(known_args.batchno) \n print('Running batch %d'%(known_args.batchno)) \n with beam.Pipeline(options=pipeline_options) as p:\n pcoll = (p | ReadFromText(known_args.input)\n | beam.ParDo(WriteDecompressedFile())\n | beam.io.Write(bigquery.BigQuerySink(known_args.table, schema=known_args.schema, validate = True)))", "def get_plugins(self):\n return []", "def pipeline(ctx):\n asyncio.run(pipeline_impl(ctx.obj[\"config\"]))", "def plugins_ready():\n\n for plugin in registerorder:\n plugin.ready()", "def add_plugin_files(self, plugin_name, *args, nb_path=None, **kwargs):\n nb_path = self._resolve_nb_path(nb_path)\n if plugin_name in self._plugin_collections:\n pc = self._plugin_collections[plugin_name]\n else:\n pc = PluginCollection([plugin_name], nb_path, {})\n self._plugin_collections[plugin_name] = pc\n addl_files = pc.run(\"notebook_export\", *args, **kwargs)[0]\n if addl_files is None:\n return\n 
self._addl_files.extend(addl_files)", "def load_extractor_engines(reload=False):\r\n\r\n global youtube_dl, youtube_dlc\r\n\r\n # youtube-dl ----------------------------------------------------------------------------------------------------\r\n start = time.time()\r\n\r\n if reload and youtube_dl:\r\n importlib.reload(youtube_dl)\r\n else:\r\n import youtube_dl\r\n\r\n config.youtube_dl_version = youtube_dl.version.__version__\r\n\r\n # calculate loading time\r\n load_time = time.time() - start\r\n log(f'youtube_dl version: {config.youtube_dl_version}, load_time= {int(load_time)} seconds')\r\n\r\n # youtube-dlc ----------------------------------------------------------------------------------------------------\r\n start = time.time()\r\n\r\n if reload and youtube_dlc:\r\n importlib.reload(youtube_dlc)\r\n else:\r\n import youtube_dlc\r\n\r\n config.youtube_dlc_version = youtube_dlc.version.__version__\r\n\r\n # calculate loading time\r\n load_time = time.time() - start\r\n log(f'youtube_dlc version: {config.youtube_dlc_version}, load_time= {int(load_time)} seconds')\r\n\r\n # set interrupt / kill switch\r\n set_interrupt_switch()\r\n\r\n # set default extractor\r\n set_default_extractor()\r\n\r\n # get a random user agent and update headers\r\n if not config.custom_user_agent:\r\n config.http_headers['User-Agent'] = youtube_dl.utils.random_user_agent()", "def parse(self, response):\n page_jobs=[]\n\n # Calling abstarct method get_jobs_list() and iterating...\n jobs_div_list=self.get_jobs_list(response)\n for div in jobs_div_list:\n \n # Calling abstarct method get_job_dict()\n job_dict=self.get_job_dict(div)\n\n if not job_dict['url'] or not job_dict['title'] :\n # At least url, title data is loaded from the list of job posting ...\n raise ValueError( \"Could not find valid job information ('url' and 'title') in data:\\n\" + \n str(div.get()) + \"\\nScraped infos:\\n\" + str(job_dict) + \"\\nReport this issue on github!\" )\n \n # Store source as the name of the spider aka website\n job_dict['source']=self.name\n page_jobs.append(job_dict)\n \n \"\"\"\n Load full job page only if:\n - it's a new job (not in database)\n - load_full_jobs=Yes\n - the method parse_full_job_page() has been re-wrote by the Scraper subclass\n \"\"\"\n if ( (not self.db or self.db.find_job(job_dict)==None)\n and self.load_full_jobs ):\n if type(self).parse_full_job_page != Scraper.parse_full_job_page:\n # load_full_jobs=Yes and it's supported by scraper\n # Call parse_full_job_page() with job URL\n\n # Handle SeleniumRequest if use_selenium=True\n if self.use_selenium:\n yield SeleniumRequest(url=job_dict['url'], \n callback=self.parse_full_job_page,\n cb_kwargs=dict(job_dict=job_dict),\n wait_time=self.selenium_wait_time, script=SCROLL_DOWN)\n else:\n yield response.follow(url=job_dict['url'], \n callback=self.parse_full_job_page,\n cb_kwargs=dict(job_dict=job_dict))\n else:\n yield Job(job_dict)\n else:\n yield Job(job_dict)\n\n \"\"\" Just printing in one line \"\"\"\n if self.load_full_jobs:\n if type(self).parse_full_job_page == Scraper.parse_full_job_page:\n if self.load_all_new_pages==False:\n self.log.info(\"Scraped {} jobs from {}. Scraper {} does not support load_full_jobs=True and load_all_new_pages=False, some new job postings and job informations might be missing\".format(len(page_jobs), response.url, self.name))\n else:\n self.log.info(\"Scraped {} jobs from {}. 
Scraper {} does not support load_full_jobs=True, some informations might be missing\".format(len(page_jobs), response.url, self.name))\n else:\n self.log.info(\"Scraping {} jobs from {}...\".format(len(page_jobs), response.url))\n else:\n if self.load_all_new_pages==False:\n self.log.info(\"Scraped {} jobs from {}. load_all_new_pages=False and load_full_jobs=False, some new job postings and job informations might be missing\".format(len(page_jobs), response.url))\n else:\n self.log.info(\"Scraped {} jobs from {}. load_full_jobs=False, some informations might be missing\".format(len(page_jobs), response.url))\n \n \"\"\"\n If all page jobs are new and \n The method get_next_page_url() has been re-wrote by the Scraper subclass\n Scrape next page\n \"\"\"\n if self.load_all_new_pages==True:\n if self.db and any( [self.db.find_job(job_dict)!=None for job_dict in page_jobs] ):\n # All new job postings loaded\n pass\n else:\n if self.get_next_page_url(response)!=None :\n # Loading next page...\n if self.use_selenium:\n yield SeleniumRequest(\n url=self.get_next_page_url(response),\n callback=self.parse,\n wait_time=self.selenium_wait_time, script=SCROLL_DOWN)\n else:\n yield response.follow(\n url=self.get_next_page_url(response),\n callback=self.parse)\n else:\n if type(self).get_next_page_url != Scraper.get_next_page_url:\n # Last page loaded\n pass\n else:\n self.log.info(\"Scraper {} does not support load_all_new_pages=True, some new job postings might be missing\".format(self.name))", "async def _configure_plugins(self) -> None:\n logger.debug('Configuring plugins')\n funcs = [\n info['plugin'].configure(\n config=info['config'],\n session=self._session,\n router=self.app.router\n )\n for info in self._plugins.values()\n ]\n\n if funcs:\n await asyncio.gather(*funcs, loop=self._loop)\n logger.debug('Plugins configured')", "def setup(self):\n ### Set Names\n # Name of the pipeline reduction step\n self.name='coadd'\n # Shortcut for pipeline reduction step and identifier for\n # saved file names.\n self.procname = 'coadd'\n # Set Logger for this pipe step\n self.log = logging.getLogger('pipe.step.%s' % self.name)\n ### Set Parameter list\n # Clear Parameter list\n self.paramlist = []\n # Append parameters\n self.paramlist.append(['kernel','square',\n 'Specifies the kernel used to determine spreading of input pixels onto output pixels \\\n - options are square, point, gaussian, smoothing, tophat'])\n self.paramlist.append(['pixfrac', 1.,\n 'The fraction of an output pixel(s) that an input pixel\\'s flux is confined to'])\n self.paramlist.append(['resolution', 1.,\n 'Pixel scale divisor for output image (higher gives more resolution, lower gives less)'])\n self.paramlist.append(['pad', 0,\n 'Extra padding outside maximum extent of inputs'])\n self.paramlist.append(['fillval', np.nan,\n 'Value for filling in the area(s) in the output where there is no input data'])\n self.paramlist.append(['drizzleweights','exptime',\n 'How each input image should be weighted when added to the output \\\n - options are exptime, expsq and uniform'])\n self.paramlist.append(['outangle',0.,\n 'Output angle of drizzled image (currently not functional)'])", "def run_all_plugins(self):\n logger.info(\"Starting all plugins\")\n for name in self.name_to_plugin_class:\n if self.name_to_enabled[name]:\n self.run_plugin(name)" ]
[ "0.6368807", "0.63078946", "0.5957224", "0.59520274", "0.5943972", "0.59165645", "0.58626246", "0.5809751", "0.5809528", "0.57702124", "0.57576746", "0.5738377", "0.5659071", "0.5647176", "0.56273735", "0.55972546", "0.55384547", "0.55326164", "0.5529749", "0.55208814", "0.5507725", "0.5490971", "0.5472311", "0.5463065", "0.5444696", "0.54341006", "0.5427605", "0.5409767", "0.5388508", "0.5379545", "0.5367091", "0.536664", "0.5357246", "0.5351736", "0.5350999", "0.5349468", "0.53494436", "0.5348406", "0.5317903", "0.5317479", "0.5312347", "0.5311491", "0.52939975", "0.5292148", "0.5248555", "0.5238981", "0.5222599", "0.5211682", "0.52029943", "0.5195971", "0.5195235", "0.5176642", "0.5174206", "0.51592547", "0.51553684", "0.5138766", "0.5127352", "0.51031464", "0.5096917", "0.5072083", "0.5071216", "0.5066354", "0.5061127", "0.50565165", "0.50557315", "0.50510263", "0.5043591", "0.5040495", "0.5034349", "0.5034048", "0.50317746", "0.5029753", "0.5028067", "0.5025936", "0.50159585", "0.5009233", "0.50074106", "0.5003677", "0.5003412", "0.5002435", "0.49957356", "0.49924412", "0.49908087", "0.4988891", "0.49870571", "0.49852577", "0.49829394", "0.49811587", "0.49799696", "0.49747297", "0.49702322", "0.49616966", "0.4956588", "0.49507853", "0.49465668", "0.49417555", "0.49412245", "0.49294895", "0.49275807", "0.49260476" ]
0.5151529
55
Test if the add operation returns the correct result for a test case
def test_add_int(self): self.assertEqual(operations.add(3,4), 7)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_returns_correct_result(self):\n result = self.calc.add(2, 2)\n self.assertEqual(4, result)", "def test_add(self):\n self.assertEqual(add(1, 1), 2, \"Wrong answer\")\n self.assertEqual(add(10, 1), 11, \"Wrong answer\")\n self.assertEqual(add(15, 15), 30, \"Wrong answer\")", "def test_add(self):\n print('test_add')\n \n self.assertEqual(120, add(100, 20))\n self.assertNotEqual(3, add(10, 10))", "def test_add(self):\n self.assertEqual(3, add(1, 2))\n self.assertNotEqual(3, add(2, 2))", "def test_add(self):\r\n operation = Operation(3, 4)\r\n result = operation.add()\r\n self.assertEqual(result, 7)", "def test_add_numbers(self):\n self.assertEqual(add(3, 8), 11)", "def test_add_numbers(self):\n self.assertEqual(add(3, 8), 11)", "def test_add_numbers(self):\n self.assertEqual(add(3, 8), 11)", "def test_add1(self):\n self.assertEqual(15, add(10 , 5), \"should be 15\")", "def test_add(self):\n self.assertEqual(3, foo.add(1, 2))\n self.assertNotEqual(3, foo.add(2, 2))", "def test_add():\n\n assert add(1, 1) == 2\n assert add(1, 2) == add(2, 1) == 3", "def test_add_numbers(self):\n a, b = 5, 6\n expected = a + b\n # check for equality, real vs expected\n self.assertEqual(add(a, b), expected)", "def test_add2(self):\n self.assertEqual(5, add(10 , -5), \"should be 5\")", "def test_add_integer(self):\n assert cr.add(3, 2) == 3 + 2", "def test_add_two_numbers(self):\n self.assertEqual(add(5, 9), 14)", "def test_and_numbers(self):\n self.assertEqual(add(3,8), 11)", "def test_add(self):\n self.assertEqual(work_file.add(10, 5), 15)\n self.assertEqual(work_file.add(-1, 1), 0)\n self.assertEqual(work_file.add(-1, -1), -2)", "def test_add4(self):\n self.assertEqual(-15, add(-10 , -5), \"should be -15\")", "def test_addition():\n assert calculator.add(7, 3) == 10\n assert calculator.add(7.0, 3.0) == 10.0\n assert calculator.add(7, -3) == 4\n assert calculator.add(7.0, -3.0) == 4.0", "def test_add3(self):\n self.assertEqual(-5, add(-10 , 5), \"should be -5\")", "def test_add_integers(self):\n print(\"---running test_add_integers\")\n result = some_math.add(1, 2)\n assert result == 3", "def test_our_add(self):\n\n # arrange\n x = 2\n y = 3\n expected_result = 5\n\n # act; assert\n self.assertEqual(self.our_module.add(x, y), expected_result)", "def test_two_plus_two():\n assert add.add(2, 2) == 4", "def test_add_zero_arg(self):\n try:\n self.assertEqual(add(0, 15), 15)\n except Exception as error:\n print(error)", "def test_add_numbers():\n assert add(3, 8) == 11", "def test_add_numbers(self):\n self.assertEqual(addNums(3, 8), 11)", "def test_list_int(self):\n result = add(2, 4)\n self.assertEqual(result, 6)", "def test_basic_addition(self):\n self.failUnlessEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.failUnlessEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.failUnlessEqual(1 + 1, 2)", "def test_add(self):\n\n a = random.randint(100, 10000)\n b = random.randint(100, 10000)\n\n path = \"/add/{}/{}\".format(a, b)\n\n response = self.get_response(path)\n self.assertEqual(200, response.getcode())\n\n self.assertIn(str(a + b).encode(), response.read())", "def test_valid_addition(self):\n\n test_name = sys._getframe().f_code.co_name\n\n log.info(\"###### TEST EXECUTION STARTED :: \" + test_name + \" ######\")\n\n num1 = data_reader.get_data(test_name, \"Number_A\")\n num2 = data_reader.get_data(test_name, \"Number_B\")\n expected_text = data_reader.get_data(test_name, \"Expected\")\n\n with allure.step(\"Verify valid addition functionality\"):\n result = 
self.main_page.verify_addition_functionality(num1, num2, expected=expected_text)\n self.exe_status.mark_final(test_step=test_name, result=result)", "def test_add(self):\n\n for i in range(1, 200 + 1):\n\n for j in range(1, 200 + 1):\n\n for k in range(1, 200 + 1):\n\n value = i + j + k\n assert value == add(i, j, k)", "def test_calculate_addition(self):\n result = self.calcuate.calcuate('1+4')\n expected_result = \"5\"\n self.assertEqual(expected_result, result)", "def test_add_with_int_arg(self):\n\n a = Vec3(2, 3, 4)\n b = 5\n\n result = a + b\n\n expected_result = Vec3(7, 8, 9)\n\n self.assertEqual(result, expected_result)", "def test_basic_addition(self):\r\n self.failUnlessEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.failUnlessEqual(1 + 1, 2)", "def test_add_all_args_greater_zero(self):\n try:\n self.assertEqual(add(17, 23), 40)\n except Exception as error:\n print(error)", "def test_add(x, y, expected):\n\n assert add(x, y) == pytest.approx(add(y, x)) == pytest.approx(expected)", "def test_getSum_twoNumbers(self):\r\n self.assertEqual(17, Arith().add(8, 9))", "def test_add():\n l = [1, 2, 3, 4]\n assert s7.add(*l) == sum(l)\n assert s7.add(100, 200) == 300\n assert s7.add(1.0, 2.0, 100.0) == 103.0", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_addition(l1, l2):\n result = addTwoNumbers(l1, l2)\n assert result.val == '5'\n assert result.next.val == '8'\n assert result.next.next.val == '0'\n assert result.next.next.next.val == '1'", "def test_add_float(self):\n self.assertAlmostEqual(cr.add(2.21, 4.7), 2.21 + 4.7, places=2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n 
self.assertEqual(1 + 1, 2)", "def test_single_value(self, test_input, expected, sc):\n assert sc.add(test_input) == expected", "def test_addition(self):\n\n a1 = points.Point(3, -2, 5)\n a2 = vectors.Vector(-2, 3, 1)\n\n a3 = a1 + a2\n\n self.assertEqual(a3,\n tuples.Tuple([\"x\", \"y\", \"z\", \"w\"], 1, 1, 6, 1))\n self.assertEqual(a3, points.Point(1, 1, 6))", "def test_if_add_expressions_returns_correct_values(self):\n self.assertDictEqual(add_expressions(1, 2, 8)(2, 4), {1: 1, 2: 4, 8: 8, 4: 4})\n self.assertDictEqual(add_expressions(1, 1, 2)(2, 4), {1: 2, 2: 4, 4: 4})\n self.assertDictEqual(add_expressions(1, 1, 2)(0, 4), {1: 2, 2: 2, 0: 0, 4: 4})", "def test_add_all_args_less_zero(self):\n try:\n self.assertEqual(add(-7, -11), -18)\n except Exception as error:\n print(error)", "def test_calculate_addition_of_four_elements(self):\n result = self.calcuate.calcuate('15+4+10+3')\n expected_result = \"32\"\n self.assertEqual(expected_result, result)", "def test_add(self):\n a = Vector(1, 2)\n b = Vector(3, 4)\n c = a + b\n assert c.x == 4\n assert c.y == 6", "def test_invalid_addition(self):\n\n test_name = sys._getframe().f_code.co_name\n\n log.info(\"###### TEST EXECUTION STARTED :: \" + test_name + \" ######\")\n\n num1 = data_reader.get_data(test_name, \"Number_A\")\n num2 = data_reader.get_data(test_name, \"Number_B\")\n expected_text = data_reader.get_data(test_name, \"Expected\")\n\n with allure.step(\"Verify invalid addition functionality\"):\n result = self.main_page.verify_addition_functionality(num1, num2, expected=expected_text)\n self.exe_status.mark_final(test_step=test_name, result=result)", "def test_add():\n value = 42\n num_a = param.Integer(value=value)\n assert num_a.value == value\n\n new_value = value + 1\n num_a.value += 1\n assert num_a.value == new_value", "def test_add():\n z = Complex(1, -2)\n w = Complex(1, 1)\n assert (z + w) == Complex(2, -1)\n assert (z + (1+1j)) == Complex(2, -1)\n assert (z + 2) == Complex(3, -2)\n assert (z + 2.0) == Complex(3, -2)", "def test_add_strings(self):\n print(\"---running test_add_strings\")\n result = some_math.add('abc', 'def')\n assert result == 'abcdef'", "def test_add_with_vec_argument(self):\n\n a = Vec3(2, 3, 4)\n b = Vec3(1, 2, 3)\n\n result = a + b\n\n expected_result = Vec3(3, 5, 7)\n\n self.assertEqual(result, expected_result)", "def test_evaluate_add_expression(self):\n value = self.evaluate_common(\"2M add 2M\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Decimal, \"Expected Decimal\")\n self.assertTrue(value.value == 4, \"Expected 4\")\n value = self.evaluate_common(\"2D add 2M\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Double, \"Expected Double\")\n self.assertTrue(value.value == 4.0, \"Expected 4\")\n value = self.evaluate_common(\"2F add 2D\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Double, \"Expected Double\")\n self.assertTrue(value.value == 4.0, \"Expected 4\")\n value = self.evaluate_common(\"2 add 2L\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int64, \"Expected Int64\")\n self.assertTrue(value.value == 4, \"Expected 4\")\n try:\n value = self.evaluate_common(\"2 add '2'\")\n self.fail(\"String promotion to int\")\n except odata.EvaluationError:\n pass\n value = self.evaluate_common(\"2 add null\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int32, \"Expected Int32\")\n self.assertTrue(value.value is None, \"Expected None\")", "def test_adding(self):\n adder = Adder()\n\n for i in range(-10, 10):\n for j in range(-10, 10):\n 
self.assertEqual(i + j, adder.calc(j, i))", "def test_add(self):\n query_string = [('x', 56),\n ('y', 56)]\n response = self.client.open('/addition-api/1.0.0/add',\n method='GET',\n query_string=query_string)\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_add_result():\n d1 = Driver(\"Andrew\", \"Audi A6\")\n assert d1.get_results() == {}\n d1.add_result(1, 18)\n assert 18 in d1.get_results().values()\n assert 1 in d1.get_results().keys()\n assert d1.get_results() == {1: 18}\n d1.add_result(3, 10)\n assert 18 and 10 in d1.get_results().values()\n assert 1 and 3 in d1.get_results().keys()\n assert d1.get_results() == {1: 18, 3: 10}\n assert random.random() not in d1.get_results().items()", "def test_add_one_more_test(self):\n self.assertTrue(True)", "def test_add(self):\n result = CalculateDueDate.add(self.test_time, self.test_turn_time)\n self.assertEqual(dt.datetime(2021, 6, 18, 15, 0, 0), result)", "def test_add_floats(self):\n print(\"---running test_add_floats\")\n result = some_math.add(10.5, 2)\n assert result == 12.5", "def test_sphere_add():\n sphere_1 = Sphere(2) \n sphere_2 = Sphere(2) \n assert (sphere_1 + sphere_2) == Sphere(4)", "def add(a,b):\n\treturn a+b", "def test_iadd_with_int_argument(self):\n\n a = Vec3(2, 3, 4)\n b = 1\n\n a += b\n\n expected_result = Vec3(3, 4, 5)\n\n self.assertEqual(a, expected_result)", "def add(a,b):\r\n result=a+b\r\n return result", "def test_point_positive_add(self):\n p1 = Point(x=3, y=5)\n p2 = Point(5, 3)\n p = p1 + p2\n self.assertEqual(str(p), '(8.0, 8.0)',\n 'Test of Point(x=3, y=5) + Point(5, 3) failed. Returned value != (8.0, 8.0)')", "def test_calculate_adding_and_subtraction(self):\n result = self.calcuate.calcuate('8+20-5')\n expected_result = \"23\"\n self.assertEqual(expected_result, result)", "def test_add(self):\n solution = pk.Solution()\n model = pk.Model('iv')\n protocol = pk.Protocol()\n with self.assertRaises(TypeError):\n solution.add()\n with self.assertRaises(TypeError):\n solution.add('model')\n with self.assertRaises(TypeError):\n solution.add('model', 'protocol')\n with self.assertRaises(TypeError):\n solution.add(model, 'protocol')\n with self.assertRaises(TypeError):\n solution.add('model', protocol)\n solution.add(model, protocol)\n self.assertEqual(solution.list_compartments, [(model, protocol)])\n model2 = pk.Model('sc')\n protocol2 = pk.Protocol(initial_dose=1.1, time_span=1.2)\n solution.add(model2, protocol2)\n self.assertEqual(solution.list_compartments,\n [(model, protocol), (model2, protocol2)])" ]
[ "0.87641716", "0.8728581", "0.85901284", "0.83460325", "0.8322828", "0.82488525", "0.82488525", "0.82488525", "0.8183821", "0.8175811", "0.80814475", "0.8047177", "0.8006275", "0.79732543", "0.79165095", "0.7913617", "0.7900404", "0.77927405", "0.7747016", "0.774245", "0.7690612", "0.7683828", "0.7633744", "0.75575197", "0.7547687", "0.7544472", "0.75268966", "0.75142235", "0.75142235", "0.75142235", "0.7499813", "0.74650764", "0.7456521", "0.7436163", "0.74224895", "0.7417655", "0.7417655", "0.7371547", "0.7356089", "0.7343939", "0.7337569", "0.73300606", "0.73300606", "0.73300606", "0.73300606", "0.73300606", "0.73300606", "0.73300606", "0.73300606", "0.73300606", "0.73300606", "0.73300606", "0.73300606", "0.73300606", "0.73300606", "0.73300606", "0.73300606", "0.73300606", "0.73300606", "0.73300606", "0.73300606", "0.73300606", "0.725352", "0.72466505", "0.7229086", "0.7229086", "0.7229086", "0.7229086", "0.7229086", "0.7229086", "0.7229086", "0.7229086", "0.7229086", "0.7229086", "0.7229086", "0.72137016", "0.71968174", "0.71895576", "0.71663195", "0.7153156", "0.7145352", "0.7096335", "0.7087522", "0.7068542", "0.706376", "0.70556504", "0.70314175", "0.70289594", "0.7004928", "0.69350016", "0.6905905", "0.6905231", "0.6901216", "0.6892434", "0.6891861", "0.6891349", "0.68708414", "0.68690735", "0.6855616", "0.6851091" ]
0.8132406
10
Test if the devide operation returns the correct result for a test case
def test_devide_int(self): self.assertEqual(operations.devide(8,4), 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_four_divided_by_two():\n assert divide(4, 2) == 2", "def test_call_decompose(self):\n dec = TwoQubitDecomposeUpToDiagonal()\n u4 = scipy.stats.unitary_group.rvs(4, random_state=47)\n dmat, circ2cx = dec(u4)\n dec_diag = dmat @ Operator(circ2cx).data\n self.assertTrue(Operator(u4) == Operator(dec_diag))", "def test_divide(self):\n self.assertEqual(2, divide(6, 3))\n self.assertEqual(2.5, divide(5, 2))", "def test_getDiff_twoNumbers(self):\r\n self.assertEqual(4, Arith().sub(11, 7))", "def test_half_case(self):\n steps = save_divide(np.ones(2), 2 * np.ones(2))\n np.testing.assert_equal(steps, 0.5 * np.ones(2))", "def test_sub(x, y, expected):\n\n assert sub(x, y) == -sub(y, x) == expected", "def test_div():\n assert_equal(Vector(4.0, 1.0) / 2.0, Vector(2.0, 0.5))", "def test_sub_numbers(self):\n a, b = 5, 10\n expected = b - a\n self.assertEqual(subtract(b, a), expected)", "def test_div():\n c=[1,2]\n def myfunc(x,y):\n f1=1/x/y/2\n return f1\n\n f_obj=ADiff(myfunc)\n res=f_obj.Jac(c)\n\n expectAns={'diff': [-0.25,-1/8], 'value': 0.25}\n\n assert res==expectAns", "def test_subtract_numbers(self):\n self.assertEqual(subtract(8, 4), 4)", "def test_scalar_division(self):\n\n a1 = tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 1, -2, 3, -4)\n\n a2 = a1 / 2\n\n self.assertEqual(a2,\n tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 0.5, -1, 1.5, -2))", "def test_div():\n\n assert div(9, 2) == pytest.approx(4.5)", "def test_div():\n\n assert div(9, 2) == pytest.approx(4.5)", "def test_dividing(self):\n divider = Divider()\n\n for i in range(-10, 10):\n for j in range(-10, 10):\n if j != 0:\n self.assertEqual(i/j, divider.calc(j, i))", "def test_calculate_subtraction_of_four_elements(self):\n result = self.calcuate.calcuate('24-10-5-3')\n expected_result = \"6\"\n self.assertEqual(expected_result, result)", "def test_det(self, a, dete):\n detc = det(a)\n assert np.isclose(detc, dete)", "def test_divide(self):\n print \"divide\"\n self.assertEqual(2, divide(6, 3))\n self.assertEqual(2.5, divide(5, 2))", "def test_divide(self):\n self.assertEqual(2, foo.divide(6, 3))\n self.assertEqual(2.5, foo.divide(5, 2))", "def test_sub(self):\n newvalues = Fraction(1,2)-Fraction(1,2)\n fraction1 = Fraction(newvalues[0],newvalues[1])\n self.assertEqual(str(fraction1),\"0/4\")", "def test_subtract(self):\n self.assertEqual(work_file.subtract(10, 5), 5)\n self.assertEqual(work_file.subtract(-1, 1), -2)\n self.assertEqual(work_file.subtract(-1, -1), 0)", "def test_inverse_transform(self):", "def test_subtraction():\n assert calculator.subtract(7, 3) == 4\n assert calculator.subtract(7.0, 3.0) == 4.0\n assert calculator.subtract(7, -3) == 10\n assert calculator.subtract(7.0, -3.0) == 10.0", "def test_subtract_numbers(self):\n self.assertEqual(subtract(5, 11), 6)", "def test_subtract_numbers(self):\n self.assertEqual(subtract(5, 11), 6)", "def test_list_int(self):\n result = div(2, 4)\n self.assertEqual(result, 0.5)", "def test_subtract_numbers(self):\n self.assertEqual(sub(9, 3),6)", "def se(actual,expected):\n return np.power(np.subtract(actual,expected),2)", "def test_notequal(self):\n self.assertTrue(Fraction(144,2)!=Fraction(8,4))", "def test_mc_variance_swap(self):\n\n vols = []\n dates = []\n\n interm_date = self.today + int(0.1*365+0.5)\n exercise = EuropeanExercise(self.ex_date)\n\n dates.append(interm_date)\n dates.append(self.ex_date)\n\n vols.append(0.1)\n vols.append(self.values['v'])\n\n # Exercising code using BlackVarianceCurve because BlackVarianceSurface\n # is unreliable. 
Result should be v*v for arbitrary t1 and v1\n # (as long as 0<=t1<t and 0<=v1<v)\n\n vol_ts = BlackVarianceCurve(self.today, dates, vols, self.dc, True)\n\n stoch_process = BlackScholesMertonProcess(self.spot, self.q_ts,\n self.r_ts, vol_ts)\n\n engine = MCVarianceSwapEngine(stoch_process,\n time_steps_per_year=250,\n required_samples=1023,\n seed=42,\n )\n\n\n variance_swap = VarianceSwap(self.values['type'],\n self.values['strike'],\n self.values['nominal'],\n self.today,\n self.ex_date,\n )\n\n variance_swap.set_pricing_engine(engine)\n\n calculated = variance_swap.variance\n expected = 0.04\n tol = 3.0e-4\n error = abs(calculated-expected)\n self.assertTrue(error<tol)", "def test_frac_diff(self):\n s1 = self.RNA(\"ACGU\")\n s2 = self.RNA(\"AACG\")\n s3 = self.RNA(\"GG\")\n s4 = self.RNA(\"A\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_diff(e), 0)\n self.assertEqual(s1.frac_diff(s2), 0.75)\n self.assertEqual(s1.frac_diff(s3), 1)\n self.assertEqual(s1.frac_diff(s4), 0) # note truncation", "def test_symmetric_difference(self, client):\n\n expected = {\n 'a': [0,2,4,6,8],\n 'b': [4,6,8,10,12,14,16],\n 'result': [0, 2, 10, 12, 14, 16]\n }\n\n res = client.post('/api/v1/symmetric_difference', json={'a': expected['a'], 'b': expected['b'] })\n assert res.status_code == 200\n assert res.json['data'] == expected['result']\n assert res.json['status'] == 2000", "def test_ddiff_v2(self):\n print \"\\n\"\n for d in ddiff_v2(a, b): print d\n self.assertEqual(d, \"+FUN\")", "def test_doppler(self):\n fm, am, iflaw = misc.doppler(512, 200.0, 65, 10, 50)\n self.assert_is_monotonic_decreasing(iflaw)", "def test_sub_with_vec_argument(self):\n\n a = Vec3(2, 4, 6)\n b = Vec3(1, 2, 3)\n\n result = a - b\n\n expected_result = Vec3(1, 2, 3)\n\n self.assertEqual(result, expected_result)", "def test_three_divided_by_nothing():\n assert divide(3) == 1", "def test_exact_two_qubit_cnot_decompose_random(self, seed):\n unitary = random_unitary(4, seed=seed)\n self.check_exact_decomposition(unitary.data, two_qubit_cnot_decompose)", "def test_is_unital_swap_operator_choi_true():\n np.testing.assert_equal(is_unital(swap_operator(3)), True)", "def test_extrude_both(self):\n\n test_volume_extrude_both = self.test_shape.volume()\n self.test_shape.extrude_both = False\n assert self.test_shape.volume() == pytest.approx(test_volume_extrude_both)", "def test_den(self):\n np_den, torch_den = self.get_denominator()\n np.testing.assert_array_almost_equal(np_den, torch_den.numpy())", "def test__vector_subtraction__given_two_vectors__return_correct_vector():\n assert Vector((0, 1, 2)) - Vector((3, 4, 5)) == Vector((-3, -3, -3))", "def test_dose_to_volume_fraction_mid(self):\n dvh = DVH(self.test_doses, self.test_cum_vols)\n self.assertAlmostEqual(dvh.dose_to_volume_fraction(0.9999999999), self.min_dose, places=4)\n self.assertGreater(dvh.dose_to_volume_fraction(0.9999999999), self.min_dose)", "def test_odd(self):", "def test_ddiff_v1(self):\n print \"\\n\"\n for d in ddiff_v1(a, b): print d\n self.assertEqual(d, \"+FUN\")", "def vd(v2,v1):\n return v2-v1", "def reverse_difference():", "def test_exact_two_qubit_cnot_decompose_paulis(self):\n unitary = Operator.from_label(\"XZ\")\n self.check_exact_decomposition(unitary.data, two_qubit_cnot_decompose)", "def test_divide_success(self):\n with self.assertNoLogs():\n divide_by(10, 2)", "def test__point_subtraction__given_two_points__return_correct_vector():\n assert Point((0, 1, 2)) - Point((3, 4, 5)) == Vector((-3, -3, -3))", "def division_algo(a, b):\n return a / b, a % b", 
"def test_cliford(generator, paulixops, result):\n u = clifford(generator, paulixops)\n assert u.compare(result)", "def testFloorSub(self):\n (w,h) = self.im32_1.getSize()\n \n self.im32_1.fill(0x80)\n for i in range(256):\n self.im32_3.fill(i)\n floorSub(self.im32_1, self.im32_3, self.im32_2)\n vol = computeVolume(self.im32_2)//(w*h)\n value = max(0x80-i, 0)\n self.assertTrue(vol==value, \"%d: %d %d\" % (i,vol, value))", "def test_rtruediv():\n # Test for reverse division with scalar Rnode object and float value\n x = Rnode(5.0)\n z = 1 / x\n try:\n assert z.value == 1 / x.value\n except AssertionError as e:\n print(e)\n raise AssertionError", "def test_get_game_diff(self):\n pass", "def test_decompose_two_qubit_product_gate_not_product(self):\n klkr = Ud(1.0e-6, 0, 0)\n with self.assertRaises(QiskitError) as exc:\n decompose_two_qubit_product_gate(klkr)\n self.assertIn(\"decomposition failed\", exc.exception.message)", "def test_rtruediv():\n truediv = _MathExpression() / 2\n rtruediv = 9 / _MathExpression()\n assert truediv(9) == rtruediv(2)", "def test_even(self):", "def test_paired_difference_analyses(self):\r\n actual = paired_difference_analyses(\r\n self.personal_ids_to_state_values1,\r\n ['firmicutes-abundance',\r\n 'bacteroidetes-abundance'],\r\n ['Pre', 'Post'],\r\n output_dir=self.test_out,\r\n ymin=0.0,\r\n ymax=1.0)\r\n self.assertTrue(exists(join(self.test_out,\r\n 'paired_difference_comparisons.txt')))\r\n self.assertTrue(\r\n exists(join(self.test_out, 'firmicutes-abundance.pdf')))\r\n self.assertTrue(\r\n exists(join(self.test_out, 'bacteroidetes-abundance.pdf')))\r\n # three output paths returned\r\n self.assertEqual(len(actual[0]), 5)\r\n # expected t values returned, they should be less than (firmicutes) or greater (bacteroidetes) than 2 \r\n self.assertLess(abs(actual[1]['firmicutes-abundance'][4]), 2)\r\n self.assertLess(2, abs(actual[1]['bacteroidetes-abundance'][4]))", "def ilerp(a, b, t):\n return (t - a) / (b - a)", "def test_cases(self):\n case_one = math_helpers.num_divisors(1)\n self.assertEqual(case_one, 1)\n\n case_two = math_helpers.num_divisors(10)\n self.assertEqual(case_two, 4)\n\n case_three = math_helpers.num_divisors(6930)\n self.assertEqual(case_three, 48)", "def test_sdp_output():\n state_output_value = fidelity_of_separability(sep_rho, [2, 2], 2)\n assert np.isclose(1, state_output_value)", "def test_negatives(self):\n argument = [-1,-2,-3]\n expected = [0,-2,-4]\n double_preceding(argument)\n self.assertEqual(expected, argument, \"The list contains one 3 item.\")", "def test_part_2(arguments, distance, output):\n assert part_2.solution(arguments, distance) == output", "def remainder(left_object, right_object):\n result = left_object % right_object\n if left_object < 0 and result > 0 or left_object > 0 and result < 0:\n result = result - right_object\n return result", "def test_subtract_different_sizes():\n Vector(1.0) - Vector(2.0, 3.0)", "def test_sub_with_int_arg(self):\n\n a = Vec3(7, 8, 9)\n b = 5\n\n result = a - b\n\n expected_result = Vec3(2, 3, 4)\n\n self.assertEqual(result, expected_result)", "def test_negativenumbers(self):\n result = ps.pairs([-4, 4, 0, -2, 0], 0)\n self.assertEqual(result[0, 0], -4)\n self.assertEqual(result[0, 1], 4)\n self.assertEqual(result[1, 0], 0)\n self.assertEqual(result[1, 1], 0)", "def test_result(self):\n result = compute()\n self.assertEqual(result, '4782')\n print(\"eulpy25Test passed\")", "def devideby2(num):\n return((num / 2) % 4294967296)", "def test_double(self):\n arr, result = [0.1, 0.02, 0.0003], 
[]\n fizz_buzz(arr, result)\n self.assertEqual(result, [0.1, 0.02, 0.0003])", "def do(self, a, b):\n raise SkipTest\n u, s, vt = gula.svd(a, 0)\n assert_almost_equal(a, dot(multiply(u, s), vt))", "def test_delta_val4(self):\n d = Delta(\"+-25%\")\n self.assertEqual(d.cmp(0, 1), False)\n self.assertEqual(d.cmp(8, 4), True)\n self.assertEqual(d.cmp(8, 6), False)", "def test_right(self):\n x = np.array([-100, -2, -1, 0, 1, 1.1])\n self.assertEqual(npinterval.half_sample_mode(x), +1.05)", "def test_fraction_rich_comparisson(self):\n fract1 = source.Fraction(5, 2) # 2.5\n fract2 = source.Fraction(3, 2) # 1.5\n fract3 = source.Fraction(25, 10) # 2.5\n\n self.assertFalse(fract1 != fract3) # 2.5 != 2.5\n self.assertTrue(fract1 == fract3) # 2.5 == 2.5\n self.assertTrue(fract2 < fract3) # 1.5 < 2.5\n\n # Let's try the other way\n self.assertTrue(fract1 >= fract2) # 2.5 >= 1.5\n self.assertFalse(fract2 >= fract3) # 1.5 >= 2.5\n\n # Let's try with other types\n self.assertTrue(fract1 >= 2) # 2.5 >= 2\n self.assertTrue(fract2 == 1.5) # 1.5 == 1.5\n\n # Let's try the other way with other types\n self.assertTrue(2 <= fract1) # 2 <= 2.5\n self.assertTrue(1.5 == fract2) # 1.5 == 1.5\n\n self.assertTrue(10 > fract1) # 10 > 2.5\n self.assertFalse(10 < fract1) # 10 < 2.5\n self.assertTrue(fract1 < 10) # 2.5 < 10\n self.assertFalse(fract1 > 10) # 2.5 > 10", "def test_bothV_traversals(self):\r\n results = self.blake.bothV()\r\n assert len(results) == 2\r\n assert self.beekeeping in results", "def test_div_complex(doctest):", "def test_subtracting(self):\n subtracter = Subtracter()\n\n for i in range(-10, 10):\n for j in range(-10, 10):\n self.assertEqual(i-j, subtracter.calc(j, i))", "def test_case34(self):\n \n self.graph1.swapStudents(\"student1\",\"supervisor1\",\"student2\",\"supervisor1\")\n\n result1 = self.graph1.getSupervisors(\"student1\")\n result2 = self.graph1.getSupervisors(\"student2\")\n\n expected1 = ['supervisor1']\n expected2 = ['supervisor1']\n\n self.assertEqual((result1,result2),(expected1,expected2))", "def test_evenly_divisable_row_2(self, day2part2_data):\n result = day2.get_and_divide_evenly_divisable(day2part2_data[1])\n assert result == 3", "def testsub_X_Y ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTupX, fracTupY, dictAdd, dictSub, dictMul, dictDiv in self.knownArithResultValues:\r\n\t\t\tfracX = eval ( r.sub ( 'frac.frac', fracTupX ) )\r\n\t\t\tfracY = eval ( r.sub ( 'frac.frac', fracTupY ) )\r\n\t\t\tsub_fracX_fracY = fracX - fracY\r\n\t\t\tself.assertEqual ( sub_fracX_fracY.toString ().split ()[0], dictSub ['X-Y'] )", "def split_dataset(x_test, y_test, dev_ratio):\n test_size = len(x_test)\n print(test_size)\n dev_size = (int)(test_size * dev_ratio)\n print(dev_size)\n x_dev = x_test[:dev_size]\n x_test = x_test[dev_size:]\n y_dev = y_test[:dev_size]\n y_test = y_test[dev_size:]\n return x_test, y_test", "def test_evenly_divisable_row_1(self, day2part2_data):\n result = day2.get_and_divide_evenly_divisable(day2part2_data[0])\n assert result == 4", "def test_suite():\n test(calc_det([[2, 1],[3, 4]]), 5)", "def test_left(self):\n x = np.array([-1.1, -1, 0, 1, 2, 100])\n self.assertEqual(npinterval.half_sample_mode(x), -1.05)", "def test_div(self):\n funcs = ['div', 'div_']\n for func in funcs:\n for tensor_type in [lambda x: x, SharedTensor]:\n tensor1 = get_random_test_tensor()\n tensor2 = get_random_test_tensor(max_value=0.5) + 1.5\n encrypted = SharedTensor(tensor1)\n encrypted2 = tensor_type(tensor2)\n reference = getattr(tensor1, func)(tensor2)\n encrypted_out = 
getattr(encrypted, func)(encrypted2)\n msg = '%s %s failed' % (\n 'private' if tensor_type is SharedTensor else 'public',\n func)\n self._check(encrypted_out, reference, msg)\n if '_' in func:\n # Check in-place op worked\n self._check(encrypted, reference, msg)\n else:\n # Check original is not modified\n self._check(encrypted, tensor1, msg)", "def test_svd_sharpness(self):\n \t\t\t\n\t\tesd_before = self.watcher.get_ESD(layer=self.fc2_layer) \n\t\t\n\t\tself.watcher.SVDSharpness(layers=[self.fc2_layer])\n\t\tesd_after = self.watcher.get_ESD(layer=self.fc2_layer) \n\t\t\n\t\tprint(\"max esd before {}\".format(np.max(esd_before)))\n\t\tprint(\"max esd after {}\".format(np.max(esd_after)))\n\n\t\tself.assertGreater(np.max(esd_before)-2.0,np.max(esd_after))", "def _bessel_iv_ratio_fwd(v, z):\n output = _bessel_iv_ratio_naive(v, z)\n return output, (v, z)", "def test_minus(self):\n self.assertEqual(1, minus(3, 2))", "def test_frac_diffNonGaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"AGGG\")\n s3 = self.RNA(\"GGGG\")\n s4 = self.RNA(\"AG--GA-G\")\n s5 = self.RNA(\"CU--CU-C\")\n s6 = self.RNA(\"AC--GC-G\")\n s7 = self.RNA(\"--------\")\n s8 = self.RNA(\"AAAA----\")\n s9 = self.RNA(\"A-GG-A-C\")\n e = self.RNA(\"\")\n\n def test(x, y, z):\n return self.assertFloatEqual(x.frac_diff_non_gaps(y), z)\n\n test(s1, s2, 0.75)\n test(s1, s3, 1)\n test(s2, s3, 0.25)\n test(s1, s4, 0.5)\n test(s4, s5, 1)\n test(s4, s6, 0.4)\n test(s4, s7, 0)\n test(s4, s8, 0.5)\n test(s4, s9, 1 / 3.0)\n test(e, s4, 0)", "def testPartVersusPart(dataSet,testSet,cmd = ''):\n print(\"Testing part versus part method...\")\n models = calcModels(dataSet,cmd)\n result = sequence(len(testSet[0]))\n class_result = mapv(lambda x: testResult(x[2],testSet),models)\n\n for i in range(0,len(result)):\n result[i] = classify(models,mapv(lambda x: x[i],class_result))\n\n hit,total,acc = accuracy(result,testSet[1])\n print(\"acc: \",acc,\"%\")\n print(\"hit/total: \",hit,\"/\",total)", "def assurance(a, b):\n return a - b", "def test_single_quadrant(self):", "def test_calculate_cipher_step():\n given_value = d.calculate_cipher_step()\n assert type(given_value) == int\n assert given_value == 1016\n new_decoder = Decoder(filename, \"HELLO THERE!\")\n new_value = new_decoder.calculate_cipher_step()\n assert new_value != given_value\n random_number = random.Random()\n assert given_value != random_number", "def test_truediv():\n truediv = _MathExpression() / 2\n assert math.isclose(truediv(9), 4.5) # type: ignore", "def sub(x, y) :\r\n z = y - x\r\n # The checker automatically proves z < 0 and z + x == y.\r\n return y", "def test_denominator_float(self):\n steps = save_divide(np.ones(2), 2)\n np.testing.assert_equal(steps, 0.5 * np.ones(2))", "def test_calculate_subtraction(self):\n result = self.calcuate.calcuate('10-8')\n expected_result = \"2\"\n self.assertEqual(expected_result, result)", "def test_heaviside(self):\n\n self.assertEqual(mlu.heaviside(-1), 0)\n self.assertEqual(mlu.heaviside(0), 1)\n self.assertEqual(mlu.heaviside(1), 1)\n\n vector_in = np.asarray((-2, -1, 0, 1, 2))\n vector_out = np.asarray((0, 0, 1, 1, 1))\n np.testing.assert_equal(mlu.heaviside(vector_in), vector_out)", "def expected(A, B):\n return 1 / (1 + 10 ** ((B - A) / 150))", "def test_delta_val2(self):\n d = Delta(\"+2.5-1.5\")\n self.assertEqual(d.cmp(0, 1), False)\n self.assertEqual(d.cmp(1, 3), False)\n self.assertEqual(d.cmp(3, 1), True)", "def test_sub_with_float_arg(self):\n\n a = Vec3(7, 8, 9)\n b = 5.0\n\n result = a - b\n\n expected_result = 
Vec3(2, 3, 4)\n\n self.assertEqual(result, expected_result)" ]
[ "0.6033728", "0.60241795", "0.5930758", "0.5923345", "0.59231955", "0.5863153", "0.5844059", "0.58171606", "0.5786536", "0.575913", "0.5739477", "0.5738475", "0.5738475", "0.57308537", "0.5692235", "0.56907034", "0.5671279", "0.56524765", "0.56412613", "0.5602296", "0.55714065", "0.5551787", "0.54995507", "0.54995507", "0.5488971", "0.5484036", "0.5474584", "0.54701054", "0.5469794", "0.5452384", "0.5449306", "0.5446016", "0.54333514", "0.5423653", "0.5422097", "0.54142535", "0.5413782", "0.5411189", "0.5410826", "0.53923", "0.53920424", "0.53835005", "0.53742695", "0.53599066", "0.5355408", "0.5341047", "0.53409946", "0.53348714", "0.53294945", "0.5323174", "0.53221774", "0.53171587", "0.5307189", "0.53070635", "0.5306549", "0.53030074", "0.529874", "0.5298492", "0.529803", "0.529144", "0.52838975", "0.527314", "0.5269439", "0.5269179", "0.5266683", "0.5253866", "0.52431214", "0.52365726", "0.52293557", "0.52274", "0.52252096", "0.52236825", "0.5220679", "0.5214076", "0.5212099", "0.5209196", "0.5202676", "0.5202231", "0.52021784", "0.51969975", "0.51938695", "0.5192198", "0.5185597", "0.51784307", "0.5175437", "0.51715964", "0.51708645", "0.51696616", "0.5168831", "0.5167701", "0.5163794", "0.51627475", "0.51540685", "0.5153072", "0.5149186", "0.5149167", "0.51392084", "0.51380575", "0.513649", "0.5135268" ]
0.56738025
16
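Reading the three trailing fields of the row above together suggests how they relate: document_rank appears to count the negatives whose score exceeds document_score, since 16 of the negative_scores listed above are larger than 0.56738025. That reading is an inference from this row alone, not something stated in the dump. A minimal Python sketch, with the values copied from the row, to check the count:

```python
# Values copied from the row above. The tail of negative_scores is omitted
# because every remaining entry is smaller than document_score, so it does
# not change the count.
document_score = 0.56738025
document_rank = 16
negative_scores = [
    0.6033728, 0.60241795, 0.5930758, 0.5923345, 0.59231955, 0.5863153,
    0.5844059, 0.58171606, 0.5786536, 0.575913, 0.5739477, 0.5738475,
    0.5738475, 0.57308537, 0.5692235, 0.56907034, 0.5671279,
]

# Count the "harder" negatives, i.e. those that outscore the positive document.
harder = sum(score > document_score for score in negative_scores)
assert harder == document_rank  # holds for this row: 16
```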
Test if the equation 1 + 2 is parsed and calculated correctly
def test_parse_add(self): self.assertEqual(parse_input.parse(["1", "+", "2"]), 3)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def autosolve(equation):\n\n try:\n # Try to set a variable to an integer\n num1 = int(equation.split(\" \")[0])\n\n except ValueError:\n # Try to set a variable to a decimal\n num1 = float(equation.split(\" \")[0])\n\n try:\n # Try to set a variable to an integer\n num2 = int(equation.split(\" \")[2])\n\n except ValueError:\n # Try to set a variable to a decimal\n num2 = float(equation.split(\" \")[2])\n\n # If the lowercase version of the operator is '+', 'plus' or 'add'\n if equation.split(\" \")[1].lower() in [\"+\", \"plus\", \"add\"]:\n\n # Return the answer\n return num1 + num2\n\n # If the lowercase version of the operator is '-', 'minus' or 'subtract'\n elif equation.split(\" \")[1].lower() in [\"-\", \"minus\", \"subtract\"]:\n\n # Return the answer\n return num1 - num2\n\n # If the lowercase version of the operator is '*', 'times', 'multiply'\n elif equation.split(\" \")[1].lower() in [\"*\", \"times\", \"multiply\"]:\n\n # Return the answer\n return num1 * num2\n\n # If the lowercase version of the operator is '/', 'divide' or 'quotient'\n elif equation.split(\" \")[1].lower() in [\"/\", \"divide\", \"quotient\"]:\n\n # Return the answer\n return num1 / num2\n\n # If the lowercase version of the operator is '%, 'remainder' or 'rem'\n elif equation.split(\" \")[1].lower() in [\"%\", \"remainder\", \"rem\"]:\n\n # Return the answer\n return num1 % num2\n\n # Raise a warning\n raise ValueError(\"Invalid operation provided.\")", "def is_equation(self): \n return False", "def is_equation(self):\n return True", "def is_equation(self):\n return True", "def test_calculate_three_operations_in_bracket(self):\n result = self.calcuate.calcuate('(2x2+1+7)x3-2')\n expected_result = \"34\"\n self.assertEqual(expected_result, result)", "def test_calculate_two_operations_in_bracket(self):\n result = self.calcuate.calcuate('(2-5+7)x3-2')\n expected_result = \"10\"\n self.assertEqual(expected_result, result)", "def is_equation(self):\n return False", "def test_complex_expression(self):\r\n\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"(2^2+1.0)/sqrt(5e0)*5-1\"),\r\n 10.180,\r\n delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"1+1/(1+1/(1+1/(1+1)))\"),\r\n 1.6,\r\n delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"10||sin(7+5)\"),\r\n -0.567, delta=0.01\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"sin(e)\"),\r\n 0.41, delta=0.01\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"k*T/q\"),\r\n 0.025, delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"e^(j*pi)\"),\r\n -1, delta=1e-5\r\n )", "def Calc():\n print('Please type a maths expression with 2 intergers or floats and an operator \"+\", \"-\", \"*\" or \"/\"')\n inp = (input())\n for char in inp:\n if char not in '1234567890.-+*/':\n print('Please restart the program and only type valid characters')\n return\n operators = [\"+\", \"-\", \"*\", \"/\"]\n buf = ''\n operand1 = 0.0\n operand2 = 0.0\n for char in inp:\n if char not in operators:\n buf += char\n else:\n operator = char\n operand1 = float(buf)\n buf = ''\n operand2 = float(buf)\n res = 0.0\n if operator == '+':\n res = su(operand1, operand2)\n elif operator == '-':\n res = sub(operand1, operand2)\n elif operator == '*':\n res = mu(operand1, operand2)\n elif operand2==0:\n return \"Can not divide by 0\"\n else:\n res = di(operand1, operand2)\n print(res)\n return res", "def evaluate1(expr):\n operators = '*/+-'\n operator_stack = []\n operand_stack = []\n\n def 
parse_operand(s, i):\n \"\"\"\n parse the location of the string until I find an\n operator\n parse \"12\" to 12\n \"12.12\" to 12.12\n returns a float\n \"\"\"\n value = ''\n while (s[i] not in operators):\n value += s[i]\n i += 1\n if s[i] == ')':\n break\n return float(value), i-1\n\n def do_operation(operand1, operand2, operator):\n if operator == '+':\n return operand1 + operand2 \n elif operator == '*':\n return operand1 * operand2\n elif operator == '/':\n return operand1 / operand2\n elif operator == '-':\n return operand1 - operand2\n\n i = 0\n s = expr\n length = len(s)\n numbers = '0123456789'\n while i < length:\n data = s[i]\n if data == '(':\n operand_stack.append(data)\n elif data in numbers:\n # parse the operand number and modifies the index i\n number, i = parse_operand(s, i)\n operand_stack.append(number)\n elif data in operators:\n operator_stack.append(data)\n elif data is ')':\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n operator = operator_stack.pop()\n operand_stack.pop() # remove (\n operand_stack.append(do_operation(operand1, operand2, operator))\n i += 1\n return operand_stack.pop()", "def test_calculate_adding_in_bracket(self):\n result = self.calcuate.calcuate('(2+1)')\n expected_result = \"3\"\n self.assertEqual(expected_result, result)", "def test_operator_sanity(self):\r\n var1 = 5.0\r\n var2 = 2.0\r\n operators = [('+', 7), ('-', 3), ('*', 10), ('/', 2.5), ('^', 25)]\r\n\r\n for (operator, answer) in operators:\r\n input_str = \"{0} {1} {2}\".format(var1, operator, var2)\r\n result = calc.evaluator({}, {}, input_str)\r\n fail_msg = \"Failed on operator '{0}': '{1}' was not {2}\".format(\r\n operator, input_str, answer\r\n )\r\n self.assertEqual(answer, result, msg=fail_msg)", "def calculate_expression(number1, number2, operator):\n\n if operator == '+':\n return number1 + number2\n elif operator == '-':\n return number1 - number2\n elif operator == '*':\n return number1 * number2", "def exeval(expression): \n if len(expression) <= 3: #Assuming no spaces (\" \") between each value given in the expression\n if expression[0] == \"+\":\n return float(expression[1]) + float(expression[2])\n elif expression[0] == \"-\":\n return float(expression[1]) - float(expression[2])\n else:\n if expression[0] == \"+\":\n return float(expression[1]) + exeval(expression[2:])\n elif expression[0] == \"-\":\n return float(expression[1]) - exeval(expression[2:])", "def test_expression(x, y, z):\n return x * y + y / z", "def calculate (self,phrase):\r\n\r\n\r\n def bracketed (phrase,bracketing='()'):\r\n\r\n \"\"\"Returns TRUE if <phrase> is encompassed by a left bracket and a right bracket\r\n at the same hierarchical level\"\"\"\r\n\r\n level = 0\r\n left_point = None\r\n right_point = None\r\n \r\n\r\n for count,char in enumerate(phrase):\r\n\r\n if char == bracketing[0]:\r\n if level == 0:\r\n left_point = count\r\n level+=1\r\n if char == bracketing[1]:\r\n level-=1\r\n if level == 0:\r\n right_point = count\r\n if not (left_point is None) and (not right_point is None) and left_point == 0 and right_point == len(phrase)-1:\r\n return True\r\n return False\r\n\r\n def quoted (phrase):\r\n\r\n level = 0\r\n foundchar = ''\r\n left_point = None\r\n right_point = None \r\n for count,char in enumerate(phrase):\r\n\r\n if char in ['\"',\"'\"] and level == 0:\r\n foundchar = char\r\n left_point = count\r\n level += 1\r\n elif level == 1 and char == foundchar:\r\n right_point = count\r\n level += 1\r\n if not (left_point is None) and (not right_point is None) 
and left_point == 0 and right_point == len(phrase)-1:\r\n return True\r\n return False \r\n \r\n \r\n\r\n def is_function(phrase):\r\n\r\n \"\"\"Tests to see if a phrase begins with a predefined function,\r\n in which case it returns information about the iarity of function\"\"\"\r\n \r\n \r\n for x in self.functions.keys():\r\n\r\n if len(x) < len(phrase) and phrase[0:len(x)] == x:\r\n if bracketed(phrase[len(x):]):\r\n if self.functions[x][1]-1 <= phrase.count(',') <= self.functions[x][2]-1:\r\n return x, self.functions[x][0], self.functions[x][2], phrase[len(x):]\r\n else:\r\n return False,False,False,False \r\n \r\n\r\n def all_simple (phrase):\r\n\r\n \"\"\"Tests if a phrase is a simple string representing an expression, rather than an operation\"\"\"\r\n\r\n\r\n for x in phrase:\r\n if (x not in self.operations and not (isinstance(x,(int,type(ListType()),float,bool) or (isinstance(x,str) and quoted(x)))) or self.current_register.contains(x)):\r\n return False\r\n return True\r\n \r\n def parse (phrase):\r\n\r\n \r\n COMPTERMS = ['==','>=','<=','!=','>','<',]\r\n\r\n\r\n def contains_comp (x):\r\n \"\"\"True is x contains any of the COMP Terms\"\"\"\r\n\r\n for comp in COMPTERMS:\r\n if comp in x:\r\n return True\r\n return False\r\n\r\n\r\n \r\n \"\"\"Parses and analzes the phrase\"\"\"\r\n\r\n \r\n if phrase in ['bTrue','bFalse','EmptyList']:\r\n return {'bTrue':True,\r\n 'bFalse':False,\r\n 'EmptyList':ListType()}[phrase]\r\n\r\n if isinstance(phrase,str):\r\n \r\n if quoted(phrase):\r\n return phrase\r\n else:\r\n try:\r\n return float(phrase)\r\n except:\r\n pass\r\n \r\n\r\n # If the phrase is a string\r\n phrase = phrase.strip()\r\n\r\n func_name, func, iarity, func_phrase = is_function(phrase)\r\n # tests is it is function; otherwise the values are false.\r\n \r\n \r\n\r\n if func_name:\r\n if iarity == 1:\r\n # If the function accepts one value\r\n return func(parse(func_phrase))\r\n if iarity == 2:\r\n # Two values \r\n func_phrase = func_phrase[1:-1]\r\n term1,term2 = func_phrase.split(',')[0],func_phrase.split(',')[1]\r\n return func(parse(term1),parse(term2))\r\n if iarity == 3:\r\n func_phrase = func_phrase[1:-1]\r\n term1,term2, term3 = func_phrase.split(',')[0],func_phrase.split(',')[1],func_phrase.split(',')[2]\r\n return func(parse(term1),parse(term2),parse(term3))\r\n \r\n if iarity >3:\r\n # A list of values \r\n func_phrase = func_phrase[1:-1]\r\n return func([parse(x) for x in func_phrase.split(',')])\r\n elif phrase and phrase[0] == '-' and bracketed(phrase[1:]):\r\n # Translates negative sign (as opposed to operators) into corresponding function \r\n return -parse(phrase[2:-1])\r\n\r\n\r\n elif bracketed(phrase):\r\n # removes top-level bracket\r\n phrase = phrase[1:-1]\r\n return parse(phrase)\r\n elif phrase in self.operations:\r\n return phrase\r\n elif self.current_register.contains(phrase):\r\n # for variables and constants\r\n return self.current_register.get(phrase)\r\n elif contains_comp(phrase) and '(' not in phrase and ')' not in phrase:\r\n return calc.computer.get(phrase)\r\n elif phrase and phrase[0]=='@' and phrase[-1]=='@':\r\n # to retrieve values from the log \r\n index = int(parse(phrase[1:-1]))\r\n if 0<= index <= len(self.lines):\r\n return self.lines[index][0]\r\n else:\r\n \r\n phrase = list(phrase)\r\n #phrase is converted to a list to allowing indexical assignments\r\n operation_sequence = []\r\n level = 0\r\n inquotes = False\r\n quote_form = ''\r\n for counter, x in enumerate(phrase):\r\n\r\n # Search for operators that are not 
enclosed in parantheses\r\n\r\n if not inquotes and x in ['\"',\"'\"]:\r\n inquotes = True\r\n quote_form = x\r\n elif inquotes and x == quote_form:\r\n if counter < len(phrase)-1:\r\n if phrase[counter+1] in ['+']:\r\n phrase[counter+1] = '#+#'\r\n\r\n if x == '(':\r\n level +=1\r\n\r\n if x == ')':\r\n level -=1\r\n if level == 0:\r\n if counter<len(phrase)-1:\r\n if phrase[counter+1] in self.operations:\r\n # If an operator is found, surround it with pound signs\r\n phrase[counter+1] = '#'+phrase[counter+1]+'#'\r\n if phrase[counter+2] in self.operations:\r\n phrase[counter+2] = '~'\r\n # For a minus sign that is not an operator\r\n\r\n \r\n phrase = ''.join(phrase).replace('~','-').split('#')\r\n # Split the phrase into expressions linked by operators \r\n newphrase = []\r\n for x in phrase:\r\n # a clumsy way to distinction between numerical values, and string operators\r\n try:\r\n newphrase.append(float(x))\r\n except:\r\n newphrase.append(x)\r\n phrase = newphrase\r\n\r\n return parse(phrase)\r\n \r\n \r\n\r\n if isinstance(phrase,list):\r\n # If the phrase has already been parsed into a list \r\n if len(phrase) == 1:\r\n return (parse(phrase[0]))\r\n if all_simple(phrase):\r\n # If every value in the phrase list has been reduced to\r\n # a numerical value or an operator\r\n \r\n\r\n for operation in self.operations:\r\n\r\n #In order to preserve the correct order of operations,\r\n #the operations are analyzed in succession\r\n\r\n while operation in phrase:\r\n\r\n #This repeat as long as the operation is in the phrase,\r\n #since with each pass it only \"reduced\"\r\n #expression/operator/expression triplet\r\n \r\n\r\n newlist = [] # For the result of each pass through the list.\r\n lastvalue = None\r\n counter = 0\r\n stop = False\r\n while counter < len(phrase) and not stop:\r\n \r\n \r\n if counter < len(phrase)-2:\r\n a = phrase[counter]\r\n op = phrase[counter+1]\r\n b = phrase[counter+2]\r\n #take a triplet of values from the list\r\n\r\n if op == operation:\r\n # if an operator is found, reduced the triplet, and\r\n # then add the reduced value, together with the rest\r\n # of the list to the \r\n if operation == '*':\r\n c = a*b\r\n elif operation == '+':\r\n if isinstance(a,str) and isinstance(b,str):\r\n c = a[0:-1]+b[1:]\r\n else:\r\n c = a+b\r\n elif operation == '/':\r\n c = a/b\r\n elif operation == '^':\r\n c = a**b\r\n elif operation == '%':\r\n c = a % b\r\n elif operation == '-':\r\n c = a - b\r\n newlist.append(c)\r\n newlist += phrase[counter+3:] \r\n stop = True\r\n else:\r\n newlist.append(a)\r\n else:\r\n # otherwise, just add the text value to the new list\r\n newlist.append(phrase[counter])\r\n counter +=1 \r\n \r\n \r\n phrase = newlist\r\n\r\n \r\n else:\r\n # if the list is not yet simple, return a new list after parsing each element.\r\n phrase = [parse(x) for x in phrase]\r\n return parse(phrase)\r\n\r\n if isinstance(phrase,(int,float,type(ListType()),bool)):\r\n # if a numerical value, stop the recursion\r\n \r\n return phrase \r\n\r\n return parse(phrase)", "def test_calculate_test(self):\n result = self.calcuate.calcuate('3+3+(4-3)')\n expected_result = \"7\"\n self.assertEqual(expected_result, result)", "def test_calculate_bracket_at_the_beginning_and_multiplication(self):\n result = self.calcuate.calcuate('(2+1)x3')\n expected_result = \"9\"\n self.assertEqual(expected_result, result)", "def test_calculate_multiplication_and_adding(self):\n result = self.calcuate.calcuate('1+2x3')\n expected_result = \"7\"\n self.assertEqual(expected_result, 
result)", "def test_simple_calculation(self):\n\t\turl = reverse('calculation')\n\t\tdata = {'expression': '2+3*(4+2)'}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data, {'result': 20 })", "def test_calculate_multiplication_and_bracket_at_the_end(self):\n result = self.calcuate.calcuate('2x(1+3)')\n expected_result = \"8\"\n self.assertEqual(expected_result, result)", "def test_calculate_bracket_in_bracket(self):\n result = self.calcuate.calcuate('(2+(1+10)-1)')\n expected_result = \"12\"\n self.assertEqual(expected_result, result)", "def main(expression):\n\n exception = parse_expression(expression)\n return calc(poland_notation(exception))", "def test_calculate_all_operations(self):\n result = self.calcuate.calcuate('11-2+4x3-5')\n expected_result = \"16\"\n self.assertEqual(expected_result, result)", "def evaluate(s:str)->str:\n t = s.split()\n res = ''\n\n # Check valid operator \n if t[1] not in ['+','-']:\n return \"Error: Operator must be '+' or '-'.\"\n\n # check valid number \n try:\n t1 = int(t[0])\n t2 = int(t[2])\n \n except ValueError:\n return \"Error: Numbers must only contain digits.\"\n\n # check if numbers are 4 digits \n if (t1>9999 or t1 < -9999 or t2>9999 or t2<-9999):\n return \"Error: Numbers cannot be more than four digits.\"\n \n # addition \n if t[1] == '+':\n res = t1 + t2\n return str(res)\n \n # subtraction \n elif t[1] == '-':\n res = t1 -t2\n return str(res)", "def test_invalid_calculation(self):\n\t\turl = reverse('calculation')\n\t\tdata = {'expression': '2+3**(4+2)'}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\t\tself.assertEqual(response.data, {'error': 'invalid expression' })", "def math_operation(expression):\n if not str(expression[0]).isdigit() or not str(expression[2]).isdigit():\n # eliminates the error call for float and negative numbers\n if not str(expression[0]).replace('.', '1').replace('-', '1').isdigit() or \\\n not str(expression[2]).replace('.', '1').replace('-', '1').isdigit():\n raise ValueError(f'{expression} - check this fragment, something wrong.')\n if expression[2] == 0 and expression[1] == '/':\n raise ValueError(f'{expression} - division by zero.')\n operator = expression[1]\n if operator == '**':\n return expression[0]**expression[2]\n elif operator == '*':\n return expression[0]*expression[2]\n elif operator == '/':\n return expression[0]/expression[2]\n elif operator == '+':\n return expression[0]+expression[2]\n elif operator == '-':\n return expression[0]-expression[2]", "def evaluate_expression(in_str):\n answer = 0\n # key-value pairs keys are the mathematical expressions and the values are the weights that represents the order of oeprations\n # higher weights represnts the expressions to evaluate first, while keys with value 0 are not used yet, they are modifiable\n expression = {\"+\" : 5, \"-\" : 5,\n \"/\" : 10, \"*\" : 10,\n \"**\" : 15,\n \"%\" : 20, \"//\": 20,\n \"&\" : 0, \"#\" : 0, \"!\" : 0, \"|\" : 0, \":\" : 0, \";\" : 0, \"?\": 0\n }\n\n return answer", "def test_eval(self):\n # expr and expr\n base = abs_path('./specs/')\n ps = Parser(base + 'script3-6.py', base)\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 2)\n\n # expr or expr\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"a == if or B == b1\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n 
self.assertEqual(ps.wrangler.counter, 6)\n\n # expr and (expr or expr)\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"a == if and (B == b1 or B == b2)\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing !=\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"a != if\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing >=\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"a.index >= 1\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing index\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"b.index == 1\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing option with integer type\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"b == 0\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing option with float type\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"b == 1.5\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing unmade decision\n ps.spec['constraints'] = [{\"block\": \"A\", \"condition\": \"b.index == 0\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 0)\n\n # testing if the decision is made when the block depends on a variable\n # inside the block\n ps.spec['constraints'] = [{\"block\": \"B\", \"condition\": \"b.index == 0\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 0)", "def evaluate(self):\n try:\n test_val = self.expression()\n return test_val != 0\n except ValueError:\n raise ParseError(\"Could not evaluate expression.\")", "def main():\r\n eq = input(\"Input an equation: \")\r\n splitList = (mysplit(eq))\r\n operandsList = []\r\n #This loop takes in the split list and adds to a list without operators\r\n for operand in splitList:\r\n if operand == '+' or operand == '-' or operand == '*' or operand == '/':\r\n continue\r\n operandsList.append(operand)\r\n operatorsList = []\r\n #This loop takes in the split list and adds to a list without digits\r\n for operator in splitList:\r\n if operator.isdigit() is True:\r\n continue\r\n operatorsList.append(operator)\r\n #variable to check if the operator is allowed\r\n operatorChecker = False\r\n for sign in operatorsList:\r\n if sign == '+' or sign == '-' or sign == '/' or sign == '*':\r\n operatorChecker = True\r\n else:\r\n operatorChecker = False\r\n operandsDigits = ''.join(operandsList)\r\n #this checks if the operands are digits\r\n operandsChecker = str.isdigit(operandsDigits)\r\n #check if equation contains division with 0\r\n if '/ 0' in eq:\r\n zeroChecker = False\r\n else:\r\n zeroChecker = True\r\n\r\n #if conditions for the\r\n if operandsChecker is False or operatorChecker is False or zeroChecker is False:\r\n print(\"Invalid Input\")\r\n else:\r\n stack, queue = parseNumbers(eq)\r\n stackAnswer = calculateStack(stack)\r\n queueAnswer = calculateQueue(queue)\r\n print(\"Queue total:\", queueAnswer)\r\n print(\"Stack total:\", stackAnswer)\r\n if queueAnswer == stackAnswer:\r\n print(\"They do match!\")\r\n else:\r\n print(\"They do not match!\")", "def validate(string):\n \n tokens = string.split()\n \n # Remembers if the previous token was an operator\n opflag = True\n \n ## Highly inefficient 
validity checking begins here ##\n \n # List of operators as they would appear in the infix expression\n operators = ['+', '-', '*', '/', '^', 'sqrt']\n \n # First and foremost, detect all unary minus signs and mark them as such\n for i in xrange(len(tokens)):\n # A unary minus is a minus operator which occurs after another operator\n # or after an open parenthesis.\n if tokens[i] in operators or tokens[i] == '(':\n if opflag:\n if tokens[i] == '-':\n tokens[i] = 'u-'\n # Leave opflag true to allow cascading of unary minuses\n elif tokens[i] in ['sqrt', '(']:\n # These operators can be cascaded, so leave them alone\n # Also, leave opflag true to handle a subsequent u-\n pass\n else:\n # Any other operator must be caught\n raise ExpressionError('Operators cannot be cascaded!')\n # We found an operator, but opflag isn't true. Set it.\n else:\n opflag = True\n else:\n # We found something other than an operator, or a ')'. If opflag is\n # false, and the token is not ')', then we have two adjacent\n # variables/numbers. This is also an invalid combination\n if not opflag and tokens[i] != ')':\n raise ExpressionError('Adjacent operands with no operator!')\n # Otherwise, unset opflag\n else:\n opflag = False\n \n # Check whether parentheses match\n s = Stack()\n for token in tokens:\n if token == '(':\n s.push(token)\n elif token == ')':\n if s.pop() != '(':\n raise ExpressionError('Parentheses do not match')\n if not s.is_empty():\n raise ExpressionError('Parentheses do not match')\n \n return tokens", "def test_number_input(self):\r\n easy_eval = lambda x: calc.evaluator({}, {}, x)\r\n\r\n self.assertEqual(easy_eval(\"13\"), 13)\r\n self.assertEqual(easy_eval(\"3.14\"), 3.14)\r\n self.assertEqual(easy_eval(\".618033989\"), 0.618033989)\r\n\r\n self.assertEqual(easy_eval(\"-13\"), -13)\r\n self.assertEqual(easy_eval(\"-3.14\"), -3.14)\r\n self.assertEqual(easy_eval(\"-.618033989\"), -0.618033989)", "def test_calculate_order_multiplication_subtraction_adding(self):\n result = self.calcuate.calcuate('11-2+4x3')\n expected_result = \"21\"\n self.assertEqual(expected_result, result)", "def reducedFormTwo(self, equation):\n find = re.findall('(.)?(\\d+\\.\\d+|\\d+)(\\+|\\-)(\\d+\\.\\d+|\\d+)(.)?' 
, equation)\n for token in find:\n tmp = ''.join(map(str,token))\n if tmp[-1] == '*' or tmp[-1] == '^' or tmp[-1] == '/':\n continue\n if tmp[0] == '*' or tmp[0] == '^' or tmp[0] == '/':\n continue\n else:\n try:\n if tmp[0] == '-':\n pass\n if not tmp[-1].isnumeric():\n tmp = tmp[:-1]\n res = eval(tmp)\n if res > 0:\n res = '+' + str(res)\n equation = equation.replace(tmp, res)\n except:\n continue\n return equation", "def test_all(self):\n\n tokens = list(Lexer(\"12 + 2^(8/4) - 5 * (7%4)\").generate_tokens())\n answer = [Token(TokenType.NUMBER, 12),\n Token(TokenType.PLUS),\n Token(TokenType.NUMBER, 2),\n Token(TokenType.EXPONENT),\n Token(TokenType.LPAREN),\n Token(TokenType.NUMBER, 8),\n Token(TokenType.DIVIDE),\n Token(TokenType.NUMBER, 4),\n Token(TokenType.RPAREN),\n Token(TokenType.MINUS),\n Token(TokenType.NUMBER, 5),\n Token(TokenType.MULTIPLY),\n Token(TokenType.LPAREN),\n Token(TokenType.NUMBER, 7),\n Token(TokenType.MODULO),\n Token(TokenType.NUMBER, 4),\n Token(TokenType.RPAREN)]\n #Token(TokenType.NUMBER, 3)]\n self.assertEqual(tokens, answer)", "def equation(operation, firstnum, secondnum):\n if operation == 'plus':\n return firstnum + secondnum\n elif operation == 'minus':\n return firstnum - secondnum\n elif operation == 'multiply':\n return firstnum * secondnum\n elif operation == 'divide':\n if not secondnum == 0:\n return firstnum / secondnum\n raise ZeroDivisionError(\"Unable to divide by 0.\")\n raise ValueError('Invalid operation provided.')", "def solve_equation_addition_precendence(eq, verbose=False):\n tokens = tokenize(eq)\n if verbose:\n print(f\"eq: {tokens}\")\n\n stack = []\n ops = {\n None: do_push,\n \"(\": do_push,\n \")\": do_parenthesis,\n \"+\": do_addition,\n \"*\": do_push,\n }\n\n for t in tokens:\n if isinstance(t, int):\n op = stack[-1] if len(stack) else None\n ops[op](stack, t)\n elif t == \"+\" or t == \"*\" or t == \"(\":\n stack.append(t)\n elif t == \")\":\n ops[\")\"](stack, t)\n # solve preparenthesis addition\n if len(stack) > 2:\n v = stack.pop()\n assert isinstance(v, int)\n ops[stack[-1]](stack, v)\n else:\n assert False, f\"fail token: {t}\"\n\n if verbose:\n print(f\"stack: {stack}\")\n\n # solve multiplications\n while len(stack) > 1:\n rhs = stack.pop()\n assert isinstance(rhs, int)\n op = stack.pop()\n if op == \"*\":\n lhs = stack.pop()\n assert isinstance(lhs, int)\n stack.append(lhs * rhs)\n else:\n assert False, f\"invalid operator (not *): {op}\"\n\n assert len(stack) == 1\n return stack[0]", "def test_staff_inputs_expressions_legacy(self):\r\n problem = self.build_problem(answer=\"1+1j\", tolerance=1e-3)\r\n self.assert_grade(problem, '1+j', 'correct')", "def parse_simple_eqn(equation=\"\"):\n # Define replacement rules.\n simple_replacements = [[' ', ''],\n ['**', '^'], ['*', ' \\\\cdot '],\n ['math.', ''], ['np.', ''],\n ['pi', '\\\\pi'] , ['tan', '\\\\tan'],\n ['cos', '\\\\cos'], ['sin', '\\\\sin'],\n ['sec', '\\\\sec'], ['csc', '\\\\csc']]\n complex_replacements = [['^', '{{{i1}}}^{{{i2}}}'],\n ['_', '{{{i1}}}_{{{i2}}}'],\n ['/', '\\\\frac{{{i1}}}{{{i2}}}'],\n ['sqrt','\\\\sqrt{{{i2}}}']]\n # Carry out simple replacements\n for pair in simple_replacements:\n equation = equation.replace(pair[0], pair[1])\n # Now complex replacements\n for item in ['*', '/', '+', '-', '^', '_', ',', 'sqrt']:\n equation = equation.replace(item, ' ' + item + ' ')\n q_split = equation.split()\n for index, item in enumerate(q_split):\n for pair in complex_replacements:\n if item == pair[0]:\n if item == 'sqrt':\n match_str = \" 
\".join(q_split[index:index+2])\n else:\n match_str = \" \".join(q_split[index-1:index+2])\n equation = equation.replace(match_str, pair[1].format(\n i1=q_split[index-1], i2=q_split[index+1]))\n return equation", "def test_staff_inputs_expressions(self):\r\n problem = self.build_problem(answer=\"1/3\", tolerance=1e-3)\r\n correct_responses = [\"1/3\", \"0.333333\"]\r\n incorrect_responses = []\r\n self.assert_multiple_grade(problem, correct_responses, incorrect_responses)", "def test_parse():\n first = parse_formula(\"PO4H2(CH2)12CH3\")\n assert first == {\"P\":1, \"O\":4, \"H\":29, \"C\":13}\n\n second = parse_formula(\"H2O\")\n assert second == {\"H\":2, \"O\":1}", "def test01_math_operators(self):\n\n import _cppyy\n number = _cppyy.gbl.number\n\n assert (number(20) + number(10)) == number(30)\n assert (number(20) + 10 ) == number(30)\n assert (number(20) - number(10)) == number(10)\n assert (number(20) - 10 ) == number(10)\n assert (number(20) / number(10)) == number(2)\n assert (number(20) / 10 ) == number(2)\n assert (number(20) * number(10)) == number(200)\n assert (number(20) * 10 ) == number(200)\n assert (number(20) % 10 ) == number(0)\n assert (number(20) % number(10)) == number(0)\n assert (number(5) & number(14)) == number(4)\n assert (number(5) | number(14)) == number(15)\n assert (number(5) ^ number(14)) == number(11)\n assert (number(5) << 2) == number(20)\n assert (number(20) >> 2) == number(5)", "def eval(self, string):\n tokens = string.split()\n op1 = int(tokens.pop(0))\n operator = tokens.pop(0)\n op2 = int(tokens.pop(0))\n if operator == '+':\n return op1 + op2\n elif operator == '-':\n return op1 - op2\n elif operator == '*':\n return op1 * op2\n elif operator == '/':\n return op1 * op2\n else:\n raise CalculatorException(\"Unknown operator %s\" % operator)", "def autohard(equation):\n\n try:\n # Try to set a variable to an integer\n num1 = int(equation.split(\" \")[1])\n\n except ValueError:\n # Try to set a variable to a decimal\n num1 = float(equation.split(\" \")[1])\n\n # If the lowercase version of the operation equals 'log'\n if equation.split(\" \")[0].lower() == \"log\":\n # Return the answer\n return math.log(num1)\n\n # If the lowercase version of the operation equals 'acos'\n elif equation.split(\" \")[0].lower() == \"acos\":\n # Return the answer\n return math.acos(num1)\n\n # If the lowercase version of the operation equals 'asin'\n elif equation.split(\" \")[0].lower() == \"asin\":\n # Return the answer\n return math.asin(num1)\n\n # If the lowercase version of the operation equals 'atan'\n elif equation.split(\" \")[0].lower() == \"atan\":\n # Return the answer\n return math.atan(num1)\n\n # If the lowercase version of the operation equals 'cos'\n elif equation.split(\" \")[0].lower() == \"cos\":\n # Return the answer\n return math.cos(num1)\n\n # If the lowercase version of the operation equals 'hypot'\n elif equation.split(\" \")[0].lower() == \"hypot\":\n try:\n # Try to set a variable to an integer\n num2 = int(equation.split(\" \")[2])\n\n except ValueError:\n # Try to set a variable to an decimal\n num2 = float(equation.split(\" \")[2])\n\n # Return the answer\n return math.hypot(num1, num2)\n\n # If the lowercase version of the operation equals 'sin'\n elif equation.split(\" \")[0].lower() == \"sin\":\n # Return the answer\n return math.sin(num1)\n\n # If the lowercase version of the operation equals 'tan'\n elif equation.split(\" \")[0].lower() == \"tan\":\n # Return the answer\n return math.tan(num1)\n\n # Raise a warning\n raise 
ValueError(\"Invalid operation entered.\")", "def evaluator_side_effect(_, __, math_string):\r\n if math_string != '4':\r\n raise err", "def simple_calculator(calculation):\n\n\n operations = {'+': lambda x,y: x + y,'-': lambda x,y: x-y,'*': lambda x,y: x * y,'/': lambda x,y: x/y}\n \n def is_numeric(x):\n\n try:\n float(x)\n int(x)\n except:\n return False\n else:\n return True\n \n\n values = calculation.split()\n print(values)\n if is_numeric(values[0]) and is_numeric(values[2]) and values[1] in operations:\n operation = operations[values[1]]\n try:\n return operation(float(values[0]),float(values[2]))\n except ZeroDivisionError:\n raise ValueError(\"Division by zero\")\n\n\n raise ValueError(\"Invalid Operation\")", "def test_expr(self):\n self.common_test_expr(True)", "def test_calculate_addition(self):\n result = self.calcuate.calcuate('1+4')\n expected_result = \"5\"\n self.assertEqual(expected_result, result)", "def test_calculate_adding_and_subtraction(self):\n result = self.calcuate.calcuate('8+20-5')\n expected_result = \"23\"\n self.assertEqual(expected_result, result)", "def valid(formula):\r\n\r\n try:\r\n return not re.search(r'\\b0[0-9]', formula) and eval((formula) is True\r\n #except ArithmeticError:\r\n #return False\r\n except:\r\n return False", "def secondEvaluate(new_tokens):\n answer = 0\n index = 1\n while index < len(new_tokens):\n if new_tokens[index]['type'] == 'PLUS' or new_tokens[index]['type'] == 'MINUS':\n if new_tokens[index+1]['type'] != 'NUMBER':\n print(\"Invalid syntax\")\n exit(1)\n if new_tokens[index]['type'] == 'NUMBER':\n if new_tokens[index - 1]['type'] == 'PLUS':\n answer += new_tokens[index]['number']\n elif new_tokens[index - 1]['type'] == 'MINUS':\n answer -= new_tokens[index]['number']\n else:\n print('Invalid syntax')\n exit(1)\n index += 1\n return answer", "def evaluate(expr):\n def isdigit(ch):\n try:\n int(ch)\n return True\n except ValueError:\n return False\n\n def evaluate_helper(expr, index):\n ch = expr[index]\n if ch == '(':\n # complex\n index += 1 # move past (\n\n # get the left operand\n left, index = evaluate_helper(expr, index)\n opr = expr[index]\n index += 1 # move past the operator\n\n # get the right operand\n right, index = evaluate_helper(expr, index)\n index += 1 # to move past closing paranthesis\n if opr == '+':\n return left + right, index\n elif opr == '*':\n return left * right, index\n\n \n else:\n if isdigit(ch):\n value = 0\n while isdigit(ch):\n value = value * 10 + int(ch)\n index += 1\n if index < len(expr):\n ch = expr[index]\n else:\n break\n return value, index\n\n \n\n return evaluate_helper(expr, 0)[0]", "def evaluate(expr: str) -> float:\n\n expr = ''.join(filter(lambda ch: ch in valid_characters, expr))\n if not expr:\n return float('NaN') # raise error instead?\n\n # 'Stacks'\n operators = []\n operands = []\n\n try:\n for t in tokenizer(expr):\n\n if isinstance(t, float):\n operands.append(t)\n elif t in openers:\n operators.append(t)\n\n elif t in binary_operators:\n while operators and precedence[operators[-1]] >= precedence[t]:\n operands.append(binary_operators[operators.pop()](operands.pop(), operands.pop()))\n operators.append(t)\n else:\n corresponding_opener = openers[closers.index(t)]\n while (op := operators.pop()) != corresponding_opener:\n operands.append(binary_operators[op](operands.pop(), operands.pop()))\n\n while operators:\n operands.append(binary_operators[operators.pop()](operands.pop(), operands.pop()))\n\n except ArithmeticError as e:\n raise e\n except (ValueError, 
IndexError): # One of the stacks runs out, i.e. invalid expression structure.\n raise InvalidExpressionError()\n\n # assert (len(operands) == 1)\n return operands.pop()", "def check_math_line(self, line):\n self.E_str = \"check_math_line\"\n err_msg = \"The syntax for a math command is: math <var> = <arthimetic operation>.\"\n err_msg += \"\\nFor example: 'x = x / (1 - x)'\\n\\n\"\n\n # Check we don't too have many equals signs\n if line.count('=') > 1:\n self.print_error(\"Too many '=' found!\\n\\n\" + err_msg)\n elif line.count('=') == 0:\n self.print_error(f\"I can't find a '=' on the math line!{err_msg}\")\n\n # Set the variable for error checking later\n new_var_name, metadata_name = self.get_variable_name(line)\n\n # Split the line by =\n words = line.split('=')\n _, maths = words\n md_var_names, md_names, new_line = self.parse_metadata_line(maths,\n \"get\")\n # Check metadata and their variables have been declared\n metadata = {}\n self.E_str = \"check_math_line\"\n for v_name, m_name in zip(md_var_names, md_names):\n if v_name not in self.variables:\n self.print_error(f\"Undeclared variable '{var}'\")\n Var = getattr(self, v_name)\n if m_name not in Var.metadata:\n e_msg = f\"Undeclared metadata '{m_name}' in variable '{v_name}''\"\n self.print_error(e_msg)\n metadata[m_name] = \"\"\n\n # Check if there are any undeclared variables\n line, any_vars = self.find_vars_in_str(new_line)\n self.E_str = \"check_math_line\"\n\n # Check if there are any unwanted characters\n bad_chars = \"%£\\\"!&}{[]}:;@'^~#<,>?¬`|\"\n for j in bad_chars:\n if j in new_line:\n self.print_error(f\"Illegal character '{j}' in math\")\n\n # Check all brackets are closed\n if new_line.count(\"(\") != line.count(\")\"):\n err_msg = \"You've not closed one of the brackets you opened.\\n\"\n err_msg += \"Num of '(' = %i\\n\" % line.count(\"(\")\n err_msg += \"Num of ')' = %i\\n\" % line.count(\")\")\n self.print_error(err_msg)\n\n self.set_var(new_var_name, \"^EMPTY^\", {metadata_name: \"\"})\n return new_var_name", "def evaluate(formula, model):\r\n # Task 2.1\r\n if is_unary(formula.root):\r\n return not evaluate(formula.first, model)\r\n elif is_ternary(formula.root):\r\n if evaluate(formula.first,model):\r\n return evaluate(formula.second,model)\r\n else:\r\n return evaluate(formula.third,model)\r\n elif is_binary(formula.root):\r\n if formula.root == '&':\r\n return evaluate(formula.first, model) and evaluate(formula.second, model)\r\n elif formula.root == '|':\r\n return evaluate(formula.first, model) or evaluate(formula.second, model)\r\n elif formula.root == '<->':\r\n return not (evaluate(formula.first, model) ^ evaluate(formula.second, model))\r\n elif formula.root == '-&':\r\n return not (evaluate(formula.first, model) and evaluate(formula.second, model))\r\n elif formula.root == '-|':\r\n return not (evaluate(formula.first, model) or evaluate(formula.second, model))\r\n else:\r\n return (not evaluate(formula.first, model)) or evaluate(formula.second, model)\r\n elif is_constant(formula.root):\r\n if formula.root == 'T':\r\n return True\r\n else:\r\n return False\r\n else:\r\n return model[formula.root]\r\n #\r\n # if is_constant(formula.root):\r\n # if formula.root == 'T':\r\n # return True\r\n # elif formula.root == 'F':\r\n # return False\r\n # elif is_variable(formula.root):\r\n # return model[formula.root]\r\n #\r\n # elif is_unary(formula.root):\r\n # return not evaluate(formula.first, model)\r\n # assert (type(formula.first) is Formula) and (type(formula.second) is Formula)\r\n # if 
is_binary(formula.root):\r\n # if formula.root == '&':\r\n # return evaluate(formula.first, model) and evaluate(formula.second, model)\r\n # elif formula.root == '|':\r\n # return evaluate(formula.first, model) or evaluate(formula.second, model)\r\n # elif formula.root == '->':\r\n # if evaluate(formula.first, model) and not evaluate(formula.second, model):\r\n # return False\r\n # else:\r\n # return True\r\n # elif formula.root == '<->':\r\n # if (not evaluate(formula.first, model) and not evaluate(formula.second, model)) or (\r\n # evaluate(formula.first, model) and evaluate(formula.second, model)):\r\n # return True\r\n # else:\r\n # return False\r\n # elif formula.root == '-&':\r\n # if evaluate(formula.first, model) and evaluate(formula.second, model):\r\n # return False\r\n # else:\r\n # return True\r\n # elif formula.root == '-|':\r\n # if not evaluate(formula.first, model) and not evaluate(formula.second, model):\r\n # return True\r\n # else:\r\n # return False\r\n # else:\r\n # if evaluate(formula.first, model):\r\n # return evaluate(formula.second, model)\r\n # else:\r\n # return evaluate(formula.third, model)\r", "def testadd_X_Y ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTupX, fracTupY, dictAdd, dictSub, dictMul, dictDiv in self.knownArithResultValues:\r\n\t\t\tfracX = eval ( r.sub ( 'frac.frac', fracTupX ) )\r\n\t\t\tfracY = eval ( r.sub ( 'frac.frac', fracTupY ) )\r\n\t\t\tadd_fracX_fracY = fracX + fracY\r\n\t\t\tself.assertEqual ( add_fracX_fracY.toString ().split ()[0], dictAdd ['X+Y'] )", "def calculate_expression(self, txt):\n self.shunting_yard(self.text_parser(txt))\n return self.RPN()", "def operand_present(input_str): # HELPER\n try:\n float(input_str)\n return True\n except ValueError:\n return False", "def calculate(first, second, operator):\n result = \"\"\n if operator == \"+\":\n result = int(first) + int(second)\n elif operator == \"-\":\n result = int(first) - int(second)\n elif operator == \"/\":\n result = int(first) / int(second)\n elif operator == \"*\":\n result = int(first) * int(second)\n else:\n print \"Did not recognize: \" + operator\n\n return result", "def valid_expression(expression):\n OPERATORS= '+*/-'\n if no_operators(expression) != True:\n return no_operators(expression)\n if no_paranthesis(expression) != True:\n return no_paranthesis(expression)\n if no_numbers(expression) != True:\n return no_numbers(expression)\n if invalid_characters(expression) != True:\n return invalid_characters(expression)\n if match_paranthesis(expression) == False:\n raise NotValidExpression('Not a valid expression, brackets mismatched.')\n number_operators = 0\n number_paranthesis = 0\n for i in expression:\n if i in OPERATORS:\n number_operators += 1\n elif i == '(' or i == ')':\n number_paranthesis +=1\n expression1 = expression[1:(len(expression) - 1)] # checks if the expression without the first and last character is valid\n if match_paranthesis(expression1) == False and ('(' in expression1 or ')' in expression1):\n raise NotValidExpression('Not a valid expression, brackets mismatched.') # if it is not, raises an appropiate error\n for i in range(0, len(expression) - 1):\n #Checks if an operator is missing,if there exists a number followed by ( or if there is a )before the number\n if expression[i] not in OPERATORS and expression[i] not in '()':\n if expression[i + 1] == '(':\n raise NotValidExpression('Not a valid expression, operator missing.')\n elif expression[i] in OPERATORS and expression[i + 1] in OPERATORS + ')' :\n raise 
NotValidExpression('Not a valid expression, wrong placement of operators')\n #Checks if an operator is placed wrongly , before ) or next to another operator\n if expression[i+1] not in OPERATORS and expression[i + 1] not in '()':\n if expression[i] == ')':\n raise NotValidExpression('Not a valid expression, operator missing.')\n elif expression[i+1] in OPERATORS and expression[i] in OPERATORS + '(':\n raise NotValidExpression('Not a valid expression, wrong placement of operators')\n if 2*number_operators != number_paranthesis: # an expression is valid only if the number of paranthesis is equal to the double of the number of operators\n raise NotValidExpression('Not a valid expression, wrong number of operands.')\n return True", "def main():\n\texpression = input(\"Enter expression \")\n\tans = calculate(expression)\n\n\tprint(ans)", "def test_exponential_answer(self):\r\n answer = 50\r\n correct_responses = [\r\n \"50\", \"50.0\", \"5e1\", \"5e+1\",\r\n \"50e0\", \"50.0e0\", \"500e-1\"\r\n ]\r\n incorrect_responses = [\"\", \"3.9\", \"4.1\", \"0\", \"5.01e1\"]\r\n\r\n for input_str in correct_responses:\r\n result = calc.evaluator({}, {}, input_str)\r\n fail_msg = \"Expected '{0}' to equal {1}\".format(\r\n input_str, answer\r\n )\r\n self.assertEqual(answer, result, msg=fail_msg)\r\n\r\n for input_str in incorrect_responses:\r\n result = calc.evaluator({}, {}, input_str)\r\n fail_msg = \"Expected '{0}' to not equal {1}\".format(\r\n input_str, answer\r\n )\r\n self.assertNotEqual(answer, result, msg=fail_msg)", "def testCalculate(self):\r\n for i in range(len(self.__testExpressions)):\r\n self.__Calculator.setExpression(self.__testExpressions[i])\r\n self.__Calculator.calculateResult()\r\n self.assertEqual(self.__Calculator.getResult(), self.__testResult[i])", "def is_math_line(line):\n if '=' in line:\n # Check it isn't some other command\n for cmd in CMD_LIST:\n if re.findall(f\"^{cmd} \", line):\n return False\n\n str_txt, non_str = gen_parse.get_str_between_delims(line, '\"')\n if any(j in non_str for j in '<>-+/*^'):\n return True\n return False", "def test_sum(self):\r\n # Use 'x' as the first term (instead of, say, '1'), so it can't be\r\n # interpreted as a negative number.\r\n self.assertEquals(\r\n preview.latex_preview('-x+2-3+4', variables=['x']),\r\n '-x+2-3+4'\r\n )", "def test_expression_sanitizer(self):\n\n self.assertFalse(_is_math_expr_safe('INSERT INTO students VALUES (?,?)'))\n self.assertFalse(_is_math_expr_safe('import math'))\n self.assertFalse(_is_math_expr_safe('complex'))\n self.assertFalse(_is_math_expr_safe('__import__(\"os\").system(\"clear\")'))\n self.assertFalse(_is_math_expr_safe('eval(\"()._\" + \"_class_\" + \"_._\" +'\n ' \"_bases_\" + \"_[0]\")'))\n self.assertFalse(_is_math_expr_safe('2***2'))\n self.assertFalse(_is_math_expr_safe('avdfd*3'))\n self.assertFalse(_is_math_expr_safe('Cos(1+2)'))\n self.assertFalse(_is_math_expr_safe('hello'))\n self.assertFalse(_is_math_expr_safe('hello_world'))\n self.assertFalse(_is_math_expr_safe('1_2'))\n self.assertFalse(_is_math_expr_safe('2+-2'))\n self.assertFalse(_is_math_expr_safe('print(1.0)'))\n self.assertFalse(_is_math_expr_safe('1.1.1.1'))\n self.assertFalse(_is_math_expr_safe('abc.1'))\n\n self.assertTrue(_is_math_expr_safe('1+1*2*3.2+8*cos(1)**2'))\n self.assertTrue(_is_math_expr_safe('pi*2'))\n self.assertTrue(_is_math_expr_safe('-P1*cos(P2)'))\n self.assertTrue(_is_math_expr_safe('-P1*P2*P3'))\n self.assertTrue(_is_math_expr_safe('-P1'))\n self.assertTrue(_is_math_expr_safe('-1.*P1'))\n 
self.assertTrue(_is_math_expr_safe('-1.*P1*P2'))\n self.assertTrue(_is_math_expr_safe('-(P1)'))", "def test_calculate_addition_of_four_elements(self):\n result = self.calcuate.calcuate('15+4+10+3')\n expected_result = \"32\"\n self.assertEqual(expected_result, result)", "def testadd_Y_X ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTupX, fracTupY, dictAdd, dictSub, dictMul, dictDiv in self.knownArithResultValues:\r\n\t\t\tfracX = eval ( r.sub ( 'frac.frac', fracTupX ) )\r\n\t\t\tfracY = eval ( r.sub ( 'frac.frac', fracTupY ) )\r\n\t\t\tadd_fracY_fracX = fracY + fracX\r\n\t\t\tself.assertEqual ( add_fracY_fracX.toString ().split ()[0], dictAdd ['Y+X'] )", "def _is_arithmetic(self, words):\n if words[0] in ['add', 'sub', 'neg', 'eq', 'gt', 'lt', 'and', 'or', 'not']:\n if len(words) != 1:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_ARITHMETIC command.\".format(self._file_line))\n return True\n else:\n return False", "def test_01():\n text = \"a = 2 + 3 * (4 + 5)\"\n\n c = _ast.parse(text)\n print(_ast.dump(c))", "def calculate(self, expr):\n try:\n body = ast.parse(expr, mode='eval').body\n return self._calculate(body)\n except (TypeError, SyntaxError, ZeroDivisionError):\n raise StringArithmeticError(\"Can't calculate %s\" % expr, expr)", "def get_equation_from_user(self):\n while True:\n try:\n # Get the string from the user\n string_equation = input(\"Please enter an valid equation\")\n\n # If empty get new string\n while not string_equation:\n string_equation = input(\n \"Please enter an valid not empty equation\")\n break\n except Exception as e:\n print(\"something went wrong please try again \" + str(e))\n\n return string_equation", "def test_sqpp_plus_expr1_minus_paren_expr2(self):\n self.assertEqual(self.parser.parse_query(\"+ expr1 - (expr2)\"),\n ['+', 'expr1', '-', 'expr2'])", "def execute_operation(self, frac1, operator, frac2):\n try:\n ops = {\"+\": (lambda x, y: Frac(x) + Frac(y)), \"-\": (lambda x, y: Frac(x) - Frac(y)), \"*\":\n (lambda x, y: Frac(x) * Frac(y)), \"/\": (lambda x, y: Frac(x) / Frac(y))}\n if operator in ops:\n return str(ops[operator](frac1, frac2))\n else:\n print(self.operator_not_Valid)\n return False\n except Exception as e:\n print(self.operator_not_Valid, e)\n return False", "def test_calculate_subtraction_adding_subtraction(self):\n result = self.calcuate.calcuate('20-5+8-3')\n expected_result = \"20\"\n self.assertEqual(expected_result, result)", "def evaluate(expr: str) -> int:\n output = []\n operators = []\n for token in expr.replace(\" \", \"\"):\n if token.isdigit(): # we assume that there isn't any number > 9 in expr\n output.append(int(token))\n elif token == \"(\":\n operators.append(\"(\")\n elif token in [\")\", \"+\", \"*\"]:\n while operators and operators[-1] != \"(\":\n op = operators.pop()\n if op == \"+\":\n output.append(output.pop() + output.pop())\n elif op == \"*\":\n output.append(output.pop() * output.pop())\n if token != \")\":\n operators.append(token)\n elif operators:\n operators.pop()\n\n while operators:\n op = operators.pop()\n if op == \"+\":\n output.append(output.pop() + output.pop())\n elif op == \"*\":\n output.append(output.pop() * output.pop())\n\n return output[0]", "def test_operator(self):\n\n tokens = list(Lexer(\"+-*/^%\").generate_tokens())\n answer = [Token(TokenType.PLUS),\n Token(TokenType.MINUS),\n Token(TokenType.MULTIPLY),\n Token(TokenType.DIVIDE),\n Token(TokenType.EXPONENT),\n Token(TokenType.MODULO)]\n self.assertEqual(tokens, answer)", "def parse_equations(eqs, 
ops):\n eeqs = []\n prop_list = ['unit of', 'commutative', 'associative', 'distributes over', 'inverse of', \n 'annihilates', 'idempotent', 'absorbs', 'absorptive', 'involutive']\n props = []\n for eq in eqs:\n if not any_in(prop_list, eq):\n eeqs.append(Eq.parse_eq(eq, ops))\n else:\n if 'unit of' in eq:\n m = re.search(\"^'(\\w+)'\\s+(left|right)?\\s*unit of\\s+'(\\w+)'$\", eq)\n unit, side, op = m.groups()\n props.append(Unit(unit, op, side))\n elif \"annihilates\" in eq: \n m = re.search(\"^'(\\w+)'\\s+(left|right)?\\s*annihilates\\s+'(\\w+)'$\", eq)\n unit, side, op = m.groups()\n props.append(Annih(unit, op, side))\n elif \"distributes over\" in eq:\n m = re.search(\"^'(\\w+)'\\s+(left|right)?\\s*distributes over\\s+'(\\w+)'$\", eq)\n op1, side, op2 = m.groups()\n props.append(Dist(op1, op2, side))\n elif \"absorbs\" in eq:\n m = re.search(\"^'(\\w+)'\\s+(left|right)?\\s*absorbs\\s+'(\\w+)'$\", eq)\n op1, side, op2 = m.groups()\n props.append(Absorb(op1, op2, side))\n elif \"inverse of\" in eq:\n m = re.search(\"^'(\\w+)'\\s+(left|right)?\\s*inverse of\\s+'(\\w+)'\\s+with\\s+'(\\w+)'$\", eq)\n uop, side, op, unit = m.groups()\n props.append(Inverse(uop, op, unit, side))\n elif \"absorptive\" in eq:\n m = re.search(\"^'(\\w+)'\\s+and\\s+'(\\w+)'\\s+absorptive$\", eq)\n op1, op2 = m.groups()\n props.append(Absorb(op1, op2, None))\n props.append(Absorb(op2, op1, None))\n else:\n m = re.search(\"^'(\\w+)'\\s+(.*)$\", eq)\n op = m.group(1)\n kws = splitstrip(m.group(2), \",\")\n if 'associative' in kws:\n props.append(Assoc(op))\n if 'commutative' in kws:\n props.append(Comm(op))\n if 'idempotent' in kws:\n props.append(Idemp(op))\n if 'involutive' in kws:\n props.append(Invol(op))\n\n return eeqs, props", "def solve(self, question):\n # check if the question matches binary operation\n match = self.BINARY_OP_REGEX.match(question)\n if match:\n # read LHS operand\n op1 = self.get_number(match.group(1))\n if op1 is None:\n return\n # read operator\n operator = match.group(2)\n # read RHS operand\n op2 = self.get_number(match.group(3))\n if op2 is None:\n return\n # calculate the operation\n self.handle_binary_operator(op1, operator, op2)\n return\n # check match of unary operation\n match = self.UNARY_OP_REGEX.match(question)\n if match:\n # read operator\n operator = match.group(1).upper()\n # read operand\n op = self.get_number(match.group(2))\n if op is None:\n return\n # calculate the operation\n self.handle_unary_operator(operator, op)\n return\n # no match found\n print(\"Invalid question!\")", "def input_equation(self, eq: str) -> None:\n if self.xmin >= self.xmax:\n raise Exception('Minimum > Maximum')\n\n increment = (self.xmax - self.xmin) / self.precision\n self.dependant = []\n\n x = self.xmin\n while x <= self.xmax:\n try:\n y = eval(eq)\n except ZeroDivisionError:\n print(f'Division by zero, x = {x}')\n x += increment\n except SyntaxError:\n print(f'Invalid equation: {eq}')\n x += increment\n except ValueError:\n print(f'Math domain error, {eq}: x = {x}')\n x += increment\n except TypeError:\n print('Can\\'t convert complex to float')\n x += increment\n else:\n self.dependant.append((x, y))\n x += increment\n self.equation = eq", "def evaluate(compiled_expression):", "def evaluateExpression(expr):\n\toperators = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,\n\t\t\t\t ast.Div: op.truediv, ast.USub: op.neg, ast.Pow: myPow}\n\tnode = ast.parse(expr.strip(), mode='eval')\n\treturn evaluate(node.body,operators)", "def evaluate_math(query):\n # Final result\n evaluated_query 
= []\n\n math_expr = re.compile(r'(\\d+([.]\\d+)*|[+\\-/*])+\\d([.]\\d+)*$')\n\n for q in query:\n if math_expr.match(q):\n evaluated_query += [str(eval(q))]\n else:\n evaluated_query += [q]\n\n return evaluated_query", "def test_addition():\n assert calculator.add(7, 3) == 10\n assert calculator.add(7.0, 3.0) == 10.0\n assert calculator.add(7, -3) == 4\n assert calculator.add(7.0, -3.0) == 4.0", "def test_simple_vars(self):\r\n variables = {'x': 9.72, 'y': 7.91, 'loooooong': 6.4}\r\n\r\n # Should not change value of constant\r\n # even with different numbers of variables...\r\n self.assertEqual(calc.evaluator({'x': 9.72}, {}, '13'), 13)\r\n self.assertEqual(calc.evaluator({'x': 9.72, 'y': 7.91}, {}, '13'), 13)\r\n self.assertEqual(calc.evaluator(variables, {}, '13'), 13)\r\n\r\n # Easy evaluation\r\n self.assertEqual(calc.evaluator(variables, {}, 'x'), 9.72)\r\n self.assertEqual(calc.evaluator(variables, {}, 'y'), 7.91)\r\n self.assertEqual(calc.evaluator(variables, {}, 'loooooong'), 6.4)\r\n\r\n # Test a simple equation\r\n self.assertAlmostEqual(\r\n calc.evaluator(variables, {}, '3*x-y'),\r\n 21.25, delta=0.01 # = 3 * 9.72 - 7.91\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator(variables, {}, 'x*y'),\r\n 76.89, delta=0.01\r\n )\r\n\r\n self.assertEqual(calc.evaluator({'x': 9.72, 'y': 7.91}, {}, \"13\"), 13)\r\n self.assertEqual(calc.evaluator(variables, {}, \"13\"), 13)\r\n self.assertEqual(\r\n calc.evaluator(\r\n {'a': 2.2997471478310274, 'k': 9, 'm': 8, 'x': 0.6600949841121},\r\n {}, \"5\"\r\n ),\r\n 5\r\n )", "def eval_expr(expr):\n match expr:\n case BinaryOp('+', left, right):\n return eval_expr(left) + eval_expr(right)\n case BinaryOp('-', left, right):\n return eval_expr(left) - eval_expr(right)\n case BinaryOp('*', left, right):\n return eval_expr(left) * eval_expr(right)\n case BinaryOp('/', left, right):\n return eval_expr(left) / eval_expr(right)\n case UnaryOp('+', arg):\n return eval_expr(arg)\n case UnaryOp('-', arg):\n return -eval_expr(arg)\n case VarExpr(name):\n raise ValueError(f\"Unknown value of: {name}\")\n case float() | int():\n return expr\n case _:\n raise ValueError(f\"Invalid expression value: {repr(expr)}\")", "def test_evaluate_add_expression(self):\n value = self.evaluate_common(\"2M add 2M\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Decimal, \"Expected Decimal\")\n self.assertTrue(value.value == 4, \"Expected 4\")\n value = self.evaluate_common(\"2D add 2M\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Double, \"Expected Double\")\n self.assertTrue(value.value == 4.0, \"Expected 4\")\n value = self.evaluate_common(\"2F add 2D\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Double, \"Expected Double\")\n self.assertTrue(value.value == 4.0, \"Expected 4\")\n value = self.evaluate_common(\"2 add 2L\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int64, \"Expected Int64\")\n self.assertTrue(value.value == 4, \"Expected 4\")\n try:\n value = self.evaluate_common(\"2 add '2'\")\n self.fail(\"String promotion to int\")\n except odata.EvaluationError:\n pass\n value = self.evaluate_common(\"2 add null\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int32, \"Expected Int32\")\n self.assertTrue(value.value is None, \"Expected None\")", "def Calculator(formula):\r\n ans = 0\r\n #read input into a list\r\n tokens = formula.split()\r\n print(tokens)\r\n\r\n #execute multiplication and division first\r\n oopList = oopFirstPass(tokens)\r\n\r\n print(oopList)\r\n \r\n #addition and subtraction second\r\n if 
(oopList is not None):\r\n tokens2 = oopList.split()\r\n print(tokens2)\r\n ans = oopSecondPass(tokens2)\r\n\r\n return ans", "def solve_part1(input, verbose=False):\n equations = parse(input)\n\n result = []\n for eq in equations:\n result.append(solve_equation_same_precedence(eq, verbose))\n\n if verbose:\n print(f\"results: {result}\")\n\n return sum(result)", "def isFormula(string):\r\n string = string.replace(' ', '')\r\n if string == '':\r\n return True\r\n elif re.sub(r\"\\w|\\d|->|_|\\(|\\)|~\", '', string):\r\n return False\r\n elif re.findall(r\"(?<!\\w_)\\d+|(?<!\\w)\\d+|->->\", string):\r\n return False\r\n else:\r\n string1 = string.replace('~', '').replace('->', '+')\r\n info = re.findall(r'\\w_\\d+|\\w\\d*', string1)\r\n for part in info:\r\n string1 = string1.replace(part, '(-1)')\r\n try:\r\n eval(string1)\r\n except:\r\n return False\r\n string2 = string.replace('~', '-').replace('->', '+')\r\n info = re.findall(r'\\w_\\d+|\\w\\d*', string2)\r\n for part in info:\r\n string2 = string2.replace(part, '(-1)')\r\n try:\r\n eval(string2)\r\n except:\r\n return False\r\n return True", "def calc(operand_1, operand_2):\n return operand_1 + operand_2", "def calc(operand_1, operand_2):\n return operand_1 + operand_2", "def test_maths(self):\n\n # Test that basic integers work\n self.assertEqual(int(1) + int(1), int(2), \"Basic addition failed\")\n self.assertNotEqual(int(1) + int(1), int(3), \"Basic addition failed\")\n\n # Test doubles\n # FIXME: Deployment fails for some reason. Maybe bug in CPU? Commenting it out.\n # self.assertEqual(float(0.1) + float(0.2), float(0.3), \"Floating addition failed\")\n self.assertNotEqual(float(1) + float(1), float(3), \"Floating Addition failed\")", "def Test(self, String, infix):\r\n tmp1 = self.Check_code_operand(infix[0])\r\n tmp2 = self.Check_code_operand(infix[1])\r\n if (tmp1 is False) or (tmp2 is False):\r\n return False\r\n if (tmp1[0] == 'imm') or (tmp1[2] == 0) or ((tmp1[0] == 'imm') and (tmp2[0] == 'imm')):\r\n if (tmp1[2] == 0) and (tmp2[2] != 0):\r\n tmp1[2]=tmp2[2]\r\n else:\r\n return False\r\n if ((tmp1[0] == 'add') and (tmp2[0] == 'add')) or ((tmp1[2] != tmp2[2]) and (tmp2[2] != 0) and (tmp2[0] != 'imm')):\r\n return False\r\n\r\n if String == 'and':\r\n\r\n a = 0\r\n if (tmp1[0] != 'add'):\r\n a = tmp1[1]\r\n else:\r\n a = self.Get_value_from_memory(tmp1[1], tmp1[2])\r\n b = 0\r\n if (tmp2[0] != 'add'):\r\n b = tmp2[1]\r\n else:\r\n b = self.Get_value_from_memory(tmp2[1], tmp1[2])\r\n\r\n if b < 0:\r\n b = pow(2, (tmp1[2] * 8)) + b\r\n if b < 0:\r\n return False\r\n\r\n self.Flags[\"ac\"] = 0\r\n self.Flags[\"of\"] = 0\r\n self.Flags[\"cf\"] = 0\r\n\r\n a = a & b\r\n\r\n\r\n if bool(a & pow(2, (tmp1[2] * 8) - 1)):\r\n self.Flags[\"sf\"] = 1\r\n else:\r\n self.Flags[\"sf\"] = 0\r\n\r\n v = a\r\n one = 0\r\n for i in range(0, 8):\r\n if bool(v & 1):\r\n one += 1\r\n v = v.__rshift__(1)\r\n if bool(one & 1):\r\n self.Flags[\"pf\"] = 0\r\n else:\r\n self.Flags[\"pf\"] = 1\r\n\r\n if a == 0:\r\n self.Flags[\"zf\"] = 1\r\n else:\r\n self.Flags[\"zf\"] = 0\r\n\r\n if tmp1[0] == 'reg':\r\n if len(infix[0][0])==3:\r\n self.Registers[infix[0][0]] = a\r\n else:\r\n self.Save_value_in_reg_X(infix[0][0],a)\r\n else:\r\n if not self.Save_value_in_memory(tmp1[1], a, tmp1[2]):\r\n return False\r\n elif String == 'test':\r\n\r\n a = 0\r\n if (tmp1[0] != 'add'):\r\n a = tmp1[1]\r\n else:\r\n a = self.Get_value_from_memory(tmp1[1], tmp1[2])\r\n b = 0\r\n if (tmp2[0] != 'add'):\r\n b = tmp2[1]\r\n else:\r\n b = self.Get_value_from_memory(tmp2[1], 
tmp1[2])\r\n\r\n if b < 0:\r\n b = pow(2, (tmp1[2] * 8)) + b\r\n if b < 0:\r\n return False\r\n\r\n self.Flags[\"ac\"] = 0\r\n self.Flags[\"of\"] = 0\r\n self.Flags[\"cf\"] = 0\r\n\r\n a = a & b\r\n\r\n\r\n if bool(a & pow(2, (tmp1[2] * 8) - 1)):\r\n self.Flags[\"sf\"] = 1\r\n else:\r\n self.Flags[\"sf\"] = 0\r\n\r\n v = a\r\n one = 0\r\n for i in range(0, 8):\r\n if bool(v & 1):\r\n one += 1\r\n v = v.__rshift__(1)\r\n if bool(one & 1):\r\n self.Flags[\"pf\"] = 0\r\n else:\r\n self.Flags[\"pf\"] = 1\r\n\r\n if a == 0:\r\n self.Flags[\"zf\"] = 1\r\n else:\r\n self.Flags[\"zf\"] = 0\r\n elif String == 'or':\r\n\r\n a = 0\r\n if (tmp1[0] != 'add'):\r\n a = tmp1[1]\r\n else:\r\n a = self.Get_value_from_memory(tmp1[1], tmp1[2])\r\n b = 0\r\n if (tmp2[0] != 'add'):\r\n b = tmp2[1]\r\n else:\r\n b = self.Get_value_from_memory(tmp2[1], tmp1[2])\r\n\r\n if b < 0:\r\n b = pow(2, (tmp1[2] * 8)) + b\r\n if b < 0:\r\n return False\r\n\r\n self.Flags[\"ac\"] = 0\r\n self.Flags[\"of\"] = 0\r\n self.Flags[\"cf\"] = 0\r\n\r\n a = a | b\r\n\r\n\r\n if bool(a & pow(2, (tmp1[2] * 8) - 1)):\r\n self.Flags[\"sf\"] = 1\r\n else:\r\n self.Flags[\"sf\"] = 0\r\n\r\n v = a\r\n one = 0\r\n for i in range(0, 8):\r\n if bool(v & 1):\r\n one += 1\r\n v = v.__rshift__(1)\r\n if bool(one & 1):\r\n self.Flags[\"pf\"] = 0\r\n else:\r\n self.Flags[\"pf\"] = 1\r\n\r\n if a == 0:\r\n self.Flags[\"zf\"] = 1\r\n else:\r\n self.Flags[\"zf\"] = 0\r\n\r\n if tmp1[0] == 'reg':\r\n if len(infix[0][0])==3:\r\n self.Registers[infix[0][0]] = a\r\n else:\r\n self.Save_value_in_reg_X(infix[0][0],a)\r\n else:\r\n if not self.Save_value_in_memory(tmp1[1], a, tmp1[2]):\r\n return False\r\n elif String == 'xor':\r\n\r\n a = 0\r\n if (tmp1[0] != 'add'):\r\n a = tmp1[1]\r\n else:\r\n a = self.Get_value_from_memory(tmp1[1], tmp1[2])\r\n b = 0\r\n if (tmp2[0] != 'add'):\r\n b = tmp2[1]\r\n else:\r\n b = self.Get_value_from_memory(tmp2[1], tmp1[2])\r\n\r\n if b < 0:\r\n b = pow(2, (tmp1[2] * 8)) + b\r\n if b < 0:\r\n return False\r\n\r\n self.Flags[\"ac\"] = 0\r\n self.Flags[\"of\"] = 0\r\n self.Flags[\"cf\"] = 0\r\n\r\n a = a ^ b\r\n\r\n\r\n if bool(a & pow(2, (tmp1[2] * 8) - 1)):\r\n self.Flags[\"sf\"] = 1\r\n else:\r\n self.Flags[\"sf\"] = 0\r\n\r\n v = a\r\n one = 0\r\n for i in range(0, 8):\r\n if bool(v & 1):\r\n one += 1\r\n v = v.__rshift__(1)\r\n if bool(one & 1):\r\n self.Flags[\"pf\"] = 0\r\n else:\r\n self.Flags[\"pf\"] = 1\r\n\r\n if a == 0:\r\n self.Flags[\"zf\"] = 1\r\n else:\r\n self.Flags[\"zf\"] = 0\r\n\r\n if tmp1[0] == 'reg':\r\n if len(infix[0][0])==3:\r\n self.Registers[infix[0][0]] = a\r\n else:\r\n self.Save_value_in_reg_X(infix[0][0],a)\r\n else:\r\n if not self.Save_value_in_memory(tmp1[1], a, tmp1[2]):\r\n return False\r\n return True", "def calculate_infix_expression(cls, expression):\n\t\tlogger.info(f\"in the calculate infix expression {expression}\")\n\t\telements = expression.split()\n\t\tstack = []\n\t\ttry:\n\t\t\tfor e in elements:\n\t\t\t\tif not e.isdigit() and e != \")\":\n\t\t\t\t\tstack.append(e)\n\t\t\t\tif e.isdigit() and not cls.is_operator(stack[-1]):\n\t\t\t\t\tstack.append(e)\n\t\t\t\tif e.isdigit() and cls.is_operator(stack[-1]):\n\t\t\t\t\toperator = stack.pop()\n\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\tresult = cls.apply_math_operations(float(operand1), float(e), operator)\n\t\t\t\t\tif stack[-1] == \"(\":\n\t\t\t\t\t\tstack.append(str(result))\n\t\t\t\t\telse:\n\t\t\t\t\t\traise Exception(\"invalid input\")\n\t\t\t\t\t\tbreak\n\t\t\t\tif e == \")\":\n\t\t\t\t\tvalue = stack.pop()\n\t\t\t\t\tob 
= stack.pop()\n\t\t\t\t\tif (ob == \"(\"):\n\t\t\t\t\t\tstack.append(str(value))\n\t\t\t\t\telif (cls.is_operator(ob)):\n\t\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\t\tstack.pop()\n\t\t\t\t\t\tresult = cls.apply_math_operations(float(operand1), float(value), ob)\n\t\t\t\t\t\tstack.append(str(result))\n\n\t\t\tanswer = float(stack[0])\n\t\t\tlogger.info(f\"the answe is {answer}\")\n\t\t\treturn answer\n\t\texcept Exception as e:\n\t\t\traise Exception(\"Exception from the infix function\")", "def test_sqpp_paren_expr1_minus_expr2_and_paren_expr3(self):\n self.assertEqual(self.parser.parse_query('(expr1) - expr2 + (expr3)'),\n ['+', 'expr1', '-', 'expr2', '+', 'expr3'])", "def eval_sum(parse_result):\r\n total = 0.0\r\n current_op = operator.add\r\n for token in parse_result:\r\n if token == '+':\r\n current_op = operator.add\r\n elif token == '-':\r\n current_op = operator.sub\r\n else:\r\n total = current_op(total, token)\r\n return total", "def reducedFormOne(self, equation):\n splitter = re.split('(\\+|\\-)', equation)\n newEquation = str()\n state = 0\n for token in splitter:\n if '(' in token or state > 0 or '[' in token:\n state += 1\n newEquation += token\n continue\n if ')' in token or ']' in token:\n state -= 1\n continue\n if '^' + self.var in token:\n newEquation += token\n self.validPolynome = False\n continue\n find = re.findall('(\\*|\\^|\\/)?(' + self.var + ')(\\^\\d+)?' , token)\n newVar = []\n for var in find:\n newVar.append(''.join(map(str,var)))\n for var in newVar:\n token = token.replace(var, '')\n if token != '+' and token != '-' and token != '':\n try:\n newEquation += str(eval(token.replace('^', '**'))) + ''.join(newVar)\n except:\n self.error = True\n continue\n else:\n newEquation += token\n return newEquation", "def solve_part2(input, verbose=False):\n equations = parse(input)\n\n result = []\n for eq in equations:\n result.append(solve_equation_addition_precendence(eq, verbose))\n\n if verbose:\n print(f\"results: {result}\")\n\n return sum(result)" ]
[ "0.69371355", "0.69028455", "0.68847096", "0.68847096", "0.6814599", "0.6751683", "0.6709436", "0.6692912", "0.66831404", "0.66694516", "0.6641257", "0.66409284", "0.6588774", "0.6569552", "0.6498368", "0.649347", "0.64901173", "0.6489638", "0.64841825", "0.6454213", "0.6449714", "0.64332175", "0.641108", "0.64083457", "0.6401689", "0.6360533", "0.6359056", "0.6325971", "0.63214594", "0.6309388", "0.63074183", "0.6285559", "0.6281743", "0.62751555", "0.6272417", "0.6234513", "0.62214476", "0.6216995", "0.62054163", "0.62036407", "0.6197002", "0.61953497", "0.6186626", "0.6169971", "0.61660784", "0.6160653", "0.6152093", "0.6144689", "0.6118961", "0.6115423", "0.6105922", "0.60853434", "0.60838526", "0.6081173", "0.60527784", "0.60238063", "0.6008693", "0.6003784", "0.59938335", "0.59913874", "0.59890175", "0.5981167", "0.59730464", "0.5957896", "0.5949426", "0.5934872", "0.592833", "0.59264374", "0.59108335", "0.5910762", "0.5905739", "0.5905521", "0.5901508", "0.59000564", "0.58975405", "0.5897293", "0.589711", "0.5894343", "0.5866245", "0.5849453", "0.5844186", "0.58441216", "0.5811367", "0.57916445", "0.57882947", "0.5787066", "0.57779396", "0.57728654", "0.57611144", "0.5755891", "0.57505333", "0.574939", "0.574939", "0.57425797", "0.57216907", "0.5712488", "0.57018113", "0.57004696", "0.5699264", "0.56987935" ]
0.59757686
62
Test if the equation 3 / 2 is parsed and calculated correctly
def test_parse_devide(self): self.assertEqual(parse_input.parse(["8", "/", "4"]), 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_calculate_three_operations_in_bracket(self):\n result = self.calcuate.calcuate('(2x2+1+7)x3-2')\n expected_result = \"34\"\n self.assertEqual(expected_result, result)", "def is_equation(self): \n return False", "def is_equation(self):\n return True", "def is_equation(self):\n return True", "def test_expression(x, y, z):\n return x * y + y / z", "def is_equation(self):\n return False", "def test_staff_inputs_expressions(self):\r\n problem = self.build_problem(answer=\"1/3\", tolerance=1e-3)\r\n correct_responses = [\"1/3\", \"0.333333\"]\r\n incorrect_responses = []\r\n self.assert_multiple_grade(problem, correct_responses, incorrect_responses)", "def autosolve(equation):\n\n try:\n # Try to set a variable to an integer\n num1 = int(equation.split(\" \")[0])\n\n except ValueError:\n # Try to set a variable to a decimal\n num1 = float(equation.split(\" \")[0])\n\n try:\n # Try to set a variable to an integer\n num2 = int(equation.split(\" \")[2])\n\n except ValueError:\n # Try to set a variable to a decimal\n num2 = float(equation.split(\" \")[2])\n\n # If the lowercase version of the operator is '+', 'plus' or 'add'\n if equation.split(\" \")[1].lower() in [\"+\", \"plus\", \"add\"]:\n\n # Return the answer\n return num1 + num2\n\n # If the lowercase version of the operator is '-', 'minus' or 'subtract'\n elif equation.split(\" \")[1].lower() in [\"-\", \"minus\", \"subtract\"]:\n\n # Return the answer\n return num1 - num2\n\n # If the lowercase version of the operator is '*', 'times', 'multiply'\n elif equation.split(\" \")[1].lower() in [\"*\", \"times\", \"multiply\"]:\n\n # Return the answer\n return num1 * num2\n\n # If the lowercase version of the operator is '/', 'divide' or 'quotient'\n elif equation.split(\" \")[1].lower() in [\"/\", \"divide\", \"quotient\"]:\n\n # Return the answer\n return num1 / num2\n\n # If the lowercase version of the operator is '%, 'remainder' or 'rem'\n elif equation.split(\" \")[1].lower() in [\"%\", \"remainder\", \"rem\"]:\n\n # Return the answer\n return num1 % num2\n\n # Raise a warning\n raise ValueError(\"Invalid operation provided.\")", "def Calc():\n print('Please type a maths expression with 2 intergers or floats and an operator \"+\", \"-\", \"*\" or \"/\"')\n inp = (input())\n for char in inp:\n if char not in '1234567890.-+*/':\n print('Please restart the program and only type valid characters')\n return\n operators = [\"+\", \"-\", \"*\", \"/\"]\n buf = ''\n operand1 = 0.0\n operand2 = 0.0\n for char in inp:\n if char not in operators:\n buf += char\n else:\n operator = char\n operand1 = float(buf)\n buf = ''\n operand2 = float(buf)\n res = 0.0\n if operator == '+':\n res = su(operand1, operand2)\n elif operator == '-':\n res = sub(operand1, operand2)\n elif operator == '*':\n res = mu(operand1, operand2)\n elif operand2==0:\n return \"Can not divide by 0\"\n else:\n res = di(operand1, operand2)\n print(res)\n return res", "def autohard(equation):\n\n try:\n # Try to set a variable to an integer\n num1 = int(equation.split(\" \")[1])\n\n except ValueError:\n # Try to set a variable to a decimal\n num1 = float(equation.split(\" \")[1])\n\n # If the lowercase version of the operation equals 'log'\n if equation.split(\" \")[0].lower() == \"log\":\n # Return the answer\n return math.log(num1)\n\n # If the lowercase version of the operation equals 'acos'\n elif equation.split(\" \")[0].lower() == \"acos\":\n # Return the answer\n return math.acos(num1)\n\n # If the lowercase version of the operation equals 'asin'\n elif 
equation.split(\" \")[0].lower() == \"asin\":\n # Return the answer\n return math.asin(num1)\n\n # If the lowercase version of the operation equals 'atan'\n elif equation.split(\" \")[0].lower() == \"atan\":\n # Return the answer\n return math.atan(num1)\n\n # If the lowercase version of the operation equals 'cos'\n elif equation.split(\" \")[0].lower() == \"cos\":\n # Return the answer\n return math.cos(num1)\n\n # If the lowercase version of the operation equals 'hypot'\n elif equation.split(\" \")[0].lower() == \"hypot\":\n try:\n # Try to set a variable to an integer\n num2 = int(equation.split(\" \")[2])\n\n except ValueError:\n # Try to set a variable to an decimal\n num2 = float(equation.split(\" \")[2])\n\n # Return the answer\n return math.hypot(num1, num2)\n\n # If the lowercase version of the operation equals 'sin'\n elif equation.split(\" \")[0].lower() == \"sin\":\n # Return the answer\n return math.sin(num1)\n\n # If the lowercase version of the operation equals 'tan'\n elif equation.split(\" \")[0].lower() == \"tan\":\n # Return the answer\n return math.tan(num1)\n\n # Raise a warning\n raise ValueError(\"Invalid operation entered.\")", "def test_calculate_two_operations_in_bracket(self):\n result = self.calcuate.calcuate('(2-5+7)x3-2')\n expected_result = \"10\"\n self.assertEqual(expected_result, result)", "def test_calculate_bracket_at_the_beginning_and_multiplication(self):\n result = self.calcuate.calcuate('(2+1)x3')\n expected_result = \"9\"\n self.assertEqual(expected_result, result)", "def exeval(expression): \n if len(expression) <= 3: #Assuming no spaces (\" \") between each value given in the expression\n if expression[0] == \"+\":\n return float(expression[1]) + float(expression[2])\n elif expression[0] == \"-\":\n return float(expression[1]) - float(expression[2])\n else:\n if expression[0] == \"+\":\n return float(expression[1]) + exeval(expression[2:])\n elif expression[0] == \"-\":\n return float(expression[1]) - exeval(expression[2:])", "def test_invalid_calculation(self):\n\t\turl = reverse('calculation')\n\t\tdata = {'expression': '2+3**(4+2)'}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\t\tself.assertEqual(response.data, {'error': 'invalid expression' })", "def opr1():\n try:\n a = input(\"Enter the number 'a': \")\n b = input(\"Enter the number 'b': \")\n z = (float(a) ** 2) / float(b)\n return z\n except (ValueError, ZeroDivisionError):\n return 'Something is entered incorrectly!'", "def math_operation(expression):\n if not str(expression[0]).isdigit() or not str(expression[2]).isdigit():\n # eliminates the error call for float and negative numbers\n if not str(expression[0]).replace('.', '1').replace('-', '1').isdigit() or \\\n not str(expression[2]).replace('.', '1').replace('-', '1').isdigit():\n raise ValueError(f'{expression} - check this fragment, something wrong.')\n if expression[2] == 0 and expression[1] == '/':\n raise ValueError(f'{expression} - division by zero.')\n operator = expression[1]\n if operator == '**':\n return expression[0]**expression[2]\n elif operator == '*':\n return expression[0]*expression[2]\n elif operator == '/':\n return expression[0]/expression[2]\n elif operator == '+':\n return expression[0]+expression[2]\n elif operator == '-':\n return expression[0]-expression[2]", "def test_division_and_math(self):\n\n good_examples = \"\"\"\n [score] / 2 -> CAST(datatypes.score AS FLOAT) / 2\n [score] / 2.0 -> CAST(datatypes.score AS FLOAT) 
/ 2.0\n sum(score) / count(*) -> CASE WHEN (count(*) = 0) THEN NULL ELSE CAST(sum(datatypes.score) AS FLOAT) / CAST(count(*) AS FLOAT) END\n [score] / 1 -> datatypes.score\n sum([score] / 1) -> sum(datatypes.score)\n sum([score] / [score]) -> sum(CASE WHEN (datatypes.score = 0) THEN NULL ELSE CAST(datatypes.score AS FLOAT) / CAST(datatypes.score AS FLOAT) END)\n score / 2 -> CAST(datatypes.score AS FLOAT) / 2\n sum(score / score) -> sum(CASE WHEN (datatypes.score = 0) THEN NULL ELSE CAST(datatypes.score AS FLOAT) / CAST(datatypes.score AS FLOAT) END)\n [score] / (2/1) -> CAST(datatypes.score AS FLOAT) / 2\n [score] / (0.5/0.25) -> CAST(datatypes.score AS FLOAT) / 2.0\n [score] / (0.5 / 0.25) -> CAST(datatypes.score AS FLOAT) / 2.0\n [score] * (2*3) -> datatypes.score * 6\n [score] * (2*score) -> datatypes.score * 2 * datatypes.score\n [score] * (2 / score) -> datatypes.score * CASE WHEN (datatypes.score = 0) THEN NULL ELSE 2 / CAST(datatypes.score AS FLOAT) END\n [score] / (10-7) -> CAST(datatypes.score AS FLOAT) / 3\n [score] / (10-9) -> datatypes.score\n ([score] + [score]) / ([score] - [score]) -> CASE WHEN (datatypes.score - datatypes.score = 0) THEN NULL ELSE CAST(datatypes.score + datatypes.score AS FLOAT) / CAST(datatypes.score - datatypes.score AS FLOAT) END\n # Order of operations has: score + (3 + (5 / 5))\n score + (3 + 5 / (10 - 5)) -> datatypes.score + 4.0\n # Order of operations has: score + (3 + 0.5 - 5)\n score + (3 + 5 / 10 - 5) -> datatypes.score + -1.5\n \"\"\"\n\n for field, expected_sql in self.examples(good_examples):\n expr, _ = self.builder.parse(field, debug=True)\n self.assertEqual(expr_to_str(expr), expected_sql)", "def test_complex_expression(self):\r\n\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"(2^2+1.0)/sqrt(5e0)*5-1\"),\r\n 10.180,\r\n delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"1+1/(1+1/(1+1/(1+1)))\"),\r\n 1.6,\r\n delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"10||sin(7+5)\"),\r\n -0.567, delta=0.01\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"sin(e)\"),\r\n 0.41, delta=0.01\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"k*T/q\"),\r\n 0.025, delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"e^(j*pi)\"),\r\n -1, delta=1e-5\r\n )", "def test_operator_sanity(self):\r\n var1 = 5.0\r\n var2 = 2.0\r\n operators = [('+', 7), ('-', 3), ('*', 10), ('/', 2.5), ('^', 25)]\r\n\r\n for (operator, answer) in operators:\r\n input_str = \"{0} {1} {2}\".format(var1, operator, var2)\r\n result = calc.evaluator({}, {}, input_str)\r\n fail_msg = \"Failed on operator '{0}': '{1}' was not {2}\".format(\r\n operator, input_str, answer\r\n )\r\n self.assertEqual(answer, result, msg=fail_msg)", "def test_all(self):\n\n tokens = list(Lexer(\"12 + 2^(8/4) - 5 * (7%4)\").generate_tokens())\n answer = [Token(TokenType.NUMBER, 12),\n Token(TokenType.PLUS),\n Token(TokenType.NUMBER, 2),\n Token(TokenType.EXPONENT),\n Token(TokenType.LPAREN),\n Token(TokenType.NUMBER, 8),\n Token(TokenType.DIVIDE),\n Token(TokenType.NUMBER, 4),\n Token(TokenType.RPAREN),\n Token(TokenType.MINUS),\n Token(TokenType.NUMBER, 5),\n Token(TokenType.MULTIPLY),\n Token(TokenType.LPAREN),\n Token(TokenType.NUMBER, 7),\n Token(TokenType.MODULO),\n Token(TokenType.NUMBER, 4),\n Token(TokenType.RPAREN)]\n #Token(TokenType.NUMBER, 3)]\n self.assertEqual(tokens, answer)", "def main(expression):\n\n exception = parse_expression(expression)\n return calc(poland_notation(exception))", "def 
test_3_operands(self):\n f12: Fraction = Fraction(1, 2)\n f34: Fraction = Fraction(3, 4)\n f44: Fraction = Fraction(4, 4)\n f84: Fraction = Fraction(8, 4)\n f48: Fraction = Fraction(4, 8)\n f816: Fraction = Fraction(8, 16)\n self.assertTrue(f12 + f34 + f44 == Fraction(72, 32))\n self.assertTrue(f12 - f34 - f44 == Fraction(-5,4))\n self.assertTrue(f12 * f34 * f44 == Fraction(3, 8))\n self.assertTrue(f12 / f34 / f44 == Fraction(4, 6))\n self.assertTrue(f12 == f48 == f816)\n self.assertFalse(f12 == f816 == f34)\n self.assertTrue(f12 != f34 != f44)\n self.assertFalse(f12 != f48 != f816)\n self.assertTrue(f12 < f34 < f44)\n self.assertFalse(f84 < f44 < f12)\n self.assertTrue(f12 <= f12 <= f48)\n self.assertFalse(f84 <= f44 < f12)\n self.assertTrue(f44 > f34 > f12)\n self.assertFalse(f12 > f44 > f84)\n self.assertTrue(f12 >= f48 >= f816)\n self.assertFalse(f12 >= f44 >= f816)", "def main():\r\n eq = input(\"Input an equation: \")\r\n splitList = (mysplit(eq))\r\n operandsList = []\r\n #This loop takes in the split list and adds to a list without operators\r\n for operand in splitList:\r\n if operand == '+' or operand == '-' or operand == '*' or operand == '/':\r\n continue\r\n operandsList.append(operand)\r\n operatorsList = []\r\n #This loop takes in the split list and adds to a list without digits\r\n for operator in splitList:\r\n if operator.isdigit() is True:\r\n continue\r\n operatorsList.append(operator)\r\n #variable to check if the operator is allowed\r\n operatorChecker = False\r\n for sign in operatorsList:\r\n if sign == '+' or sign == '-' or sign == '/' or sign == '*':\r\n operatorChecker = True\r\n else:\r\n operatorChecker = False\r\n operandsDigits = ''.join(operandsList)\r\n #this checks if the operands are digits\r\n operandsChecker = str.isdigit(operandsDigits)\r\n #check if equation contains division with 0\r\n if '/ 0' in eq:\r\n zeroChecker = False\r\n else:\r\n zeroChecker = True\r\n\r\n #if conditions for the\r\n if operandsChecker is False or operatorChecker is False or zeroChecker is False:\r\n print(\"Invalid Input\")\r\n else:\r\n stack, queue = parseNumbers(eq)\r\n stackAnswer = calculateStack(stack)\r\n queueAnswer = calculateQueue(queue)\r\n print(\"Queue total:\", queueAnswer)\r\n print(\"Stack total:\", stackAnswer)\r\n if queueAnswer == stackAnswer:\r\n print(\"They do match!\")\r\n else:\r\n print(\"They do not match!\")", "def evaluator_side_effect(_, __, math_string):\r\n if math_string != '4':\r\n raise err", "def evaluate(formula, model):\r\n # Task 2.1\r\n if is_unary(formula.root):\r\n return not evaluate(formula.first, model)\r\n elif is_ternary(formula.root):\r\n if evaluate(formula.first,model):\r\n return evaluate(formula.second,model)\r\n else:\r\n return evaluate(formula.third,model)\r\n elif is_binary(formula.root):\r\n if formula.root == '&':\r\n return evaluate(formula.first, model) and evaluate(formula.second, model)\r\n elif formula.root == '|':\r\n return evaluate(formula.first, model) or evaluate(formula.second, model)\r\n elif formula.root == '<->':\r\n return not (evaluate(formula.first, model) ^ evaluate(formula.second, model))\r\n elif formula.root == '-&':\r\n return not (evaluate(formula.first, model) and evaluate(formula.second, model))\r\n elif formula.root == '-|':\r\n return not (evaluate(formula.first, model) or evaluate(formula.second, model))\r\n else:\r\n return (not evaluate(formula.first, model)) or evaluate(formula.second, model)\r\n elif is_constant(formula.root):\r\n if formula.root == 'T':\r\n return True\r\n else:\r\n 
return False\r\n else:\r\n return model[formula.root]\r\n #\r\n # if is_constant(formula.root):\r\n # if formula.root == 'T':\r\n # return True\r\n # elif formula.root == 'F':\r\n # return False\r\n # elif is_variable(formula.root):\r\n # return model[formula.root]\r\n #\r\n # elif is_unary(formula.root):\r\n # return not evaluate(formula.first, model)\r\n # assert (type(formula.first) is Formula) and (type(formula.second) is Formula)\r\n # if is_binary(formula.root):\r\n # if formula.root == '&':\r\n # return evaluate(formula.first, model) and evaluate(formula.second, model)\r\n # elif formula.root == '|':\r\n # return evaluate(formula.first, model) or evaluate(formula.second, model)\r\n # elif formula.root == '->':\r\n # if evaluate(formula.first, model) and not evaluate(formula.second, model):\r\n # return False\r\n # else:\r\n # return True\r\n # elif formula.root == '<->':\r\n # if (not evaluate(formula.first, model) and not evaluate(formula.second, model)) or (\r\n # evaluate(formula.first, model) and evaluate(formula.second, model)):\r\n # return True\r\n # else:\r\n # return False\r\n # elif formula.root == '-&':\r\n # if evaluate(formula.first, model) and evaluate(formula.second, model):\r\n # return False\r\n # else:\r\n # return True\r\n # elif formula.root == '-|':\r\n # if not evaluate(formula.first, model) and not evaluate(formula.second, model):\r\n # return True\r\n # else:\r\n # return False\r\n # else:\r\n # if evaluate(formula.first, model):\r\n # return evaluate(formula.second, model)\r\n # else:\r\n # return evaluate(formula.third, model)\r", "def calculate (self,phrase):\r\n\r\n\r\n def bracketed (phrase,bracketing='()'):\r\n\r\n \"\"\"Returns TRUE if <phrase> is encompassed by a left bracket and a right bracket\r\n at the same hierarchical level\"\"\"\r\n\r\n level = 0\r\n left_point = None\r\n right_point = None\r\n \r\n\r\n for count,char in enumerate(phrase):\r\n\r\n if char == bracketing[0]:\r\n if level == 0:\r\n left_point = count\r\n level+=1\r\n if char == bracketing[1]:\r\n level-=1\r\n if level == 0:\r\n right_point = count\r\n if not (left_point is None) and (not right_point is None) and left_point == 0 and right_point == len(phrase)-1:\r\n return True\r\n return False\r\n\r\n def quoted (phrase):\r\n\r\n level = 0\r\n foundchar = ''\r\n left_point = None\r\n right_point = None \r\n for count,char in enumerate(phrase):\r\n\r\n if char in ['\"',\"'\"] and level == 0:\r\n foundchar = char\r\n left_point = count\r\n level += 1\r\n elif level == 1 and char == foundchar:\r\n right_point = count\r\n level += 1\r\n if not (left_point is None) and (not right_point is None) and left_point == 0 and right_point == len(phrase)-1:\r\n return True\r\n return False \r\n \r\n \r\n\r\n def is_function(phrase):\r\n\r\n \"\"\"Tests to see if a phrase begins with a predefined function,\r\n in which case it returns information about the iarity of function\"\"\"\r\n \r\n \r\n for x in self.functions.keys():\r\n\r\n if len(x) < len(phrase) and phrase[0:len(x)] == x:\r\n if bracketed(phrase[len(x):]):\r\n if self.functions[x][1]-1 <= phrase.count(',') <= self.functions[x][2]-1:\r\n return x, self.functions[x][0], self.functions[x][2], phrase[len(x):]\r\n else:\r\n return False,False,False,False \r\n \r\n\r\n def all_simple (phrase):\r\n\r\n \"\"\"Tests if a phrase is a simple string representing an expression, rather than an operation\"\"\"\r\n\r\n\r\n for x in phrase:\r\n if (x not in self.operations and not (isinstance(x,(int,type(ListType()),float,bool) or (isinstance(x,str) 
and quoted(x)))) or self.current_register.contains(x)):\r\n return False\r\n return True\r\n \r\n def parse (phrase):\r\n\r\n \r\n COMPTERMS = ['==','>=','<=','!=','>','<',]\r\n\r\n\r\n def contains_comp (x):\r\n \"\"\"True is x contains any of the COMP Terms\"\"\"\r\n\r\n for comp in COMPTERMS:\r\n if comp in x:\r\n return True\r\n return False\r\n\r\n\r\n \r\n \"\"\"Parses and analzes the phrase\"\"\"\r\n\r\n \r\n if phrase in ['bTrue','bFalse','EmptyList']:\r\n return {'bTrue':True,\r\n 'bFalse':False,\r\n 'EmptyList':ListType()}[phrase]\r\n\r\n if isinstance(phrase,str):\r\n \r\n if quoted(phrase):\r\n return phrase\r\n else:\r\n try:\r\n return float(phrase)\r\n except:\r\n pass\r\n \r\n\r\n # If the phrase is a string\r\n phrase = phrase.strip()\r\n\r\n func_name, func, iarity, func_phrase = is_function(phrase)\r\n # tests is it is function; otherwise the values are false.\r\n \r\n \r\n\r\n if func_name:\r\n if iarity == 1:\r\n # If the function accepts one value\r\n return func(parse(func_phrase))\r\n if iarity == 2:\r\n # Two values \r\n func_phrase = func_phrase[1:-1]\r\n term1,term2 = func_phrase.split(',')[0],func_phrase.split(',')[1]\r\n return func(parse(term1),parse(term2))\r\n if iarity == 3:\r\n func_phrase = func_phrase[1:-1]\r\n term1,term2, term3 = func_phrase.split(',')[0],func_phrase.split(',')[1],func_phrase.split(',')[2]\r\n return func(parse(term1),parse(term2),parse(term3))\r\n \r\n if iarity >3:\r\n # A list of values \r\n func_phrase = func_phrase[1:-1]\r\n return func([parse(x) for x in func_phrase.split(',')])\r\n elif phrase and phrase[0] == '-' and bracketed(phrase[1:]):\r\n # Translates negative sign (as opposed to operators) into corresponding function \r\n return -parse(phrase[2:-1])\r\n\r\n\r\n elif bracketed(phrase):\r\n # removes top-level bracket\r\n phrase = phrase[1:-1]\r\n return parse(phrase)\r\n elif phrase in self.operations:\r\n return phrase\r\n elif self.current_register.contains(phrase):\r\n # for variables and constants\r\n return self.current_register.get(phrase)\r\n elif contains_comp(phrase) and '(' not in phrase and ')' not in phrase:\r\n return calc.computer.get(phrase)\r\n elif phrase and phrase[0]=='@' and phrase[-1]=='@':\r\n # to retrieve values from the log \r\n index = int(parse(phrase[1:-1]))\r\n if 0<= index <= len(self.lines):\r\n return self.lines[index][0]\r\n else:\r\n \r\n phrase = list(phrase)\r\n #phrase is converted to a list to allowing indexical assignments\r\n operation_sequence = []\r\n level = 0\r\n inquotes = False\r\n quote_form = ''\r\n for counter, x in enumerate(phrase):\r\n\r\n # Search for operators that are not enclosed in parantheses\r\n\r\n if not inquotes and x in ['\"',\"'\"]:\r\n inquotes = True\r\n quote_form = x\r\n elif inquotes and x == quote_form:\r\n if counter < len(phrase)-1:\r\n if phrase[counter+1] in ['+']:\r\n phrase[counter+1] = '#+#'\r\n\r\n if x == '(':\r\n level +=1\r\n\r\n if x == ')':\r\n level -=1\r\n if level == 0:\r\n if counter<len(phrase)-1:\r\n if phrase[counter+1] in self.operations:\r\n # If an operator is found, surround it with pound signs\r\n phrase[counter+1] = '#'+phrase[counter+1]+'#'\r\n if phrase[counter+2] in self.operations:\r\n phrase[counter+2] = '~'\r\n # For a minus sign that is not an operator\r\n\r\n \r\n phrase = ''.join(phrase).replace('~','-').split('#')\r\n # Split the phrase into expressions linked by operators \r\n newphrase = []\r\n for x in phrase:\r\n # a clumsy way to distinction between numerical values, and string operators\r\n try:\r\n 
newphrase.append(float(x))\r\n except:\r\n newphrase.append(x)\r\n phrase = newphrase\r\n\r\n return parse(phrase)\r\n \r\n \r\n\r\n if isinstance(phrase,list):\r\n # If the phrase has already been parsed into a list \r\n if len(phrase) == 1:\r\n return (parse(phrase[0]))\r\n if all_simple(phrase):\r\n # If every value in the phrase list has been reduced to\r\n # a numerical value or an operator\r\n \r\n\r\n for operation in self.operations:\r\n\r\n #In order to preserve the correct order of operations,\r\n #the operations are analyzed in succession\r\n\r\n while operation in phrase:\r\n\r\n #This repeat as long as the operation is in the phrase,\r\n #since with each pass it only \"reduced\"\r\n #expression/operator/expression triplet\r\n \r\n\r\n newlist = [] # For the result of each pass through the list.\r\n lastvalue = None\r\n counter = 0\r\n stop = False\r\n while counter < len(phrase) and not stop:\r\n \r\n \r\n if counter < len(phrase)-2:\r\n a = phrase[counter]\r\n op = phrase[counter+1]\r\n b = phrase[counter+2]\r\n #take a triplet of values from the list\r\n\r\n if op == operation:\r\n # if an operator is found, reduced the triplet, and\r\n # then add the reduced value, together with the rest\r\n # of the list to the \r\n if operation == '*':\r\n c = a*b\r\n elif operation == '+':\r\n if isinstance(a,str) and isinstance(b,str):\r\n c = a[0:-1]+b[1:]\r\n else:\r\n c = a+b\r\n elif operation == '/':\r\n c = a/b\r\n elif operation == '^':\r\n c = a**b\r\n elif operation == '%':\r\n c = a % b\r\n elif operation == '-':\r\n c = a - b\r\n newlist.append(c)\r\n newlist += phrase[counter+3:] \r\n stop = True\r\n else:\r\n newlist.append(a)\r\n else:\r\n # otherwise, just add the text value to the new list\r\n newlist.append(phrase[counter])\r\n counter +=1 \r\n \r\n \r\n phrase = newlist\r\n\r\n \r\n else:\r\n # if the list is not yet simple, return a new list after parsing each element.\r\n phrase = [parse(x) for x in phrase]\r\n return parse(phrase)\r\n\r\n if isinstance(phrase,(int,float,type(ListType()),bool)):\r\n # if a numerical value, stop the recursion\r\n \r\n return phrase \r\n\r\n return parse(phrase)", "def test_calculate_bracket_in_bracket(self):\n result = self.calcuate.calcuate('(2+(1+10)-1)')\n expected_result = \"12\"\n self.assertEqual(expected_result, result)", "def test_simple_calculation(self):\n\t\turl = reverse('calculation')\n\t\tdata = {'expression': '2+3*(4+2)'}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data, {'result': 20 })", "def test_staff_inputs_expressions_legacy(self):\r\n problem = self.build_problem(answer=\"1+1j\", tolerance=1e-3)\r\n self.assert_grade(problem, '1+j', 'correct')", "def check_math_line(self, line):\n self.E_str = \"check_math_line\"\n err_msg = \"The syntax for a math command is: math <var> = <arthimetic operation>.\"\n err_msg += \"\\nFor example: 'x = x / (1 - x)'\\n\\n\"\n\n # Check we don't too have many equals signs\n if line.count('=') > 1:\n self.print_error(\"Too many '=' found!\\n\\n\" + err_msg)\n elif line.count('=') == 0:\n self.print_error(f\"I can't find a '=' on the math line!{err_msg}\")\n\n # Set the variable for error checking later\n new_var_name, metadata_name = self.get_variable_name(line)\n\n # Split the line by =\n words = line.split('=')\n _, maths = words\n md_var_names, md_names, new_line = self.parse_metadata_line(maths,\n \"get\")\n # Check metadata and their variables have been declared\n 
metadata = {}\n self.E_str = \"check_math_line\"\n for v_name, m_name in zip(md_var_names, md_names):\n if v_name not in self.variables:\n self.print_error(f\"Undeclared variable '{var}'\")\n Var = getattr(self, v_name)\n if m_name not in Var.metadata:\n e_msg = f\"Undeclared metadata '{m_name}' in variable '{v_name}''\"\n self.print_error(e_msg)\n metadata[m_name] = \"\"\n\n # Check if there are any undeclared variables\n line, any_vars = self.find_vars_in_str(new_line)\n self.E_str = \"check_math_line\"\n\n # Check if there are any unwanted characters\n bad_chars = \"%£\\\"!&}{[]}:;@'^~#<,>?¬`|\"\n for j in bad_chars:\n if j in new_line:\n self.print_error(f\"Illegal character '{j}' in math\")\n\n # Check all brackets are closed\n if new_line.count(\"(\") != line.count(\")\"):\n err_msg = \"You've not closed one of the brackets you opened.\\n\"\n err_msg += \"Num of '(' = %i\\n\" % line.count(\"(\")\n err_msg += \"Num of ')' = %i\\n\" % line.count(\")\")\n self.print_error(err_msg)\n\n self.set_var(new_var_name, \"^EMPTY^\", {metadata_name: \"\"})\n return new_var_name", "def equation(operation, firstnum, secondnum):\n if operation == 'plus':\n return firstnum + secondnum\n elif operation == 'minus':\n return firstnum - secondnum\n elif operation == 'multiply':\n return firstnum * secondnum\n elif operation == 'divide':\n if not secondnum == 0:\n return firstnum / secondnum\n raise ZeroDivisionError(\"Unable to divide by 0.\")\n raise ValueError('Invalid operation provided.')", "def test_calculate_test(self):\n result = self.calcuate.calcuate('3+3+(4-3)')\n expected_result = \"7\"\n self.assertEqual(expected_result, result)", "def get_equation_from_user(self):\n while True:\n try:\n # Get the string from the user\n string_equation = input(\"Please enter an valid equation\")\n\n # If empty get new string\n while not string_equation:\n string_equation = input(\n \"Please enter an valid not empty equation\")\n break\n except Exception as e:\n print(\"something went wrong please try again \" + str(e))\n\n return string_equation", "def parse_simple_eqn(equation=\"\"):\n # Define replacement rules.\n simple_replacements = [[' ', ''],\n ['**', '^'], ['*', ' \\\\cdot '],\n ['math.', ''], ['np.', ''],\n ['pi', '\\\\pi'] , ['tan', '\\\\tan'],\n ['cos', '\\\\cos'], ['sin', '\\\\sin'],\n ['sec', '\\\\sec'], ['csc', '\\\\csc']]\n complex_replacements = [['^', '{{{i1}}}^{{{i2}}}'],\n ['_', '{{{i1}}}_{{{i2}}}'],\n ['/', '\\\\frac{{{i1}}}{{{i2}}}'],\n ['sqrt','\\\\sqrt{{{i2}}}']]\n # Carry out simple replacements\n for pair in simple_replacements:\n equation = equation.replace(pair[0], pair[1])\n # Now complex replacements\n for item in ['*', '/', '+', '-', '^', '_', ',', 'sqrt']:\n equation = equation.replace(item, ' ' + item + ' ')\n q_split = equation.split()\n for index, item in enumerate(q_split):\n for pair in complex_replacements:\n if item == pair[0]:\n if item == 'sqrt':\n match_str = \" \".join(q_split[index:index+2])\n else:\n match_str = \" \".join(q_split[index-1:index+2])\n equation = equation.replace(match_str, pair[1].format(\n i1=q_split[index-1], i2=q_split[index+1]))\n return equation", "def opr():\n try:\n a = input(\"Enter the number 'a': \")\n b = input(\"Enter the number 'b': \")\n z = (float(a) ** 2) / float(b)\n return z\n except ValueError:\n return 'You entered not a number!'\n except ZeroDivisionError:\n return 'Only stupid divide by zero'", "def test_eval(self):\n # expr and expr\n base = abs_path('./specs/')\n ps = Parser(base + 'script3-6.py', base)\n 
ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 2)\n\n # expr or expr\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"a == if or B == b1\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 6)\n\n # expr and (expr or expr)\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"a == if and (B == b1 or B == b2)\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing !=\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"a != if\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing >=\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"a.index >= 1\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing index\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"b.index == 1\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing option with integer type\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"b == 0\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing option with float type\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"b == 1.5\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing unmade decision\n ps.spec['constraints'] = [{\"block\": \"A\", \"condition\": \"b.index == 0\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 0)\n\n # testing if the decision is made when the block depends on a variable\n # inside the block\n ps.spec['constraints'] = [{\"block\": \"B\", \"condition\": \"b.index == 0\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 0)", "def simple_calculator(calculation):\n\n\n operations = {'+': lambda x,y: x + y,'-': lambda x,y: x-y,'*': lambda x,y: x * y,'/': lambda x,y: x/y}\n \n def is_numeric(x):\n\n try:\n float(x)\n int(x)\n except:\n return False\n else:\n return True\n \n\n values = calculation.split()\n print(values)\n if is_numeric(values[0]) and is_numeric(values[2]) and values[1] in operations:\n operation = operations[values[1]]\n try:\n return operation(float(values[0]),float(values[2]))\n except ZeroDivisionError:\n raise ValueError(\"Division by zero\")\n\n\n raise ValueError(\"Invalid Operation\")", "def test_product_single_frac(self):\r\n self.assertEquals(\r\n preview.latex_preview('(2+3)/(4+5)'),\r\n r'\\frac{2+3}{4+5}'\r\n )", "def test_centeredEquation(self):\n\n A33, K = self.cs.centeredEquation\n self.assertTrue((self.A33 == A33).all())\n self.assertEqual(K, 1.)", "def evaluate1(expr):\n operators = '*/+-'\n operator_stack = []\n operand_stack = []\n\n def parse_operand(s, i):\n \"\"\"\n parse the location of the string until I find an\n operator\n parse \"12\" to 12\n \"12.12\" to 12.12\n returns a float\n \"\"\"\n value = ''\n while (s[i] not in operators):\n value += s[i]\n i += 1\n if s[i] == ')':\n break\n return float(value), i-1\n\n def do_operation(operand1, operand2, operator):\n if operator == '+':\n return operand1 + operand2 \n elif operator == '*':\n return operand1 * operand2\n elif operator == '/':\n return operand1 / operand2\n elif operator == '-':\n return operand1 - operand2\n\n i = 0\n s = expr\n length = len(s)\n numbers = '0123456789'\n while i < 
length:\n data = s[i]\n if data == '(':\n operand_stack.append(data)\n elif data in numbers:\n # parse the operand number and modifies the index i\n number, i = parse_operand(s, i)\n operand_stack.append(number)\n elif data in operators:\n operator_stack.append(data)\n elif data is ')':\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n operator = operator_stack.pop()\n operand_stack.pop() # remove (\n operand_stack.append(do_operation(operand1, operand2, operator))\n i += 1\n return operand_stack.pop()", "def test_calculate_all_operations(self):\n result = self.calcuate.calcuate('11-2+4x3-5')\n expected_result = \"16\"\n self.assertEqual(expected_result, result)", "def test_truediv():\n truediv = _MathExpression() / 2\n assert math.isclose(truediv(9), 4.5) # type: ignore", "def valid(formula):\r\n\r\n try:\r\n return not re.search(r'\\b0[0-9]', formula) and eval((formula) is True\r\n #except ArithmeticError:\r\n #return False\r\n except:\r\n return False", "def test_calculate_multiplication_and_bracket_at_the_end(self):\n result = self.calcuate.calcuate('2x(1+3)')\n expected_result = \"8\"\n self.assertEqual(expected_result, result)", "def calc_formula(usr_input: str):\n # split out input string for calculation\n str_parse_at_ws = usr_input.split()\n a = int(str_parse_at_ws[0])\n b = int(str_parse_at_ws[1])\n c = int(str_parse_at_ws[2])\n # Calculate results of parsed string, converted to integer\n result = a / b + c\n print('Formula: {}/{}+{}'.format(a, b, c),\n 'Result: {}'.format(float(result)))\n return result", "def solve_quadratic_equation(a, b, c):\n if a != 0:\n d = b**2 - 4*a*c\n # d = discriminant\n if d > 0:\n first_root = (-b + math.sqrt(d)) / (2 * a)\n second_root = (-b - math.sqrt(d)) / (2 * a)\n return first_root, second_root\n elif d == 0:\n root = -(b / (2 * a))\n return root, None\n elif d < 0:\n return None, None\n else:\n print(\"Variable 'a' can`t be equal 0\")", "def evaluate_expression(in_str):\n answer = 0\n # key-value pairs keys are the mathematical expressions and the values are the weights that represents the order of oeprations\n # higher weights represnts the expressions to evaluate first, while keys with value 0 are not used yet, they are modifiable\n expression = {\"+\" : 5, \"-\" : 5,\n \"/\" : 10, \"*\" : 10,\n \"**\" : 15,\n \"%\" : 20, \"//\": 20,\n \"&\" : 0, \"#\" : 0, \"!\" : 0, \"|\" : 0, \":\" : 0, \";\" : 0, \"?\": 0\n }\n\n return answer", "def test_rtruediv():\n truediv = _MathExpression() / 2\n rtruediv = 9 / _MathExpression()\n assert truediv(9) == rtruediv(2)", "def reducedFormTwo(self, equation):\n find = re.findall('(.)?(\\d+\\.\\d+|\\d+)(\\+|\\-)(\\d+\\.\\d+|\\d+)(.)?' 
, equation)\n for token in find:\n tmp = ''.join(map(str,token))\n if tmp[-1] == '*' or tmp[-1] == '^' or tmp[-1] == '/':\n continue\n if tmp[0] == '*' or tmp[0] == '^' or tmp[0] == '/':\n continue\n else:\n try:\n if tmp[0] == '-':\n pass\n if not tmp[-1].isnumeric():\n tmp = tmp[:-1]\n res = eval(tmp)\n if res > 0:\n res = '+' + str(res)\n equation = equation.replace(tmp, res)\n except:\n continue\n return equation", "def test_calculate_adding_in_bracket(self):\n result = self.calcuate.calcuate('(2+1)')\n expected_result = \"3\"\n self.assertEqual(expected_result, result)", "def solve(self, question):\n # check if the question matches binary operation\n match = self.BINARY_OP_REGEX.match(question)\n if match:\n # read LHS operand\n op1 = self.get_number(match.group(1))\n if op1 is None:\n return\n # read operator\n operator = match.group(2)\n # read RHS operand\n op2 = self.get_number(match.group(3))\n if op2 is None:\n return\n # calculate the operation\n self.handle_binary_operator(op1, operator, op2)\n return\n # check match of unary operation\n match = self.UNARY_OP_REGEX.match(question)\n if match:\n # read operator\n operator = match.group(1).upper()\n # read operand\n op = self.get_number(match.group(2))\n if op is None:\n return\n # calculate the operation\n self.handle_unary_operator(operator, op)\n return\n # no match found\n print(\"Invalid question!\")", "def calc():\r\n\r\n op = input(\"Valitse operaatio (+, -, *, /): \")\r\n if op in ('+', '-', '*', '/'):\r\n try:\r\n luku1 = float(input(\"Anna luku 1: \"))\r\n luku2 = float(input(\"Anna luku 2: \"))\r\n except ValueError:\r\n print(\"Ei tämä ole mikään luku\")\r\n\r\n else:\r\n if op == '+':\r\n vastaus = luku1 + luku2\r\n elif op == '-':\r\n vastaus = luku1 - luku2\r\n elif op == '*':\r\n vastaus = luku1 * luku2\r\n elif op == '/':\r\n if luku2 != 0:\r\n vastaus = luku1 / luku2\r\n else:\r\n print(\"Tällä ohjelmalla ei pääse äärettömyyteen\")\r\n return\r\n print(\"Tulos: {}\".format(vastaus))\r\n else:\r\n print(\"operaatiota ei ole olemassa\")", "def present_solved_equation(self, result):\n print(\"the result to the equation is:\", result)", "def pm3d_formula(x,formula):\n \n if(formula<0):\t\t\n\tx=1.0-x\n\tformula=-formula\n\n if(formula==0): return 0\n elif(formula==1): return 0.5\n elif(formula==2): return 1\n elif(formula==3): return x\n elif(formula==4): return(x * x)\n elif(formula==5): return(x * x * x)\n elif(formula==6): return(x * x * x * x)\n elif(formula==7): return(Numeric.sqrt(x))\n elif(formula==8): return(x**0.25)\n elif(formula==9): return(Numeric.sin(90.0 * x * DEG2RAD))\n elif(formula==10): return(Numeric.cos(90 * x * DEG2RAD))\n elif(formula==11): return(Numeric.fabs(x - 0.5))\n elif(formula==12): return((2 * x - 1) * (2.0 * x - 1))\n elif(formula==13): return(Numeric.sin(180 * x * DEG2RAD))\n elif(formula==14): return(Numeric.fabs(cos(180 * x * DEG2RAD)))\n elif(formula==15): return(Numeric.sin(360 * x * DEG2RAD))\n elif(formula==16): return(Numeric.cos(360 * x * DEG2RAD))\n elif(formula==17): return(Numeric.fabs(Numeric.sin(360 * x * DEG2RAD)))\n elif(formula==18): return(Numeric.fabs(Numeric.cos(360 * x * DEG2RAD)))\n elif(formula==19): return(Numeric.fabs(Numeric.sin(720 * x * DEG2RAD)))\n elif(formula==20): return(Numeric.fabs(Numeric.cos(720 * x * DEG2RAD)))\n elif(formula==21): return(3 * x) # ???????\n elif(formula==22): return(3 * x - 1)\n elif(formula==23): return(3 * x - 2)\n elif(formula==24): return(Numeric.fabs(3 * x - 1))\n elif(formula==25): return(Numeric.fabs(3 * x - 2))\n 
elif(formula==26): return((1.5 * x - 0.5))\n elif(formula==27): return((1.5 * x - 1))\n elif(formula==28): return(Numeric.fabs(1.5 * x - 0.5))\n elif(formula==29): return(Numeric.fabs(1.5 * x - 1))\n elif(formula==30):\n if (x <= 0.25): return 0.0\n if (x >= 0.57): return 1.0\n\treturn(x / 0.32 - 0.78125)\n elif(formula==31):\n if (x <= 0.42): return 0.0\n if (x >= 0.92): return 1.0\n\treturn(2 * x - 0.84)\n elif(formula==32):\n if (x <= 0.42): return(4*x)\n if (x <= 0.92): return(-2 * x + 1.84)\n return(x / 0.08 - 11.5)\n elif(formula==33): return(Numeric.fabs(2 * x - 0.5))\n elif(formula==34): return(2 * x)\n elif(formula==35): return(2 * x - 0.5)\n elif(formula==36): return(2 * x - 1)\n return(0)", "def main():\n\texpression = input(\"Enter expression \")\n\tans = calculate(expression)\n\n\tprint(ans)", "def operand_present(input_str): # HELPER\n try:\n float(input_str)\n return True\n except ValueError:\n return False", "def test_parse():\n first = parse_formula(\"PO4H2(CH2)12CH3\")\n assert first == {\"P\":1, \"O\":4, \"H\":29, \"C\":13}\n\n second = parse_formula(\"H2O\")\n assert second == {\"H\":2, \"O\":1}", "def test_number_input(self):\r\n easy_eval = lambda x: calc.evaluator({}, {}, x)\r\n\r\n self.assertEqual(easy_eval(\"13\"), 13)\r\n self.assertEqual(easy_eval(\"3.14\"), 3.14)\r\n self.assertEqual(easy_eval(\".618033989\"), 0.618033989)\r\n\r\n self.assertEqual(easy_eval(\"-13\"), -13)\r\n self.assertEqual(easy_eval(\"-3.14\"), -3.14)\r\n self.assertEqual(easy_eval(\"-.618033989\"), -0.618033989)", "def validate(string):\n \n tokens = string.split()\n \n # Remembers if the previous token was an operator\n opflag = True\n \n ## Highly inefficient validity checking begins here ##\n \n # List of operators as they would appear in the infix expression\n operators = ['+', '-', '*', '/', '^', 'sqrt']\n \n # First and foremost, detect all unary minus signs and mark them as such\n for i in xrange(len(tokens)):\n # A unary minus is a minus operator which occurs after another operator\n # or after an open parenthesis.\n if tokens[i] in operators or tokens[i] == '(':\n if opflag:\n if tokens[i] == '-':\n tokens[i] = 'u-'\n # Leave opflag true to allow cascading of unary minuses\n elif tokens[i] in ['sqrt', '(']:\n # These operators can be cascaded, so leave them alone\n # Also, leave opflag true to handle a subsequent u-\n pass\n else:\n # Any other operator must be caught\n raise ExpressionError('Operators cannot be cascaded!')\n # We found an operator, but opflag isn't true. Set it.\n else:\n opflag = True\n else:\n # We found something other than an operator, or a ')'. If opflag is\n # false, and the token is not ')', then we have two adjacent\n # variables/numbers. 
This is also an invalid combination\n if not opflag and tokens[i] != ')':\n raise ExpressionError('Adjacent operands with no operator!')\n # Otherwise, unset opflag\n else:\n opflag = False\n \n # Check whether parentheses match\n s = Stack()\n for token in tokens:\n if token == '(':\n s.push(token)\n elif token == ')':\n if s.pop() != '(':\n raise ExpressionError('Parentheses do not match')\n if not s.is_empty():\n raise ExpressionError('Parentheses do not match')\n \n return tokens", "def input_equation(self, eq: str) -> None:\n if self.xmin >= self.xmax:\n raise Exception('Minimum > Maximum')\n\n increment = (self.xmax - self.xmin) / self.precision\n self.dependant = []\n\n x = self.xmin\n while x <= self.xmax:\n try:\n y = eval(eq)\n except ZeroDivisionError:\n print(f'Division by zero, x = {x}')\n x += increment\n except SyntaxError:\n print(f'Invalid equation: {eq}')\n x += increment\n except ValueError:\n print(f'Math domain error, {eq}: x = {x}')\n x += increment\n except TypeError:\n print('Can\\'t convert complex to float')\n x += increment\n else:\n self.dependant.append((x, y))\n x += increment\n self.equation = eq", "def test_calculate_multiplication_and_adding(self):\n result = self.calcuate.calcuate('1+2x3')\n expected_result = \"7\"\n self.assertEqual(expected_result, result)", "def test_expression_sanitizer(self):\n\n self.assertFalse(_is_math_expr_safe('INSERT INTO students VALUES (?,?)'))\n self.assertFalse(_is_math_expr_safe('import math'))\n self.assertFalse(_is_math_expr_safe('complex'))\n self.assertFalse(_is_math_expr_safe('__import__(\"os\").system(\"clear\")'))\n self.assertFalse(_is_math_expr_safe('eval(\"()._\" + \"_class_\" + \"_._\" +'\n ' \"_bases_\" + \"_[0]\")'))\n self.assertFalse(_is_math_expr_safe('2***2'))\n self.assertFalse(_is_math_expr_safe('avdfd*3'))\n self.assertFalse(_is_math_expr_safe('Cos(1+2)'))\n self.assertFalse(_is_math_expr_safe('hello'))\n self.assertFalse(_is_math_expr_safe('hello_world'))\n self.assertFalse(_is_math_expr_safe('1_2'))\n self.assertFalse(_is_math_expr_safe('2+-2'))\n self.assertFalse(_is_math_expr_safe('print(1.0)'))\n self.assertFalse(_is_math_expr_safe('1.1.1.1'))\n self.assertFalse(_is_math_expr_safe('abc.1'))\n\n self.assertTrue(_is_math_expr_safe('1+1*2*3.2+8*cos(1)**2'))\n self.assertTrue(_is_math_expr_safe('pi*2'))\n self.assertTrue(_is_math_expr_safe('-P1*cos(P2)'))\n self.assertTrue(_is_math_expr_safe('-P1*P2*P3'))\n self.assertTrue(_is_math_expr_safe('-P1'))\n self.assertTrue(_is_math_expr_safe('-1.*P1'))\n self.assertTrue(_is_math_expr_safe('-1.*P1*P2'))\n self.assertTrue(_is_math_expr_safe('-(P1)'))", "def valid_expression(expression):\n OPERATORS= '+*/-'\n if no_operators(expression) != True:\n return no_operators(expression)\n if no_paranthesis(expression) != True:\n return no_paranthesis(expression)\n if no_numbers(expression) != True:\n return no_numbers(expression)\n if invalid_characters(expression) != True:\n return invalid_characters(expression)\n if match_paranthesis(expression) == False:\n raise NotValidExpression('Not a valid expression, brackets mismatched.')\n number_operators = 0\n number_paranthesis = 0\n for i in expression:\n if i in OPERATORS:\n number_operators += 1\n elif i == '(' or i == ')':\n number_paranthesis +=1\n expression1 = expression[1:(len(expression) - 1)] # checks if the expression without the first and last character is valid\n if match_paranthesis(expression1) == False and ('(' in expression1 or ')' in expression1):\n raise NotValidExpression('Not a valid expression, 
brackets mismatched.') # if it is not, raises an appropiate error\n for i in range(0, len(expression) - 1):\n #Checks if an operator is missing,if there exists a number followed by ( or if there is a )before the number\n if expression[i] not in OPERATORS and expression[i] not in '()':\n if expression[i + 1] == '(':\n raise NotValidExpression('Not a valid expression, operator missing.')\n elif expression[i] in OPERATORS and expression[i + 1] in OPERATORS + ')' :\n raise NotValidExpression('Not a valid expression, wrong placement of operators')\n #Checks if an operator is placed wrongly , before ) or next to another operator\n if expression[i+1] not in OPERATORS and expression[i + 1] not in '()':\n if expression[i] == ')':\n raise NotValidExpression('Not a valid expression, operator missing.')\n elif expression[i+1] in OPERATORS and expression[i] in OPERATORS + '(':\n raise NotValidExpression('Not a valid expression, wrong placement of operators')\n if 2*number_operators != number_paranthesis: # an expression is valid only if the number of paranthesis is equal to the double of the number of operators\n raise NotValidExpression('Not a valid expression, wrong number of operands.')\n return True", "def is_math_line(line):\n if '=' in line:\n # Check it isn't some other command\n for cmd in CMD_LIST:\n if re.findall(f\"^{cmd} \", line):\n return False\n\n str_txt, non_str = gen_parse.get_str_between_delims(line, '\"')\n if any(j in non_str for j in '<>-+/*^'):\n return True\n return False", "def evaluate(self):\n self.getInput()\n try:\n self.result = eval(self.userInput)\n except ZeroDivisionError:\n self.entry.delete(0, END)\n self.entry.insert(0, \"Not a number\")\n except SyntaxError:\n self.entry.delete(0, END)\n self.entry.insert(0, \"Input error\")\n else:\n self.entry.delete(0, END)\n self.entry.insert(0, self.result)", "def test_equals2( self ) :\r\n root = Tk( )\r\n self.newtext = \"2/0\"\r\n self.e = Entry( root )\r\n EndVaule = 0\r\n self.e.insert( 0, EndVaule )\r\n e = self.e.get( )\r\n EndVaule = \"/\"\r\n self.e.insert( 0, EndVaule )\r\n EndVaule = 2\r\n self.e.insert( 0, EndVaule )\r\n e = self.e.get( )\r\n print(e)\r\n\r\n\r\n try :\r\n # evaluate the expression using the eval function\r\n self.value = eval( self.newtext )\r\n print( self.value )\r\n except ZeroDivisionError :\r\n self.e.delete( 0, END )\r\n self.e.insert( 0, 'Error' )\r\n print(\"Error\")\r\n except SyntaxError or NameError :\r\n self.e.delete( 0, END )\r\n self.e.insert( 0, 'Error' )\r\n else :\r\n if self.value > 999999999999999999 :\r\n self.e.delete( 0, END )\r\n self.e.insert( 0, 'Error' )\r\n else :\r\n\r\n self.e.delete( 0, END )\r\n self.e.insert( 0, self.value )", "def calculate_expression(self, txt):\n self.shunting_yard(self.text_parser(txt))\n return self.RPN()", "def testdiv_X_Y ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTupX, fracTupY, dictAdd, dictSub, dictMul, dictDiv in self.knownArithResultValues:\r\n\t\t\tfracX = eval ( r.sub ( 'frac.frac', fracTupX ) )\r\n\t\t\tfracY = eval ( r.sub ( 'frac.frac', fracTupY ) )\r\n\t\t\tdiv_fracX_fracY = fracX / fracY\r\n\t\t\tself.assertEqual ( div_fracX_fracY.toString ().split ()[0], dictDiv ['X/Y'] )", "def test_exponential_answer(self):\r\n answer = 50\r\n correct_responses = [\r\n \"50\", \"50.0\", \"5e1\", \"5e+1\",\r\n \"50e0\", \"50.0e0\", \"500e-1\"\r\n ]\r\n incorrect_responses = [\"\", \"3.9\", \"4.1\", \"0\", \"5.01e1\"]\r\n\r\n for input_str in correct_responses:\r\n result = calc.evaluator({}, {}, input_str)\r\n fail_msg = 
\"Expected '{0}' to equal {1}\".format(\r\n input_str, answer\r\n )\r\n self.assertEqual(answer, result, msg=fail_msg)\r\n\r\n for input_str in incorrect_responses:\r\n result = calc.evaluator({}, {}, input_str)\r\n fail_msg = \"Expected '{0}' to not equal {1}\".format(\r\n input_str, answer\r\n )\r\n self.assertNotEqual(answer, result, msg=fail_msg)", "def test_calculate_multiplication(self):\n result = self.calcuate.calcuate('3x3')\n expected_result = \"9\"\n self.assertEqual(expected_result, result)", "def testdiv_Y_X ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTupX, fracTupY, dictAdd, dictSub, dictMul, dictDiv in self.knownArithResultValues:\r\n\t\t\tfracX = eval ( r.sub ( 'frac.frac', fracTupX ) )\r\n\t\t\tfracY = eval ( r.sub ( 'frac.frac', fracTupY ) )\r\n\t\t\tdiv_fracY_fracX = fracY / fracX\r\n\t\t\tself.assertEqual ( div_fracY_fracX.toString ().split ()[0], dictDiv ['Y/X'] )", "def evaluate(self):\n try:\n test_val = self.expression()\n return test_val != 0\n except ValueError:\n raise ParseError(\"Could not evaluate expression.\")", "def calculate_expression(number1, number2, operator):\n\n if operator == '+':\n return number1 + number2\n elif operator == '-':\n return number1 - number2\n elif operator == '*':\n return number1 * number2", "def evaluate(expr: str) -> float:\n\n expr = ''.join(filter(lambda ch: ch in valid_characters, expr))\n if not expr:\n return float('NaN') # raise error instead?\n\n # 'Stacks'\n operators = []\n operands = []\n\n try:\n for t in tokenizer(expr):\n\n if isinstance(t, float):\n operands.append(t)\n elif t in openers:\n operators.append(t)\n\n elif t in binary_operators:\n while operators and precedence[operators[-1]] >= precedence[t]:\n operands.append(binary_operators[operators.pop()](operands.pop(), operands.pop()))\n operators.append(t)\n else:\n corresponding_opener = openers[closers.index(t)]\n while (op := operators.pop()) != corresponding_opener:\n operands.append(binary_operators[op](operands.pop(), operands.pop()))\n\n while operators:\n operands.append(binary_operators[operators.pop()](operands.pop(), operands.pop()))\n\n except ArithmeticError as e:\n raise e\n except (ValueError, IndexError): # One of the stacks runs out, i.e. 
invalid expression structure.\n raise InvalidExpressionError()\n\n # assert (len(operands) == 1)\n return operands.pop()", "def test_variable_expression(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"float alpha = 0.32\\nfloat gamma = (2.0*cos(alpha*pi)+1)**2\"\n )\n assert np.isclose(bb._var[\"gamma\"], (2.0 * np.cos(0.32 * np.pi) + 1) ** 2)", "def evaluate_math(query):\n # Final result\n evaluated_query = []\n\n math_expr = re.compile(r'(\\d+([.]\\d+)*|[+\\-/*])+\\d([.]\\d+)*$')\n\n for q in query:\n if math_expr.match(q):\n evaluated_query += [str(eval(q))]\n else:\n evaluated_query += [q]\n\n return evaluated_query", "def evaluate(s:str)->str:\n t = s.split()\n res = ''\n\n # Check valid operator \n if t[1] not in ['+','-']:\n return \"Error: Operator must be '+' or '-'.\"\n\n # check valid number \n try:\n t1 = int(t[0])\n t2 = int(t[2])\n \n except ValueError:\n return \"Error: Numbers must only contain digits.\"\n\n # check if numbers are 4 digits \n if (t1>9999 or t1 < -9999 or t2>9999 or t2<-9999):\n return \"Error: Numbers cannot be more than four digits.\"\n \n # addition \n if t[1] == '+':\n res = t1 + t2\n return str(res)\n \n # subtraction \n elif t[1] == '-':\n res = t1 -t2\n return str(res)", "def f3(x):\n return 1 / (1 + x**2)", "def test_scenario_costitem_equation(self):\n\n \"\"\" make sure everything got made in setup \"\"\"\n\n\n scenario_costs = self.scenario.get_costs()\n\n costitem_default_equation = CostItemDefaultEquations.objects.get(\n costitem=self.costitem,\n )\n self.assertIsNotNone(costitem_default_equation)\n costitem_default_equation.equation_tx = '=x*area*$'\n costitem_default_equation.save()\n\n scenario_costs2 = self.scenario.get_costs()\n\n self.assertNotEqual(scenario_costs, scenario_costs2)\n\n # check the equations are different\n ss = self.scenario_structure.structure\n ci_results = scenario_costs[ss.classification]['structures'][ss.code]['cost_data'][self.costitem.code]['results']\n\n ci_results2 = scenario_costs2[ss.classification]['structures'][ss.code]['cost_data'][self.costitem.code][\n 'results']\n\n self.assertNotEqual(ci_results['equation'], ci_results2['equation'])\n\n self.assertNotEqual(ci_results['equation_calc'], ci_results2['equation_calc'])\n\n # now double the PER UNIT COST and see that the 'construction' value doubles\n scenario_costs = self.scenario.get_costs()\n self.assertEqual(self.scenario_costitem.user_input_cost.amount, Decimal('1999.99'))\n\n self.scenario_costitem.user_input_cost.amount = Decimal('1999.99') * 2\n self.scenario_costitem.save()\n\n scenario_costs2 = self.scenario.get_costs()\n ci_results = scenario_costs[ss.classification]['structures'][ss.code]['cost_data'][self.costitem.code]['results']\n ci_results2 = scenario_costs2[ss.classification]['structures'][ss.code]['cost_data'][self.costitem.code]['results']\n\n self.assertNotEqual(ci_results['value_unformatted'], ci_results2['value_unformatted'])\n self.assertEqual(ci_results['value_unformatted'], ci_results2['value_unformatted']/2)\n\n # now double the a_area factor\n scenario_costs = self.scenario.get_costs()\n self.assertEqual(self.structure_costitem_default_factors.a_area, 2)\n self.structure_costitem_default_factors.a_area = self.structure_costitem_default_factors.a_area * 2\n self.structure_costitem_default_factors.save()\n\n scenario_costs2 = self.scenario.get_costs()\n ci_results = scenario_costs[ss.classification]['structures'][ss.code]['cost_data'][self.costitem.code]['results']\n ci_results2 = 
scenario_costs2[ss.classification]['structures'][ss.code]['cost_data'][self.costitem.code]['results']\n\n self.assertNotEqual(ci_results['value_unformatted'], ci_results2['value_unformatted'])\n self.assertEqual(ci_results['value_unformatted'], ci_results2['value_unformatted']/2)\n\n # now double the structure area\n scenario_costs = self.scenario.get_costs()\n self.assertEqual(self.scenario_structure.area, 1000)\n self.scenario_structure.area = self.scenario_structure.area * 2\n self.scenario_structure.save()\n\n scenario_costs2 = self.scenario.get_costs()\n ci_results = scenario_costs[ss.classification]['structures'][ss.code]['cost_data'][self.costitem.code]['results']\n ci_results2 = scenario_costs2[ss.classification]['structures'][ss.code]['cost_data'][self.costitem.code]['results']\n\n self.assertNotEqual(ci_results['value_unformatted'], ci_results2['value_unformatted'])\n self.assertEqual(ci_results['value_unformatted'], ci_results2['value_unformatted'] / 2)", "def test3():\r\n xmlstr = \"\"\"\r\n<math xmlns=\"http://www.w3.org/1998/Math/MathML\">\r\n <apply>\r\n <divide/>\r\n <cn>1</cn>\r\n <apply>\r\n <plus/>\r\n <cn>2</cn>\r\n <ci>γ</ci>\r\n </apply>\r\n </apply>\r\n</math>\r\n \"\"\"\r\n return formula(xmlstr)", "def test_cases():\r\n quadratic_roots(1,3,-21)\r\n quadratic_roots(2,-4,-6)\r\n quadratic_roots(1,4,-12)\r\n quadratic_roots(4,12,9)\r\n quadratic_roots(-2,-11,-21)\r\n quadratic_roots(4,1,4)\r\n quadratic_roots(1,1,0)\r\n quadratic_roots(1,0,-16)\r\n quadratic_roots(1,-14,-49)\r\n quadratic_roots(1,10,25)", "def parse_equations(eqs, ops):\n eeqs = []\n prop_list = ['unit of', 'commutative', 'associative', 'distributes over', 'inverse of', \n 'annihilates', 'idempotent', 'absorbs', 'absorptive', 'involutive']\n props = []\n for eq in eqs:\n if not any_in(prop_list, eq):\n eeqs.append(Eq.parse_eq(eq, ops))\n else:\n if 'unit of' in eq:\n m = re.search(\"^'(\\w+)'\\s+(left|right)?\\s*unit of\\s+'(\\w+)'$\", eq)\n unit, side, op = m.groups()\n props.append(Unit(unit, op, side))\n elif \"annihilates\" in eq: \n m = re.search(\"^'(\\w+)'\\s+(left|right)?\\s*annihilates\\s+'(\\w+)'$\", eq)\n unit, side, op = m.groups()\n props.append(Annih(unit, op, side))\n elif \"distributes over\" in eq:\n m = re.search(\"^'(\\w+)'\\s+(left|right)?\\s*distributes over\\s+'(\\w+)'$\", eq)\n op1, side, op2 = m.groups()\n props.append(Dist(op1, op2, side))\n elif \"absorbs\" in eq:\n m = re.search(\"^'(\\w+)'\\s+(left|right)?\\s*absorbs\\s+'(\\w+)'$\", eq)\n op1, side, op2 = m.groups()\n props.append(Absorb(op1, op2, side))\n elif \"inverse of\" in eq:\n m = re.search(\"^'(\\w+)'\\s+(left|right)?\\s*inverse of\\s+'(\\w+)'\\s+with\\s+'(\\w+)'$\", eq)\n uop, side, op, unit = m.groups()\n props.append(Inverse(uop, op, unit, side))\n elif \"absorptive\" in eq:\n m = re.search(\"^'(\\w+)'\\s+and\\s+'(\\w+)'\\s+absorptive$\", eq)\n op1, op2 = m.groups()\n props.append(Absorb(op1, op2, None))\n props.append(Absorb(op2, op1, None))\n else:\n m = re.search(\"^'(\\w+)'\\s+(.*)$\", eq)\n op = m.group(1)\n kws = splitstrip(m.group(2), \",\")\n if 'associative' in kws:\n props.append(Assoc(op))\n if 'commutative' in kws:\n props.append(Comm(op))\n if 'idempotent' in kws:\n props.append(Idemp(op))\n if 'involutive' in kws:\n props.append(Invol(op))\n\n return eeqs, props", "def test_validate_answer(self):\r\n sample_dict = {'x': (1, 2)}\r\n problem = self.build_problem(\r\n sample_dict=sample_dict,\r\n num_samples=10,\r\n tolerance=\"1%\",\r\n answer=\"x\"\r\n )\r\n 
self.assertTrue(problem.responders.values()[0].validate_answer('14*x'))\r\n self.assertFalse(problem.responders.values()[0].validate_answer('3*y+2*x'))", "def division():\r\n error_handler()\r\n f1.delete(0, END)\r\n d1 = float(operand.get())\r\n d2 = float(operator.get())\r\n result = d1 / d2\r\n f1.insert(10, str(result))", "def test_function_sqrt(self):\r\n self.assertEquals(preview.latex_preview('sqrt(3)'), r'\\sqrt{3}')", "def test00(self):\n a = 3\n cr = bcolz.eval(\"2 * a\", rootdir=self.rootdir)\n # print \"bcolz.eval ->\", cr\n self.assertTrue(cr == 6, \"eval does not work correctly\")", "def test_variables(x, y, z):\n a = x * y\n b = y * a\n c = a + b\n return c / z", "def get_staff_ans(self, answer):\r\n try:\r\n correct_ans = complex(answer)\r\n except ValueError:\r\n # When `correct_answer` is not of the form X+Yj, it raises a\r\n # `ValueError`. Then test if instead it is a math expression.\r\n # `complex` seems to only generate `ValueErrors`, only catch these.\r\n try:\r\n correct_ans = evaluator({}, {}, answer)\r\n except Exception:\r\n log.debug(\"Content error--answer '%s' is not a valid number\", answer)\r\n _ = self.capa_system.i18n.ugettext\r\n raise StudentInputError(\r\n _(\"There was a problem with the staff answer to this problem.\")\r\n )\r\n\r\n return correct_ans", "def division(self):\r\n global answer\r\n while True:\r\n try:\r\n easy_random1 = int(random.choice(string.digits))\r\n easy_random2 = int(random.choice(string.digits))\r\n easy_random3 = int(random.choice(string.digits))\r\n easy_random4 = int(random.choice(string.digits))\r\n print(f\"{easy_random1} / {easy_random2} / {easy_random3} / {easy_random4} = ?\")\r\n real_answer = easy_random1 / easy_random2 / easy_random3 / easy_random4\r\n answer = input(\"Enter answer: \")\r\n if answer.lower() == \"stop\":\r\n print(\"okay\")\r\n break\r\n if float(answer) == real_answer:\r\n print(\"CORRECT ANSWER\")\r\n else:\r\n print(\"WRONG ANSWER\")\r\n print(f\"the answer is {real_answer} sorry! 
try again\")\r\n except ValueError:\r\n return f'\"{answer}\" is not a valid number, only the string stop is allowed'", "def test01_math_operators(self):\n\n import _cppyy\n number = _cppyy.gbl.number\n\n assert (number(20) + number(10)) == number(30)\n assert (number(20) + 10 ) == number(30)\n assert (number(20) - number(10)) == number(10)\n assert (number(20) - 10 ) == number(10)\n assert (number(20) / number(10)) == number(2)\n assert (number(20) / 10 ) == number(2)\n assert (number(20) * number(10)) == number(200)\n assert (number(20) * 10 ) == number(200)\n assert (number(20) % 10 ) == number(0)\n assert (number(20) % number(10)) == number(0)\n assert (number(5) & number(14)) == number(4)\n assert (number(5) | number(14)) == number(15)\n assert (number(5) ^ number(14)) == number(11)\n assert (number(5) << 2) == number(20)\n assert (number(20) >> 2) == number(5)", "def eval2(text):\n\n numbers, operators = [], []\n for glyph in text.split():\n if glyph.isdigit():\n rhs = int(glyph)\n if operators and operators[-1] in '*/':\n lhs = numbers.pop()\n operating = operators.pop()\n if operating == '*':\n numbers.append(lhs * rhs)\n else:\n numbers.append(lhs // rhs)\n else:\n numbers.append(rhs)\n else:\n operators.append(glyph)\n\n while operators:\n lhs, rhs = numbers[0], numbers[1]\n operating = operators[0]\n if operating == '+':\n numbers = [lhs + rhs] + numbers[2:]\n else:\n numbers = [lhs - rhs] + numbers[2:]\n operators = operators[1:]\n\n return numbers.pop()", "def testCalculate(self):\r\n for i in range(len(self.__testExpressions)):\r\n self.__Calculator.setExpression(self.__testExpressions[i])\r\n self.__Calculator.calculateResult()\r\n self.assertEqual(self.__Calculator.getResult(), self.__testResult[i])", "def test_evaluate_div_expression(self):\n value = self.evaluate_common(\"4M div 2M\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Decimal, \"Expected Decimal\")\n self.assertTrue(value.value == 2, \"Expected 2\")\n value = self.evaluate_common(\"4D div 2M\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Double, \"Expected Double\")\n self.assertTrue(value.value == 2.0, \"Expected 2.0\")\n try:\n value = self.evaluate_common(\"4D div 0\")\n self.fail(\"Division by zero\")\n except odata.EvaluationError:\n pass\n value = self.evaluate_common(\"4F div 2D\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Double, \"Expected Double\")\n self.assertTrue(value.value == 2.0, \"Expected 2.0\")\n value = self.evaluate_common(\"5 div 2L\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int64, \"Expected Int64\")\n self.assertTrue(value.value == 2, \"Expected 2L\")\n value = self.evaluate_common(\"-5 div 2L\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int64, \"Expected Int64\")\n self.assertTrue(value.value == -2, \"Expected -2L\")\n try:\n value = self.evaluate_common(\"4 div '2'\")\n self.fail(\"String promotion to int\")\n except odata.EvaluationError:\n pass\n value = self.evaluate_common(\"4 div null\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int32, \"Expected Int32\")\n self.assertTrue(value.value is None, \"Expected None\")", "def check_for_float(check):", "def test_operator(self):\n\n tokens = list(Lexer(\"+-*/^%\").generate_tokens())\n answer = [Token(TokenType.PLUS),\n Token(TokenType.MINUS),\n Token(TokenType.MULTIPLY),\n Token(TokenType.DIVIDE),\n Token(TokenType.EXPONENT),\n Token(TokenType.MODULO)]\n self.assertEqual(tokens, answer)", "def test_product_keep_going(self):\r\n self.assertEquals(\r\n 
preview.latex_preview('2/3*4/5*6'),\r\n r'\\frac{2}{3}\\cdot \\frac{4}{5}\\cdot 6'\r\n )", "def parse_algebra(self):\r\n # 0.33 or 7 or .34 or 16.\r\n number_part = Word(nums)\r\n inner_number = (number_part + Optional(\".\" + Optional(number_part))) | (\".\" + number_part)\r\n # pyparsing allows spaces between tokens--`Combine` prevents that.\r\n inner_number = Combine(inner_number)\r\n\r\n # SI suffixes and percent.\r\n number_suffix = MatchFirst(Literal(k) for k in SUFFIXES.keys())\r\n\r\n # 0.33k or 17\r\n plus_minus = Literal('+') | Literal('-')\r\n number = Group(\r\n Optional(plus_minus) +\r\n inner_number +\r\n Optional(CaselessLiteral(\"E\") + Optional(plus_minus) + number_part) +\r\n Optional(number_suffix)\r\n )\r\n number = number(\"number\")\r\n\r\n # Predefine recursive variables.\r\n expr = Forward()\r\n\r\n # Handle variables passed in. They must start with letters/underscores\r\n # and may contain numbers afterward.\r\n inner_varname = Word(alphas + \"_\", alphanums + \"_\")\r\n varname = Group(inner_varname)(\"variable\")\r\n varname.setParseAction(self.variable_parse_action)\r\n\r\n # Same thing for functions.\r\n function = Group(inner_varname + Suppress(\"(\") + expr + Suppress(\")\"))(\"function\")\r\n function.setParseAction(self.function_parse_action)\r\n\r\n atom = number | function | varname | \"(\" + expr + \")\"\r\n atom = Group(atom)(\"atom\")\r\n\r\n # Do the following in the correct order to preserve order of operation.\r\n pow_term = atom + ZeroOrMore(\"^\" + atom)\r\n pow_term = Group(pow_term)(\"power\")\r\n\r\n par_term = pow_term + ZeroOrMore('||' + pow_term) # 5k || 4k\r\n par_term = Group(par_term)(\"parallel\")\r\n\r\n prod_term = par_term + ZeroOrMore((Literal('*') | Literal('/')) + par_term) # 7 * 5 / 4\r\n prod_term = Group(prod_term)(\"product\")\r\n\r\n sum_term = Optional(plus_minus) + prod_term + ZeroOrMore(plus_minus + prod_term) # -5 + 4 - 3\r\n sum_term = Group(sum_term)(\"sum\")\r\n\r\n # Finish the recursion.\r\n expr << sum_term # pylint: disable=W0104\r\n self.tree = (expr + stringEnd).parseString(self.math_expr)[0]", "def solve_part1(input, verbose=False):\n equations = parse(input)\n\n result = []\n for eq in equations:\n result.append(solve_equation_same_precedence(eq, verbose))\n\n if verbose:\n print(f\"results: {result}\")\n\n return sum(result)", "def quadratic_roots(a, b, c):\r\n if (((math.pow(b,2))-(4*a*c))> 0):\r\n print(\"Equation: \",str(a),\"x^2 + \",str(b),\"x + \",str(c), sep=\"\")\r\n print(\"Two roots.\")\r\n print(\"x =\",((-b + math.sqrt(math.pow(b,2)-4*a*c))/(2*a)))\r\n print(\"x =\",((-b - math.sqrt(math.pow(b,2)-4*a*c))/(2*a)))\r\n \r\n elif (((math.pow(b,2))-4*a*c) == 0):\r\n print(\"Equation: \",str(a),\"x^2 + \",str(b),\"x + \",str(c), sep=\"\")\r\n print(\"One root.\")\r\n print(\"x =\",((-b + math.sqrt(math.pow(b,2)-4*a*c))/(2*a)))\r\n \r\n else :\r\n print(\"Equation: \",str(a),\"x^2 + \",str(b),\"x + \",str(c), sep=\"\")\r\n print(\"No roots.\")", "def test_expr(self):\n self.common_test_expr(True)", "def test_complicated(self):\r\n self.assertEquals(\r\n preview.latex_preview('11*f(x)+x^2*(3||4)/sqrt(pi)'),\r\n r'11\\cdot \\text{f}(x)+\\frac{x^{2}\\cdot (3\\|4)}{\\sqrt{\\pi}}'\r\n )\r\n\r\n self.assertEquals(\r\n preview.latex_preview('log10(1+3/4/Cos(x^2)*(x+1))',\r\n case_sensitive=True),\r\n (r'\\log_{10}\\left(1+\\frac{3}{4\\cdot \\text{Cos}\\left(x^{2}\\right)}'\r\n r'\\cdot (x+1)\\right)')\r\n )" ]
[ "0.6916226", "0.68524635", "0.67907655", "0.67907655", "0.66762483", "0.66166973", "0.6573047", "0.6543683", "0.6413765", "0.62079144", "0.61546904", "0.60421985", "0.603584", "0.60189974", "0.6009543", "0.6008097", "0.5994459", "0.5980641", "0.5955912", "0.59496975", "0.5947985", "0.59205186", "0.590508", "0.5897496", "0.5896991", "0.5891947", "0.58678675", "0.5861261", "0.5837498", "0.5814872", "0.5812304", "0.5801363", "0.5785715", "0.57729125", "0.57662374", "0.57651806", "0.57494396", "0.5739683", "0.5739143", "0.5738641", "0.57346326", "0.5721371", "0.57195145", "0.57136554", "0.5695933", "0.5675956", "0.56452876", "0.56375796", "0.56264305", "0.56159467", "0.5607217", "0.5596181", "0.55877024", "0.55733126", "0.55679274", "0.55651975", "0.5549519", "0.5543704", "0.55190444", "0.54873496", "0.5474705", "0.547369", "0.5467338", "0.54633266", "0.5454939", "0.5451169", "0.5447781", "0.54440266", "0.54400235", "0.5429491", "0.54293174", "0.54281473", "0.54230833", "0.5420146", "0.54163504", "0.54075295", "0.54044604", "0.5400651", "0.53893375", "0.53824157", "0.5375828", "0.53686184", "0.5358362", "0.5356413", "0.5341952", "0.53405213", "0.5333418", "0.53202075", "0.5313688", "0.5311327", "0.53023297", "0.5301156", "0.5292177", "0.5286957", "0.52856255", "0.5282081", "0.52818817", "0.52811676", "0.5275589", "0.5274117", "0.5271851" ]
0.0
-1
Load PTB raw data from data directory "data_path". Reads PTB text files, converts strings to integer ids, and performs minibatching of the inputs.
def ptb_raw_data(data_path=None): train_path = os.path.join(data_path, "ptb.train.txt") valid_path = os.path.join(data_path, "ptb.valid.txt") test_path = os.path.join(data_path, "ptb.test.txt") word_to_id = _build_vocab(train_path) train_data = _file_to_word_ids(train_path, word_to_id) valid_data = _file_to_word_ids(valid_path, word_to_id) test_data = _file_to_word_ids(test_path, word_to_id) vocabulary = len(word_to_id) return train_data, valid_data, test_data, vocabulary
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ptb_raw_data(data_path=None):\n\n\t# train_path = os.path.join(data_path, \"ptb.train.txt\")\n\t# valid_path = os.path.join(data_path, \"ptb.valid.txt\")\n\t# test_path = os.path.join(data_path, \"ptb.test.txt\")\n\n\tdata = np.load(data_path)\n\t# data = np.load(data_path).item()\n\t# f = open(data_path)\n\t# data = f.readlines()\n\tword_to_id, id_to_word, wordList = build_vocab_(data)\n\t# word_to_id = _build_vocab(train_path)\n\ttrain_data = _file_to_word_ids(wordList[int(len(wordList)*0.3):int(len(wordList)*1.0)], word_to_id)\n\tvalid_data = _file_to_word_ids(wordList[int(len(wordList)*0.2):int(len(wordList)*0.3)], word_to_id)\n\ttest_data = _file_to_word_ids(wordList[int(len(wordList)*0):int(len(wordList)*0.2)], word_to_id)\n\tvocabulary = len(word_to_id)\n\treturn train_data, valid_data, test_data, vocabulary", "def ptb_raw_data(data_path=None, prefix=\"ptb\"):\n\n train_path = os.path.join(data_path, prefix + \".train.txt\")\n valid_path = os.path.join(data_path, prefix + \".valid.txt\")\n test_path = os.path.join(data_path, prefix + \".test.txt\")\n train_w = _read_words(train_path)\n valid_w = _read_words(valid_path)\n test_w = _read_words(test_path)\n word_to_id, id_2_word = _build_vocab(train_w)\n train_data = _file_to_word_ids(train_w, word_to_id)\n valid_data = _file_to_word_ids(valid_w, word_to_id)\n test_data = _file_to_word_ids(test_w, word_to_id)\n return train_data, valid_data, test_data, word_to_id, id_2_word", "def ptb_raw_data(data_path=None, prefix=\"ptb\"):\n\n train_path = os.path.join(data_path, prefix + \".train.txt\")\n valid_path = os.path.join(data_path, prefix + \".valid.txt\")\n test_path = os.path.join(data_path, prefix + \".test.txt\")\n\n word_to_id, id_2_word = _build_vocab(train_path)\n train_data = _file_to_word_ids(train_path, word_to_id)\n valid_data = _file_to_word_ids(valid_path, word_to_id)\n test_data = _file_to_word_ids(test_path, word_to_id)\n return train_data, valid_data, test_data, word_to_id, id_2_word", "def ptb_raw_data(data_path=None):\n\n train_path = os.path.join(data_path, \"ptb.train.txt\")\n valid_path = os.path.join(data_path, \"ptb.valid.txt\")\n test_path = os.path.join(data_path, \"ptb.test.txt\")\n\n word_to_id, unigrams = _build_vocab(train_path)\n train_data = _file_to_word_ids(train_path, word_to_id)\n valid_data = _file_to_word_ids(valid_path, word_to_id)\n test_data = _file_to_word_ids(test_path, word_to_id)\n vocabulary = len(word_to_id)\n return train_data, valid_data, test_data, vocabulary, unigrams", "def ptb_raw_data(data_path, simple):\n\n train_path = os.path.join(data_path, \"ptb.train.txt\")\n valid_path = os.path.join(data_path, \"ptb.valid.txt\")\n test_path = os.path.join(data_path, \"ptb.test.txt\")\n\n word_to_id, probs = _build_vocab(train_path)\n train_data = _file_to_word_ids(train_path, word_to_id, simple)\n valid_data = _file_to_word_ids(valid_path, word_to_id, simple)\n test_data = _file_to_word_ids(test_path, word_to_id, simple)\n return train_data, valid_data, test_data, probs", "def ptb_raw_data(data_path=None, min_sentence_length=1, max_sentence_length=100):\n\n train_path = os.path.join(data_path, \"ptb.train.txt\")\n valid_path = os.path.join(data_path, \"ptb.valid.txt\")\n test_path = os.path.join(data_path, \"ptb.test.txt\")\n\n word_to_id = _build_vocab(train_path)\n train_data = _file_to_word_ids(train_path, word_to_id, min_sentence_length, max_sentence_length)\n valid_data = _file_to_word_ids(valid_path, word_to_id, min_sentence_length, max_sentence_length)\n test_data = 
_file_to_word_ids(test_path, word_to_id, min_sentence_length, max_sentence_length)\n vocabulary = len(word_to_id)\n return train_data, valid_data, test_data, vocabulary, word_to_id", "def ptb_char_raw_data(data_path=None, prefix=\"ptb.char\"):\n\n train_path = os.path.join(data_path, prefix + \".train.txt\")\n valid_path = os.path.join(data_path, prefix + \".valid.txt\")\n test_path = os.path.join(data_path, prefix + \".test.txt\")\n train_w = open(train_path).read().split()\n valid_w = open(valid_path).read().split()\n test_w = open(test_path).read().split()\n unique_chars = set(train_w)\n word_to_id = {k: v for k, v in zip(unique_chars, range(len(unique_chars)))}\n id_2_word = {v: k for k, v in word_to_id.items()}\n train_data = _file_to_word_ids(train_w, word_to_id)\n valid_data = _file_to_word_ids(valid_w, word_to_id)\n test_data = _file_to_word_ids(test_w, word_to_id)\n return train_data, valid_data, test_data, word_to_id, id_2_word", "def load_data(data_path, input_shape):\n # load the original data.\n orig_data = pickle.load(open(data_path, 'rb'), encoding='iso-8859-1')\n\n # Get the set of snr & modulations\n mode_snr = list(orig_data.keys())\n mods, snrs = [sorted(list(set(x[i] for x in mode_snr))) for i in [0, 1]]\n mods.remove('AM-SSB')\n mods.remove('WBFM')\n mods.remove('8PSK')\n mods.remove('BPSK')\n\n # Build the train set.\n samples = []\n labels = []\n samples_snr = []\n mod2cate = dict()\n cate2mod = dict()\n for cate in range(len(mods)):\n cate2mod[cate] = mods[cate]\n mod2cate[mods[cate]] = cate\n\n for snr in snrs:\n for mod in mods:\n samples.extend(orig_data[(mod, snr)])\n labels.extend(1000 * [mod2cate[mod]])\n samples_snr.extend(1000 * [snr])\n\n shape = [len(labels), height, width, 1]\n samples = np.array(samples).reshape(shape)\n samples_snr = np.array(samples_snr)\n labels = np.array(labels)\n return samples, labels, mod2cate, cate2mod, snrs, mods, samples_snr", "def text8_raw_data(data_path=None):\n text8 = _read_chars(data_path)\n train = text8[:int(9e7)]\n val = text8[int(9e7):int(95e6)]\n test = text8[int(95e6):]\n word_to_id, id_2_word = _build_vocab(train)\n train_data = _file_to_word_ids(train, word_to_id)\n valid_data = _file_to_word_ids(val, word_to_id)\n test_data = _file_to_word_ids(test, word_to_id)\n return train_data, valid_data, test_data, word_to_id, id_2_word", "def __init__(self, data_path):\r\n\t\tfile_names = ['data_batch_%d' % i for i in range(1,6)]\r\n\t\tfile_names.append('test_batch')\r\n\r\n\t\tX = []\r\n\t\ty = []\r\n\t\tfor file_name in file_names:\r\n\t\t\twith open(data_path + file_name) as fin:\r\n\t\t\t\tdata_dict = cPickle.load(fin)\r\n\t\t\tX.append(data_dict['data'].ravel())\r\n\t\t\ty = y + data_dict['labels']\r\n\r\n\t\tself.X = np.asarray(X).reshape(60000, 32*32*3)\r\n\t\tself.y = np.asarray(y)\r\n\r\n\t\tfin = open(data_path + 'batches.meta')\r\n\t\tself.LABEL_NAMES = cPickle.load(fin)['label_names']\r\n\t\tfin.close()", "def _load_dataset(self, path):\n\t\twhile True:\n\t\t\t\n\t\t\ttry:\n\t\t\t\tX_train = np.load(\"data/X_train.npy\")\n\t\t\t\tX_val = np.load(\"data/X_val.npy\")\n\t\t\t\tY_train = np.load(\"data/Y_train.npy\")\n\t\t\t\tY_val = np.load(\"data/Y_val.npy\")\n\t\t\t\tbreak\n\n\t\t\texcept FileNotFoundError:\n\n\t\t\t\tdata_temp = np.zeros((50000,64,64,3))\n\t\t\t\tlabel_temp = []\n\n\t\t\t\tfor i in range(5):\n\n\t\t\t\t\tfile = path + str(i+1)\n\t\t\t\t\twith open(file, 'rb') as fo:\n\t\t\t\t\t\ttemp_element = pickle.load(fo, encoding='bytes')\n\n\t\t\t\t\ttemp_data = 
temp_element[b'data']\n\t\t\t\t\tlabel_temp.extend(temp_element[b'labels'])\n\n\t\t\t\t\tfor j in range(10000):\n\t\t\t\t\t\tdata_temp[j+(i*10000)] = self._reshape(temp_data[j])\n\n\t\t\t\tlabel_temp = np.eye(10)[np.array(label_temp)]\n\n\t\t\t\tnp.random.seed(123)\n\t\t\t\tpermutations = list(np.random.permutation(50000))\n\t\t\t\tX = data_temp[permutations, :, : , :] \n\t\t\t\tY = label_temp[permutations, :]\n\t\t\t\tX_train = X[0:40000, :, :, :] \n\t\t\t\tY_train = Y[0:40000, :]\n\t\t\t\tX_val = X[40000:50000, :, :, :] \n\t\t\t\tY_val = Y[40000:50000, :]\n\n\t\t\t\tnp.save(\"./data/X_train\", X_train)\n\t\t\t\tnp.save(\"./data/X_val\", X_val)\n\t\t\t\tnp.save(\"./data/Y_train\", Y_train)\n\t\t\t\tnp.save(\"./data/Y_val\", Y_val)\n\t\t\t\tbreak\n\n\t\treturn X_train, X_val, Y_train, Y_val", "def pi_raw_data(config, data_path=None):\n # Download the data: prem, hyp, label x train, val, test = 9 files\n prem_train_path = os.path.join(data_path, \"pi.prem.train\")\n hyp_train_path = os.path.join(data_path, \"pi.hyp.train\")\n label_train_path = os.path.join(data_path, \"pi.label.train\")\n\n prem_val_path = os.path.join(data_path, \"pi.prem.val\")\n hyp_val_path = os.path.join(data_path, \"pi.hyp.val\")\n label_val_path = os.path.join(data_path, \"pi.label.val\")\n\n prem_test_path = os.path.join(data_path, \"pi.prem.test\")\n hyp_test_path = os.path.join(data_path, \"pi.hyp.test\")\n label_test_path = os.path.join(data_path, \"pi.label.test\")\n\n # read train, val, test data\n prem_train, prem_train_len = _read_prems(prem_train_path)\n hyp_train, hyp_train_len = _read_hyps(hyp_train_path)\n label_train = _read_labels(label_train_path)\n\n prem_val, prem_val_len = _read_prems(prem_val_path) # originally has len_cap=max(prem_train_len)\n hyp_val, hyp_val_len = _read_hyps(hyp_val_path)\n label_val = _read_labels(label_val_path)\n\n prem_test, prem_test_len = _read_prems(prem_test_path)\n hyp_test, hyp_test_len = _read_hyps(hyp_test_path)\n label_test = _read_labels(label_test_path)\n\n word_to_id = glove._get_glove_vocab(\"glove/glove.6B.list\", config.vocab_limit)\n # word_to_id = _get_vocab(prem_train, hyp_train)\n\n train_data = (_sentences_to_word_ids(prem_train, word_to_id), _sentences_to_word_ids(hyp_train, word_to_id), prem_train_len, hyp_train_len, label_train)\n valid_data = (_sentences_to_word_ids(prem_val, word_to_id), _sentences_to_word_ids(hyp_val, word_to_id), prem_val_len, hyp_val_len, label_val)\n test_data = (_sentences_to_word_ids(prem_test, word_to_id), _sentences_to_word_ids(hyp_test, word_to_id), prem_test_len, hyp_test_len, label_test)\n\n return train_data, valid_data, test_data", "def load_data(path='./data/train'):\n print(\"Loading IMDB Data...\")\n data = []\n\n dir = os.path.dirname(__file__)\n file_list = glob.glob(os.path.join(dir, path + '/pos/*'))\n file_list.extend(glob.glob(os.path.join(dir, path + '/neg/*')))\n print(\"Parsing %s files\" % len(file_list))\n for i, f in enumerate(file_list):\n with open(f, \"r\", encoding=\"utf8\") as openf:\n s = openf.read()\n data.append(imp.preprocess(s)) # NOTE: Preprocessing code called here on all reviews\n return data", "def load_data(path_dataset):\n data = read_txt(path_dataset)[1:]\n return preprocess_data(data)", "def sample(data_path, **kwargs):\n\tprocessor = SampleProcessor(kwargs)\n\ttry:\n\t\tprocessor.traverse(data_path)\n\t\tprocessor.output()\n\tfinally:\n\t\tprocessor.close()", "def load_ptb_dataset(name='ptb', path='raw_data'):\n path = os.path.join(path, name)\n logging.info(\"Load or Download Penn 
TreeBank (PTB) dataset > {}\".format(path))\n\n # Maybe dowload and uncompress tar, or load exsisting files\n maybe_download_and_extract(PTB_FILENAME, path, PTB_URL, extract=True)\n\n data_path = os.path.join(path, 'simple-examples', 'data')\n train_path = os.path.join(data_path, \"ptb.train.txt\")\n valid_path = os.path.join(data_path, \"ptb.valid.txt\")\n test_path = os.path.join(data_path, \"ptb.test.txt\")\n\n word_to_id = nlp.build_vocab(nlp.read_words(train_path))\n\n train_data = nlp.words_to_word_ids(nlp.read_words(train_path), word_to_id)\n valid_data = nlp.words_to_word_ids(nlp.read_words(valid_path), word_to_id)\n test_data = nlp.words_to_word_ids(nlp.read_words(test_path), word_to_id)\n vocab_size = len(word_to_id)\n\n # logging.info(nlp.read_words(train_path)) # ... 'according', 'to', 'mr.', '<unk>', '<eos>']\n # logging.info(train_data) # ... 214, 5, 23, 1, 2]\n # logging.info(word_to_id) # ... 'beyond': 1295, 'anti-nuclear': 9599, 'trouble': 1520, '<eos>': 2 ... }\n # logging.info(vocabulary) # 10000\n # exit()\n return train_data, valid_data, test_data, vocab_size", "def _load_split_data(self, dataset_path):\n for i, prefix in enumerate(['train', 'dev', 'test']):\n filename = os.path.join(dataset_path, '{}.txt'.format(prefix))\n knowledge, src, tgt = self._load_multi_data(filename)\n self.group_text_data[0].append(knowledge)\n self.group_text_data[1].append(src)\n self.group_text_data[2].append(tgt)", "def preprocess_and_save_data(self, dataset_path):\n # File starts with copyright line - irrelevant information so remove\n text = self._load_data(dataset_path)\n text = text[81:]\n\n # Replace punctuation as tokens for text processing\n token_dict = self.token_lookup()\n for key, value in token_dict.items():\n text = text.replace(key, ' {} '.format(value))\n\n # Transform text for use in lookup tables\n text = text.lower()\n text = text.split()\n\n # Create lookup tables\n vocab_to_int, int_to_vocab = self._create_lookup_tables(text)\n int_text = [vocab_to_int[word] for word in text]\n\n pickle.dump((int_text, vocab_to_int, int_to_vocab, token_dict),\n open(\"preprocess.p\", \"wb\"))", "def _load_dataset(self, path):\n\t\twhile True:\n\t\t\t\n\t\t\ttry:\n\t\t\t\tX_test = np.load(\"data/X_test.npy\")\n\t\t\t\tY_test = np.load(\"data/Y_test.npy\")\n\t\t\t\tbreak\n\n\t\t\texcept FileNotFoundError:\n\n\t\t\t\tX_test = np.zeros((10000,64,64,3))\n\t\t\t\tY_test = []\n\n\t\t\t\t\n\t\t\t\twith open(path, 'rb') as fo:\n\t\t\t\t\ttemp_element = pickle.load(fo, encoding='bytes')\n\n\t\t\t\ttemp_data = temp_element[b'data']\n\t\t\t\tY_test.extend(temp_element[b'labels'])\n\n\t\t\t\tfor j in range(10000):\n\t\t\t\t\tX_test[j] = self._reshape(temp_data[j])\n\n\t\t\t\tY_test = np.eye(10)[np.array(Y_test)]\n\t\t\t\t\n\t\t\t\tnp.save(\"./data/X_test\", X_test)\n\t\t\t\tnp.save(\"./data/Y_test\", Y_test)\n\n\t\t\t\tbreak\n\n\n\t\treturn X_test, Y_test", "def loadRaw(self, path, preprocfunc=None):\n # Only for 8 and 32 bit images\n depth = self.getDepth()\n if depth==1:\n mamba.raiseExceptionOnError(mambaCore.ERR_BAD_DEPTH)\n \n # Loading the file\n f = file(path, 'rb')\n data = f.read()\n f.close()\n \n # Preprocessing the data if a function was given\n if preprocfunc:\n data = preprocfunc(data)\n \n # Verification over data size\n (w,h) = self.getSize()\n im_size = w*h*(depth/8)\n assert(len(data)==im_size*self.length)\n \n # Loading the data\n for i,im in enumerate(self.seq):\n err = mambaCore.MB_Load(im.mbIm, data[i*im_size:(i+1)*im_size], im_size)\n mamba.raiseExceptionOnError(err)\n 
self.name = path", "def load_preprocess_test_batch(batch_id, batch_size):\r\n filename = 'preprocess_test_' + str(batch_id) + '.p'\r\n features, labels = pickle.load(open(filename, mode='rb'))\r\n# labels = np.argmax(labels,1)\r\n# num = len(labels)\r\n# arr = np.zeros((num, 1))\r\n# for i in range(num):\r\n# arr[i][0] = labels[i]\r\n# ind = [i for i in range(len(features))]\r\n# random.shuffle(ind)\r\n# features = features[ind]\r\n# labels = labels[ind]\r\n\r\n # Return the training data in batches of size <batch_size> or less\r\n return features[1200:batch_size],labels[1200:batch_size]\r\n #return batch_features_labels(features, labels, batch_size)\r", "def occult_raw_data(path=None):\n if not path:\n path = _get_filename()\n word_to_id = _build_vocab(path)\n all_data = _file_to_char_ids(path, word_to_id)\n # it's just one massive sequence!\n # now we need to slice it up\n # into train, test and valid\n num_chars = len(all_data)\n all_data = np.array(all_data)\n train_end = num_chars//100 * 98\n train = all_data[:train_end, ...]\n test_end = train_end + num_chars//100\n test = all_data[train_end:test_end, ...]\n valid = all_data[test_end:, ...]\n\n return train, valid, test, word_to_id", "def rnnlm_raw_data(data_path, vocab_path):\n\ttrain_path = os.path.join(data_path, \"train\")\n\tvalid_path = os.path.join(data_path, \"valid\")\n\tword_to_id = _build_vocab(vocab_path)\n\ttrain_data = _file_to_word_ids(train_path, word_to_id)\n\tvalid_data = _file_to_word_ids(valid_path, word_to_id)\n\tvocabulary = len(word_to_id)\n\treturn train_data, valid_data, vocabulary, word_to_id", "def load_training_data(file_path):\n return load_data(file_path)", "def parsing(self, data_path, header=0):\n df_content = []\n csvfiles = glob.glob(data_path)\n selected_cols = list(self.data_features)\n selected_cols.append('workload.type')\n selected_cols.append('workload.appname')\n\n for csv in csvfiles:\n data = pd.read_csv(csv, index_col=None, header=header, usecols=selected_cols)\n data[self.data_features] = self.abnormal_detection(data[self.data_features])\n df_content.append(data.dropna(axis=0))\n self.dataset = pd.concat(df_content, sort=False)", "def preprocess(self, path=None):\n path = path if path else self.path\n data = Data.load(path=path).get(Data.__name__)\n for subject in data:\n subject.preprocess(model=self.model, detector=self.detector)\n self.data.append(subject)\n\n return self.data", "def load_from_planetoid_files(dataset_name, path):\n\n def _sample_mask(idx, l):\n \"\"\"Create mask.\"\"\"\n mask = np.zeros(l)\n mask[idx] = 1\n return np.array(mask, dtype=np.bool)\n\n def _parse_index_file(filename):\n \"\"\"Parse index file.\"\"\"\n index = []\n for line in open(filename):\n index.append(int(line.strip()))\n return index\n\n def _load_file(name):\n \"\"\"Load from data file.\"\"\"\n filename = 'ind.{}.{}'.format(dataset_name, name)\n filename = os.path.join(path, filename)\n with open(filename, 'rb') as f:\n if sys.version_info > (3, 0):\n return pickle.load(f, encoding='latin1') # pylint: disable=unexpected-keyword-arg\n else:\n return pickle.load(f)\n\n x = _load_file('x')\n y = _load_file('y')\n tx = _load_file('tx')\n ty = _load_file('ty')\n allx = _load_file('allx')\n ally = _load_file('ally')\n graph = _load_file('graph')\n\n filename = 'ind.{}.test.index'.format(dataset_name)\n filename = os.path.join(path, filename)\n test_idx_reorder = _parse_index_file(filename)\n test_idx_range = np.sort(test_idx_reorder)\n\n if dataset_name == 'citeseer':\n # Fix citeseer dataset (there are 
some isolated nodes in the graph).\n # Find isolated nodes, add them as zero-vecs into the right position.\n test_idx_range_full = range(\n min(test_idx_reorder),\n max(test_idx_reorder) + 1)\n tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))\n tx_extended[test_idx_range - min(test_idx_range), :] = tx\n tx = tx_extended\n ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))\n ty_extended[test_idx_range - min(test_idx_range), :] = ty\n ty = ty_extended\n\n features = sp.vstack((allx, tx)).tolil()\n features[test_idx_reorder, :] = features[test_idx_range, :]\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))\n\n labels = np.vstack((ally, ty))\n labels[test_idx_reorder, :] = labels[test_idx_range, :]\n\n idx_test = test_idx_range.tolist()\n idx_train = range(len(y))\n idx_val = range(len(y), len(y) + 500)\n\n train_mask = _sample_mask(idx_train, labels.shape[0])\n val_mask = _sample_mask(idx_val, labels.shape[0])\n test_mask = _sample_mask(idx_test, labels.shape[0])\n\n y_train = np.zeros(labels.shape)\n y_val = np.zeros(labels.shape)\n y_test = np.zeros(labels.shape)\n y_train[train_mask, :] = labels[train_mask, :]\n y_val[val_mask, :] = labels[val_mask, :]\n y_test[test_mask, :] = labels[test_mask, :]\n\n return (adj, features, y_train, y_val, y_test, train_mask, val_mask,\n test_mask, labels)", "def loadTestData():\n path = raw_input(\"Enter the path of Test Data: \")\n data = np.genfromtxt(path, delimiter=',', dtype=int)\n\n labels = data[:, -1]\n\n unwantedLabels = [4, 5, 6, 7, 8, 9]\n listToDelete = []\n for i, line in enumerate(range(len(data))):\n if labels[i] in unwantedLabels:\n listToDelete.append(i)\n\n actualData = np.delete(data, listToDelete, axis=0)\n\n # print(actualData.shape)\n # Separating the labels and data into different arrays\n actualLabels = actualData[:, -1]\n actualData = actualData[:, :-1]\n\n actualData = pre.scale(actualData)\n\n # Change the label vector to label matrix\n # If Label is 2 then it becomes [0, 1, 0]\n labelMatrix = np.zeros((actualLabels.shape[0], 4))\n for j in range(len(actualLabels)):\n if actualLabels[j] == 0:\n labelMatrix[j][0] = 1\n if actualLabels[j] == 1:\n labelMatrix[j][1] = 1\n if actualLabels[j] == 2:\n labelMatrix[j][2] = 1\n if actualLabels[j] == 3:\n labelMatrix[j][3] = 1\n\n return actualData, actualLabels", "def load_data_and_labels(self):\n gen = image.ImageDataGenerator()\n target_size = (224,224)\n if self.preprocess:\n print('Preprocessing data...')\n if not os.path.isdir(self.pproc_dir()):\n os.mkdir(self.pproc_dir())\n \n batch_arr = []\n for ld,segment in [(self.train_dir(), 'train'),\n (self.valid_dir(), 'valid')]:\n # TODO(ness): segment = os.basename(ld)\n flowgen = gen.flow_from_directory(\n ld,\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1)\n # Save the batches using method defined in utils.py\n data = np.concatenate([flowgen.next() for i in range(flowgen.n)])\n batches_dir = self.pproc_dir() + segment + '-bc'\n save_array(batches_dir, data)\n \n # Save the classes.\n cls_dir = self.pproc_dir() + segment + '-cl'\n save_array(cls_dir, flowgen.classes)\n \n batch_arr.append((data, flowgen.classes, flowgen.class_indices))\n \n # Set the data.\n self.training_data = batch_arr[0][0]\n self.validation_data = batch_arr[1][0]\n \n # Classes are zero-indexed and represent a category in\n # numerical form. 
So if the classes are 'dog' and 'cat',\n # the possible class values will be 0 and 1.\n self.trn_classes = batch_arr[0][1]\n self.val_classes = batch_arr[1][1]\n \n # Labels are the one-hot encoded (i.e. categorical)\n # version of the classes. In other words, if there are\n # 5 classes and an element belongs to class 2,\n # its label will be [0,0,1,0,0] (index 1).\n self.training_labels = to_categorical(batch_arr[0][1])\n self.validation_labels = to_categorical(batch_arr[1][1])\n \n # Class indices are dictionaries of the form\n # {'category_name': 0, 'category_name_2: 1}. They\n # make the mapping between numerical class indices and\n # a human-readable category name. They are (should be...)\n # the same for validation and training, so only load them\n # once, after sanity checking.\n self.cindices = batch_arr[0][2]\n print('Done preprocessing.')\n else:\n print('Loading data...')\n # Load the pre-saved data using methods defined in utils.py. See\n # preprocessing branch for the meaning of the data.\n self.training_data = load_array(self.pproc_dir() + 'train-bc')\n self.validation_data = load_array(self.pproc_dir() + 'valid-bc')\n self.trn_classes = load_array(self.pproc_dir() + 'train-cl')\n self.val_classes = load_array(self.pproc_dir() + 'valid-cl')\n self.training_labels = to_categorical(self.trn_classes)\n self.validation_labels = to_categorical(self.val_classes)\n \n # To get the class indices, we create the generator. It's cheap to\n # run since it doesn't actually load all the data.\n flowgen = gen.flow_from_directory(\n self.train_dir(),\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1) \n self.cindices = flowgen.class_indices\n print('Done loading.')", "def preprocess_and_save_data(source_path, target_path, text_to_ids):\n # Preprocess\n source_text = load_data(source_path)\n target_text = load_data(target_path)\n\n source_text = source_text.lower()\n target_text = target_text.lower()\n\n source_vocab_to_int, source_int_to_vocab = create_lookup_tables(source_text)\n target_vocab_to_int, target_int_to_vocab = create_lookup_tables(target_text)\n\n source_text, target_text = text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int)\n\n # Save Data\n with open('preprocess.p', 'wb') as out_file:\n pickle.dump((\n (source_text, target_text),\n (source_vocab_to_int, target_vocab_to_int),\n (source_int_to_vocab, target_int_to_vocab)), out_file)", "def loadData(self, file):\n self.data = batchImport(file, self.ps)", "def _read_data_file(self, path_model_id):\n\n path_dataset_file = path_model_id.joinpath('training_set.csv')\n\n with path_dataset_file.open(mode='r', newline='') as f:\n csv_reader = reader(f, delimiter=',')\n rows = list(csv_reader)\n\n self.example_count = len(rows)\n\n img_files = [path.join(f'label_{row[1]}', row[0]) for row in rows]\n enc_labels = self.class_le.fit_transform([row[1] for row in rows])\n \n self.files_labels = [[img_files[i], enc_labels[i]]\n for i in range(self.example_count)]", "async def process_data_from_path_by_chunk(self, data_path: Path, chunk_size: int = 10000) -> int:\n with data_path.open('r') as file_handler:\n processed_words = []\n total_words_inserted = 0\n for line in file_handler:\n # Each word is sorted lexicographical and use it as the permutation similarity index\n word = line.strip()\n if not word:\n # if word is empty -> end of file\n break\n sorted_word = \"\".join(sorted(word))\n processed_words.append(DictWordModel(word=word, permutation_similarity_index=sorted_word))\n if 
(num_processed_words := len(processed_words)) == chunk_size:\n await self._insert_words(processed_words)\n processed_words.clear()\n total_words_inserted += num_processed_words\n if processed_words:\n await self._insert_words(processed_words)\n total_words_inserted += len(processed_words)\n\n return total_words_inserted", "def read(path, label2int):\n\n labels = [] # int labels\n samples = [] # examples as strings\n\n for label_dir in os.listdir(path):\n label_dir_path = os.path.join(path, label_dir)\n\n for file in os.listdir(label_dir_path):\n file_path = os.path.join(label_dir_path, file)\n file_text = open(file_path).read().rstrip()\n int_label = label2int[label_dir.lower()]\n samples.append(file_text)\n labels.append(int_label)\n\n return samples, labels", "def get_training_data(path):\n i = 0\n df = {}\n for d in parse(path):\n i += 1\n if i <= sample_size:\n df[i] = d\n else:\n break\n if (i + 1) % 1000 == 0:\n print(\"Step:\", i + 1)\n return pd.DataFrame.from_dict(df, orient='index')", "def Preprocess_IMDB(path=\"datasets/raw/aclImdb/\"):\n output_path = \"datasets/preprocessed/IMDB_Data\"\n\n neg = glob.glob(os.path.join(path, 'test', 'neg', '*'))\n neg += glob.glob(os.path.join(path, 'train', 'neg', '*'))\n neg_data = [io.open(fname, 'r', encoding='utf-8').readlines() for fname in neg]\n neg_data = [sentence[0] for sentence in neg_data]\n\n\n pos = glob.glob(os.path.join(path, 'test', 'pos', '*'))\n pos += glob.glob(os.path.join(path, 'train', 'pos', '*'))\n pos_data = [io.open(fname, 'r', encoding='utf-8').readlines() for fname in pos]\n pos_data = [sentence[0] for sentence in pos_data]\n\n labels = compute_labels(pos_data, neg_data)\n text, labels = shuffle_data(pos_data + neg_data, labels)\n\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n # split data in 70%/20%/10% train/test/dev split\n train_len = ((len(text) / 10) * 7) + (len(text) % 10)\n test_len = (len(text) / 10) * 2\n dev_len = len(text) / 10\n\n trX = text[0:train_len]\n teX = text[train_len:train_len + test_len]\n vaX = text[train_len + test_len: train_len + test_len + dev_len]\n\n trY = labels[0:train_len]\n teY = labels[train_len:train_len + test_len]\n vaY = labels[train_len + test_len: train_len + test_len + dev_len]\n\n dat1 = pd.DataFrame({'label': trY})\n dat2 = pd.DataFrame({'sentence': trX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"train_binary_sent.csv\"), encoding='utf-8', index=False)\n\n\n dat1 = pd.DataFrame({'label': teY})\n dat2 = pd.DataFrame({'sentence': teX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"test_binary_sent.csv\"), encoding='utf-8', index=False)\n\n dat1 = pd.DataFrame({'label': vaY})\n dat2 = pd.DataFrame({'sentence': vaX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"dev_binary_sent.csv\"), encoding='utf-8', index=False)", "def pre_process_data(filepath):\n positive_path = os.path.join(filepath, 'pos')\n negative_path = os.path.join(filepath, 'neg')\n\n pos_label = 1\n neg_label = 0\n\n dataset = []\n\n for filename in glob.glob(os.path.join(positive_path, '*.txt')):\n with open(filename, 'r', encoding=\"utf-8\") as f:\n dataset.append((pos_label, f.read()))\n\n for filename in glob.glob(os.path.join(negative_path, '*.txt')):\n with open(filename, 'r', encoding=\"utf-8\") as f:\n dataset.append((neg_label, f.read()))\n\n shuffle(dataset)\n\n return dataset", "def load_preprocess_training_batch(batch_id, batch_size):\n path, dataset = select_dataset(training = True)\n data = dataset_lib.get_data(batch_id, 
dataset=dataset, path=path)\n features = [np.array(x[1]) for x in data]\n labels = np.array([x[0] for x in data])\n\n # Return the training data in batches of size <batch_size> or less\n return batch_features_labels(features, labels, batch_size)", "def process_raw_data(data_dir='/home/data/nbc/athena/athena-data/'):\n\n # Calls the process_corpus function, defined below\n # process_corpus reads in the text, performs abbreviation, spelling,\n # translation, and overall text Processing\n # process_corpus outputs the processed text for each file and the stemmed file\n for feature_source in ['abstract', 'full']:\n process_corpus(data_dir, feature_source)\n\n # Calls the label_data function, defined below\n # label_data reads in the metadata csv files, concatenates them, then\n # reads in the processed text files\n # label_data outputs a binary pmid by label metadata matrix\n label_data(data_dir)\n generate_gazetteer(data_dir)", "def load_data(self, training_path : str =\"data/training/mapping\"):\n\n array_badly_mapped = np.load(join(training_path, \"array_badly_mapped.npy\"))\n array_SV = np.load(join(training_path, \"array_SV.npy\"))\n\n labels_SV = np.zeros(len(array_SV))\n labels_badly_mapped= np.ones(len(array_badly_mapped))\n\n features = np.concatenate((array_SV, array_badly_mapped)).reshape((-1, 2))\n labels = np.concatenate((labels_SV, labels_badly_mapped))\n\n self.X_train, self.X_valid, self.y_train, self.y_valid = train_test_split(\n features, labels\n )", "def load_preprocess_training_batch(batch_id, batch_size):\r\n filename = 'preprocess_batch_' + str(batch_id) + '.p'\r\n features, labels = pickle.load(open(filename, mode='rb'))\r\n# labels = np.argmax(labels,1)\r\n# num = len(labels)\r\n# arr = np.zeros((num, 1))\r\n# for i in range(num):\r\n# arr[i][0] = labels[i]\r\n# np.reshape(features,(2500,150528))\r\n# ind = [i for i in range(len(features))]\r\n# random.shuffle(ind)\r\n# features = features[ind]\r\n# labels = labels[ind]\r\n\r\n # Return the training data in batches of size <batch_size> or less\r\n return features[0:batch_size],labels[0:batch_size]", "def read_data(self, path, **kwargs):\n\n from glob import glob\n import os\n sc = self.sc\n pdt_lc = np.dtype([('pos', 'f4', 3),('vel', 'f4', 3)])\n\n blockids = kwargs['blockids']\n\n def set_particle_IDs_partition(index, iterator): \n \"\"\"\n Use the aggregate partition counts to set monotonically increasing \n particle indices\n \"\"\"\n p_counts = partition_counts.value\n local_index = 0\n start_index = sum([p_counts[i] for i in range(index)])\n for arr in iterator:\n arr['iOrder'] = range(start_index + local_index, start_index + local_index + len(arr))\n arr['iGroup'] = loc_to_glob_map_b.value[index]\n local_index += len(arr)\n yield arr\n \n def read_file(index, i, chunksize=102400): \n for part,filename in i:\n timein = time.time()\n with open(filename,'rb') as f: \n header = f.read(62500)\n while True:\n chunk = f.read(chunksize*24)\n if len(chunk): \n p_arr = np.frombuffer(chunk, pdt_lc)\n new_arr = np.zeros(len(p_arr), dtype=pdt)\n new_arr['pos'] = p_arr['pos']\n yield new_arr\n else: \n t_elapsed = time.time()-timein\n rate = os.path.getsize(filename)/1e6/t_elapsed\n print 'spark_fof: reading %s took %d seconds in partition %d, %f MB/sec'%(filename, t_elapsed, index, rate)\n break\n \n # determine which files to read\n get_block_ids = re.compile('blk\\.(\\d+)\\.(\\d+)\\.(\\d+)?')\n\n if blockids is None: \n files = glob(os.path.join(self.path,'*/*'))\n else: \n files = []\n for dirname, subdirlist, filelist in 
os.walk(path):\n try: \n dirnum = int(os.path.basename(dirname))\n if dirnum in blockids: \n for f in filelist:\n ids = get_block_ids.findall(f)\n if len(ids) > 0:\n if all(int(x) in blockids for x in ids[0]):\n files.append(os.path.join(dirname,f))\n except ValueError: \n pass\n\n files.sort()\n nfiles = len(files) \n self.nPartitions = nfiles\n\n print 'spark_fof: Number of input files: ', nfiles\n\n # get particle counts per partition\n nparts = {i:_get_nparts(filename,62500,pdt_lc.itemsize) for i,filename in enumerate(files)}\n\n print 'spark_fof: Total number of particles: ', np.array(nparts.values()).sum()\n \n # set up the map from x,y,z to partition id \n ids = map(lambda x: tuple(map(int, get_block_ids.findall(x)[0])), files)\n ids_map = {x:i for i,x in enumerate(ids)}\n self.ids_map = ids_map\n loc_to_glob_map_b = self.local_to_global_map\n \n ids_map_b = sc.broadcast(ids_map)\n loc_to_glob_map_b = sc.broadcast(loc_to_glob_map_b)\n\n partition_counts = sc.broadcast(nparts)\n\n rec_rdd = (sc.parallelize(zip(ids,files), numSlices=self.nPartitions)\n .map(lambda (id,filename): (ids_map_b.value[id],filename))\n .partitionBy(self.nPartitions).cache()\n .mapPartitionsWithIndex(read_file, preservesPartitioning=True)\n .mapPartitionsWithIndex(set_particle_IDs_partition, \n preservesPartitioning=True))\n \n return rec_rdd", "def load_pkl_data(path):\n with open(path, 'rb') as fi:\n data = pickle.load(fi)\n return data", "def load_data(path):\n # Training Images Details\n IMG_SIZE = 224 # Size of images used for training\n IMG_MEAN = [0.485, 0.456, 0.406] # image normalization mean\n IMG_SDEV = [0.229, 0.224, 0.225] # image normalization standard deviation\n\n # Training phases\n phases = ['train', 'valid', 'test']\n\n # Define data locations\n data_dir = {n: path + n for n in phases}\n\n # Define transforms for the training, validation, and testing sets\n data_transforms = {\n 'train':\n transforms.Compose([\n transforms.RandomRotation(30),\n transforms.RandomResizedCrop(IMG_SIZE),\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.ToTensor(),\n transforms.Normalize(IMG_MEAN, IMG_SDEV)]),\n 'valid':\n transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(IMG_SIZE),\n transforms.ToTensor(),\n transforms.Normalize(IMG_MEAN, IMG_SDEV)]),\n 'test':\n transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(IMG_SIZE),\n transforms.ToTensor(),\n transforms.Normalize(IMG_MEAN, IMG_SDEV)])\n }\n\n # Load the datasets\n image_datasets = {n: datasets.ImageFolder(\n data_dir[n], transform=data_transforms[n])\n for n in phases}\n\n # Create the PyTorch dataloaders\n dataloaders = {n: torch.utils.data.DataLoader(\n image_datasets[n], batch_size=64, shuffle=True)\n for n in phases}\n\n # mapping of classes to training indices\n class_to_idx = image_datasets['train'].class_to_idx\n\n return dataloaders, class_to_idx", "def load_data_filenames_from_path(path: str) -> List[str]:\n return load_filenames_from_path(path, extension='.bin')", "def read_data_set(self, path):\n data = self._preprocessor.create_empty_input_target_data()\n\n for filename in glob.iglob(os.path.join(path, \"*\" + NoiseReader.file_extension())):\n exp_name = os.path.splitext(os.path.basename(filename))[0]\n\n experiment = self.read_experiment(path, exp_name)\n data = self._preprocessor.concat_input_target_data(data, experiment)\n\n return data", "def load_preprocess_training_batch(batch_id, batch_size):\r\n filename = 'preprocess_batch_' + str(batch_id) + '.p'\r\n features, labels = 
pickle.load(open(filename, mode='rb'))\r\n\r\n # Return the training data in batches of size <batch_size> or less\r\n return batch_features_labels(features, labels, batch_size)", "def load_data_from_files(self):\n # separated method to allow mock easier\n logger.info(\"Loading data...\")\n parent = Path(__file__).parent\n path = parent / \"resources\" / \"scores.txt\"\n self.scorer.load_from_file(path)\n path = parent / \"resources\" / \"american-english-large.txt\"\n self.trie.load_from_file(path)\n path = parent / \"resources\" / \"reels.txt\"\n self.reels = Reel.get_from_file(path)\n logger.info(\"Data loaded!\")", "def preprocess(data_path, dataset):\n il_data_path = os.path.join(data_path, 'il' + dataset)\n train_path = os.path.join(il_data_path, 'train')\n val_path = os.path.join(il_data_path, 'val')\n\n if os.path.isdir(il_data_path):\n return\n\n os.makedirs(train_path)\n os.makedirs(val_path)\n\n train_set = _datasets[dataset](data_path, train=True, download=True)\n val_set = _datasets[dataset](data_path, train=False, download=True)\n\n # dump pickles for each class\n for cur_set, cur_path in [[train_set, train_path], [val_set, val_path]]:\n for idx, item in enumerate(cur_set):\n label = item[1]\n if not os.path.exists(os.path.join(cur_path, str(label))):\n os.makedirs(os.path.join(cur_path, str(label)))\n with open(os.path.join(cur_path, str(label), str(idx) + '.p'), 'wb') as f:\n pickle.dump(item, f)", "def get_test_loader(id_list = './data/sample_submission.csv', root_dir = './data/test/'):\n data = HumanProteinDataset(id_list, root_dir, transform = transforms.Compose([\n Rescale((256, 256)), \n ToTensor()\n ]))\n\n indices = np.arange(len(data))\n dataloader_test = DataLoader(data, batch_size=10, num_workers=5)\n\n return dataloader_test", "def batch(data_path):\n train, _, _ = get_datasets(\n data_path=data_path,\n nb_nodes=7,\n task_type=\"classification\",\n nb_classes=2,\n split=None,\n k_fold=None,\n seed=1234,\n )\n for batch in torch.utils.data.DataLoader(\n train, shuffle=False, batch_size=25, drop_last=False\n ):\n return batch", "def input_fn(path, is_train: bool, batch_size = 64, epochs=100):\n\n import re\n if is_train:\n CHUNK_RE = re.compile(r\"train.*\\.tfrecords\")\n else:\n CHUNK_RE = re.compile(r\"test.*\\.tfrecords\")\n\n chunk_files = [os.path.join(path, fname)\n for fname in os.listdir(path)\n if CHUNK_RE.match(fname)]\n\n # 1. create the dataset\n dataset = tf.data.TFRecordDataset(chunk_files)\n\n # 2. map with the actual work (preprocessing, augmentation…) using multiple\n # parallel calls\n dataset = dataset.map(_parser, num_parallel_calls=4)\n if is_train:\n dataset = dataset.map(_train_preprocess,\n num_parallel_calls=4)\n else:\n dataset = dataset.map(_val_preprocess,\n num_parallel_calls=4)\n\n # 3. shuffle (with a big enough buffer size)\n # In response to a question on OpenReview, Hinton et al. wrote the\n # following:\n # https://openreview.net/forum?id=HJWLfGWRb&noteId=rJgxonoNnm\n # \"We did not have any special ordering of training batches and we random\n # shuffle. In terms of TF batch:\n # capacity=2000 + 3 * batch_size, ensures a minimum amount of shuffling of\n # examples. min_after_dequeue=2000.\"\n capacity = 2000 + 3 * batch_size\n dataset = dataset.shuffle(buffer_size=capacity)\n\n # 4. batch\n dataset = dataset.batch(batch_size, drop_remainder=True)\n\n # 5. repeat\n dataset = dataset.repeat(count=epochs)\n\n # 6. 
prefetch\n dataset = dataset.prefetch(1)\n\n return dataset", "def load_data_to_db(self, path):\n table_names = ['train_transaction', 'train_identity', 'test_transaction', 'test_identity']\n for table_name in table_names:\n pat = self.TRANSACTION_NON_NUMBER_PATTERN if 'transaction' in table_name else self.IDENTITY_NON_NUMBER_PATTERN\n print(\"Loading table: \" + table_name)\n fn = os.path.join(path, table_name + '.csv')\n self.dbinstance.build_table_from_csv(fn, pat, table_name)\n print(\"Loaded table \" + table_name)", "def load_data_from_fold(data_path):\r\n print(\"\\nLoading data from json folder {}\".format(data_path))\r\n\r\n SAMPLES_TO_CONSIDER = 22050\r\n\r\n data = preprocess_dataset(data_path, SAMPLES_TO_CONSIDER)\r\n\r\n X = np.array(data[\"MFCCs\"])\r\n y = np.array(data[\"labels\"])\r\n print(\"Training sets loaded!\")\r\n print(\"data size :\", X.shape, \"labels size: \", y.shape)\r\n print(\"release the 'data' for memories\")\r\n del data\r\n\r\n return X, y", "def loadData(dataset_path):\n with open(dataset_path, 'rb') as handle:\n data = pickle.load(handle)\n word2id = data['word2id']\n id2word = data['id2word']\n training_data = data['trainingSamples']\n return word2id, id2word, training_data", "def load_from_path(self, paths, label_key='labels'):\n data = []\n labels = []\n for path in paths:\n with tf.io.gfile.GFile(path, 'rb') as f:\n d = {\n k.decode('utf8'): v\n for k, v in cPickle.load(f, encoding='bytes').items()\n }\n data.append(d['data'])\n labels.append(d[label_key])\n data = np.concatenate(data, axis=0)\n data = data.reshape((data.shape[0], 3, 32, 32))\n labels = np.concatenate(labels, axis=0)\n labels = np.reshape(labels, (len(labels), 1))\n\n if tf.keras.backend.image_data_format() == 'channels_last':\n data = data.transpose(0, 2, 3, 1)\n\n return data, labels", "def raw_text_to_mmap(args):\n MMapTextDataset.tokenizer = AutoTokenizer.from_pretrained(args.tokenizer, use_fast=True)\n assert len(MMapTextDataset.tokenizer) < 65535 # will use uint16 to store token ids\n all_files = glob.glob(f'{args.input_dir}/c4-*')\n print(len(all_files), MMapTextDataset.tokenizer)\n if os.path.exists(f'{args.output_dir}/cache/train.bin') and os.path.exists(f'{args.input_dir}/cache/val.bin'):\n logger.info(\"Cache already exists. 
Remove the cache directory to regenerate\")\n return\n try:\n os.mkdir(f'{args.output_dir}/cache/')\n except FileExistsError:\n pass\n try:\n os.mkdir(f'{args.output_dir}/shards-{args.shard_size}/')\n except FileExistsError:\n pass\n try:\n os.mkdir(f'{args.output_dir}/logs-{args.shard_size}/') # log progrss to be able to resume\n except FileExistsError:\n pass\n\n # STEP1: tokenizing and saving to shards\n if args.num_preprocessing_workers > 1:\n from multiprocessing.pool import Pool\n with Pool(args.num_preprocessing_workers) as p:\n list(tqdm(p.imap(MMapTextDataset._process_file, all_files), total=len(all_files)))\n else:\n [MMapTextDataset._process_file(f) for f in tqdm(all_files)]\n\n if args.data_type == 'raw_text': # c4 tfrecords are already sharded\n # STEP2: shuffling shards and combining them into train.bin and val.bin files\n all_shards = glob.glob(f'{args.output_dir}/shards-{args.shard_size}/*.bin')\n random.shuffle(all_shards) # shuffling based on shards not individual lines\n val_shards_count = int(args.train_dev_split * len(all_shards))\n val_shards = all_shards[:val_shards_count]\n train_shards = all_shards[val_shards_count:]\n # TODO: if MMapTextDataset._combining_shards is very slow for large files, it can be skipped but we nned to\n # update the dataset to read from multiple shards directly\n MMapTextDataset._combine_shards(f'{args.output_dir}/cache/val.bin', val_shards)\n MMapTextDataset._combine_shards(f'{args.output_dir}/cache/train.bin', train_shards)\n elif args.data_type == 'tfrecord':\n train_shards = glob.glob(f'{args.output_dir}/*train*.bin')\n val_shards = glob.glob(f'{args.output_dir}/*val*.bin')\n MMapTextDataset._combine_shards(f'{args.output_dir}/val.bin', val_shards)\n MMapTextDataset._combine_shards(f'{args.output_dir}/train.bin', train_shards)\n del MMapTextDataset.tokenizer", "def source_data_files(self, data_dir, tmp_dir, dataset_split):\n raise NotImplementedError()", "def load_data(data_path=DATA_PATH):\n with open (os.path.join(DATA_PATH, \"imdb_extrait.pkl\"),\"rb\") as file:\n \n [data , id2titles , fields ]= pk.load(file)\n \n \n datax = data [: ,:33]\n datay = np.array([1 if x [33] >6.5 else -1 for x in data ])\n \n return datax, datay, id2titles, fields", "def load_data(self) -> tuple:\n label_num = {}\n data_set = pathlib.Path(self.path)\n data = []\n\n # create the label lookup dict for verifcation later\n for i, v in enumerate(data_set.iterdir()):\n label_num[v.name] = i\n self.labels[i] = v.name\n # end\n\n # read images\n for img_path in data_set.rglob(\"*.jpg\"):\n lbl = label_num[str(img_path.parent.stem)]\n img = cv2.imread(str(img_path))\n img = cv2.resize(img, self.dims, interpolation=cv2.INTER_AREA)\n\n # flatten RGB data into a vector\n # NOTE: NOT ACTUALLY NECESSARY! 
\n img.flatten()\n\n # label the sample and append to temp data list\n sample = np.append(lbl, img)\n data.append(sample)\n # end\n\n # partition and package the data (*_ ensures safe unpacking)\n train, test, validate, *_ = Data.partition(data, self.parts, 0.7, 0.2)\n self.train = Data(train)\n self.test = Data(test)\n self.validate = Data(validate)", "def load_raw_data(self, input_files):\n\n log.debug(f\"Loading dataset {input_files}\") \n print(f\"Loading dataset\")\n\n # Load stroke information from XML files\n for file in input_files:\n new_strokeset = strokeset.StrokeSet(file)\n self.strokesets.append(new_strokeset)\n self.stroke_matrix.append(new_strokeset.as_delta_array())\n self.stroke_ascii.append(new_strokeset.get_text())\n\n done_msg = \"Finished parsing dataset. Imported {} lines\".format(len(self.get_strokesets()))\n print (done_msg)\n log.info(done_msg)", "def load_data():\r\n global labelNames\r\n print(\"Loading Data...\")\r\n\r\n fnpath = \"rawdata\\\\cifar-10-batches-py\"\r\n fnprefix = 'data_batch_'\r\n fnlblnames = 'batches.meta'\r\n fntstbatch = 'test_batch'\r\n\r\n labelNames = unpickle(path.join(fnpath, fnlblnames))\r\n label_names = []\r\n for label in labelNames['label_names']:\r\n label_names.append(\"\".join(map(chr, label)))\r\n labelNames['label_names'] = label_names\r\n\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fntstbatch)))\r\n for n in range(1, 6):\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fnprefix + str(n))))", "def sample_data(path, driverIDs, tripIDs, sc):\n try:\n combos = zip(driverIDs, tripIDs)\n samplefiles = [path + '/' + 'driver_' + i + '.csv' for i in driverIDs]\n\n #### NOTE: this set() action is a hack for small num. files\n samplefiles = ','.join(set(samplefiles))\n #### NOTE: with large num. files, might need to set num. 
partitions\n RDD = sc.textFile(samplefiles)\n RDDsplit = RDD.map(lambda x: x.split(','))\n RDDsamples = RDDsplit.filter(lambda x: (x[2],x[3]) in combos)\n RDDsamples.cache()\n return RDDsamples\n except Exception as e:\n print e", "def load_data(self, data_path, use_plus_minus_feats):\n loaded = np.load(data_path + '-targets.npz')\n self.max_num_ans = int(loaded['max_num_ans'])\n self.max_prob_set_id = int(loaded['max_prob_set_id'])\n targets = loaded['targets']\n if use_plus_minus_feats:\n print(\"using plus minus feats!!!\")\n inputs = sp.load_npz(data_path + '-inputs-plus-minus.npz')\n self.encoding_dim = self.max_prob_set_id + 1\n else:\n inputs = sp.load_npz(data_path + '-inputs.npz')\n self.encoding_dim = 2 * self.max_prob_set_id + 1\n self.target_ids = sp.load_npz(data_path + '-targetids.npz')\n\n return inputs, targets", "def Preprocess_MR(path=\"datasets/raw/rt10662\"):\n\n output_path = \"datasets/preprocessed/MR_Data\"\n\n # load positive and negative data\n with io.open(os.path.join(path, \"rt-polarity.pos\"), encoding='latin-1') as f:\n pos_data = f.readlines()\n pos_data = [sentence.strip() for sentence in pos_data]\n with io.open(os.path.join(path, \"rt-polarity.neg\"), encoding='latin-1') as f:\n neg_data = f.readlines()\n neg_data = [sentence.strip() for sentence in neg_data]\n\n labels = compute_labels(pos_data, neg_data)\n text, labels = shuffle_data(pos_data + neg_data, labels)\n\n # split data in 70%/20%/10% train/test/dev split\n train_len = ((len(text) / 10) * 7) + (len(text) % 10)\n test_len = (len(text) / 10) * 2\n dev_len = len(text) / 10\n\n trX = text[0:train_len]\n teX = text[train_len:train_len + test_len]\n vaX = text[train_len + test_len: train_len + test_len + dev_len]\n\n trY = labels[0:train_len]\n teY = labels[train_len:train_len + test_len]\n vaY = labels[train_len + test_len: train_len + test_len + dev_len]\n\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n dat1 = pd.DataFrame({'label': trY})\n dat2 = pd.DataFrame({'sentence': trX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"train_binary_sent.csv\"), encoding='utf-8', index=False)\n\n\n dat1 = pd.DataFrame({'label': teY})\n dat2 = pd.DataFrame({'sentence': teX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"test_binary_sent.csv\"), encoding='utf-8', index=False)\n\n dat1 = pd.DataFrame({'label': vaY})\n dat2 = pd.DataFrame({'sentence': vaX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"dev_binary_sent.csv\"), encoding='utf-8', index=False)", "def load_batch(fpath, label_key='labels'):\n f = open(fpath, 'rb')\n if sys.version_info < (3,):\n d = cPickle.load(f)\n else:\n d = cPickle.load(f, encoding='bytes')\n # decode utf8\n d_decoded = {}\n for k, v in d.items():\n d_decoded[k.decode('utf8')] = v\n d = d_decoded\n f.close()\n data = d['data']\n labels = d[label_key]\n\n data = data.reshape(data.shape[0], 3, 32, 32)\n return data, labels", "def load_batch(self, fpath, match, in_num):\n if in_num == None:\n in_num = input('Please specify IN number: ')\n\n if match == None:\n match = input('Please specify filename string to match for batch loading (ex. 
\\'_s2_\\'): ')\n\n # get a list of all matching files\n glob_match = f'{fpath}/*{match}*'\n files = glob.glob(glob_match)\n\n # load & concatenate files into a single dataframe\n data = pd.concat((pd.read_csv(file, header = [0, 1], index_col = 0, parse_dates=True, low_memory=False) for file in files)).sort_index()\n\n # extract sampling frequency\n s_freq = 1/(data.index[1] - data.index[0]).total_seconds()\n\n # reset the index to continuous time\n ind_freq = str(int(1/s_freq*1000000))+'us'\n ind_start = '1900-01-01 00:00:00.000'\n ind = pd.date_range(start = ind_start, periods=len(data), freq=ind_freq)\n data.index = ind\n\n # set metadata & attributes\n self.metadata = {'file_info':{'in_num': in_num, 'files': files, 'dir': fpath,\n 'match_phrase': match},\n 'analysis_info':{'s_freq': s_freq} }\n self.data = data\n self.s_freq = s_freq", "def prepareData(args):\n print(\"Starting preprocessing\")\n\n # params\n max_midi = args['max_midi']\n min_midi = args['min_midi']\n note_range = args['note_range']\n window_size = args['window_size']\n sr = args['sr']\n hop_length = args['hop_length']\n wav_dir = args['wav_dir']\n\n datapath = os.path.join(args['proj_root'], 'Features')\n bin_multiple = int(args['bin_multiple'])\n\n framecnt = 0\n maxFramesPerFile = args['maxFramesPerFile']\n maxFrames = args['maxFrames']\n\n fileappend = str(maxFramesPerFile) + 'pf_max' + str(maxFrames) + '.dat'\n\n filenameIN = os.path.join(datapath, 'input_' + fileappend)\n filenameOUT = os.path.join(datapath, 'output_' + fileappend)\n\n if os.path.isfile(filenameIN) and os.path.isfile(filenameOUT):\n n_bins = note_range * bin_multiple\n print('loading precomputed data from ' + filenameIN)\n mmi = np.memmap(filenameIN, mode='r', dtype=\"float64\")\n inputs = np.reshape(mmi, (-1, window_size, n_bins))\n\n mmo = np.memmap(filenameOUT, mode='r', dtype=\"float64\")\n outputs = np.reshape(mmo, (-1, note_range))\n\n return inputs, outputs, datapath\n\n inputs, outputs = [], []\n addCnt, errCnt = 0, 0\n\n # hack to deal with high PPQ from MAPS\n # https://github.com/craffel/pretty-midi/issues/112\n pretty_midi.pretty_midi.MAX_TICK = 1e10\n\n for s in os.listdir(wav_dir):\n subdir = os.path.join(wav_dir, s)\n if not os.path.isdir(subdir):\n continue\n # recursively search in subdir\n print(subdir)\n for dp, dn, filenames in os.walk(subdir):\n # in each level of the directory, look at filenames ending with .mid\n for f in filenames:\n # if there exists a .wav file and .midi file with the same name\n\n if f.endswith('.wav'):\n audio_filename = f\n fprefix = audio_filename.split('.wav')[0]\n mid_fn = fprefix + '.mid'\n txt_fn = fprefix + '.txt'\n print(\"Handling files {}\".format(fprefix))\n if mid_fn in filenames:\n # extract_features\n audio_filename = os.path.join(dp, audio_filename)\n inputnp = extract_features(audio_filename, args)\n times = librosa.frames_to_time(np.arange(inputnp.shape[0]), sr=sr, hop_length=hop_length)\n # mid2outputnp\n mid_fn = os.path.join(dp, mid_fn)\n pm_mid = pretty_midi.PrettyMIDI(mid_fn)\n\n outputnp = mid2outputnp(pm_mid, times, args)\n\n # check that num onsets is equal\n if inputnp.shape[0] == outputnp.shape[0]:\n # Some filtering highly pragmatic filtering on the data!!\n # take only frames that are \"sufficiently loud\", ...\n good2take = np.array(inputnp.max(axis=(1, 2)) > 0.05)\n # ... and always omit the last frame as this has been padded ...\n good2take[-1] = False # omit last\n # ... and only take frames with at least one true label (i.e. 
some tone is played)\n good2take = good2take & (outputnp.max(axis=1) > 0)\n outputnp = outputnp[good2take, ]\n inputnp = inputnp[good2take, ]\n\n addCnt += 1\n if inputnp.shape[0] > maxFramesPerFile > 0:\n inputnp = inputnp[:maxFramesPerFile]\n outputnp = outputnp[:maxFramesPerFile]\n framecnt += inputnp.shape[0]\n print(\"framecnt is {}\".format(framecnt))\n inputs.append(inputnp)\n outputs.append(outputnp)\n else:\n print(\"error for fprefix {}\".format(fprefix))\n errCnt += 1\n print(inputnp.shape)\n print(outputnp.shape)\n\n if framecnt > maxFrames > 0:\n print(\"have enought frames, leaving {}\".format(subdir))\n break\n if framecnt > maxFrames > 0:\n print(\"have enought frames, leaving {}\".format(wav_dir))\n break\n\n if framecnt > maxFrames > 0:\n print(\"have enought frames, leaving {}\".format(wav_dir))\n break\n\n print(\"{} examples in dataset\".format(addCnt))\n print(\"{} examples couldnt be processed\".format(errCnt))\n\n # concatenate dynamic list to numpy list of example\n if addCnt:\n inputs = np.concatenate(inputs)\n outputs = np.concatenate(outputs)\n\n print(\"inputs.shape\")\n print(inputs.shape)\n print(\"outputs.shape\")\n print(outputs.shape)\n mmi = np.memmap(filename=filenameIN, mode='w+', shape=inputs.shape, dtype=\"float64\")\n mmi[:] = inputs[:]\n mmo = np.memmap(filename=filenameOUT, mode='w+', shape=outputs.shape, dtype=\"float64\")\n mmo[:] = outputs[:]\n del mmi\n del mmo\n\n return inputs, outputs, datapath", "def _read_data(path, typ):\n\n suffix = '_mini'\n\n try:\n data_path = os.path.join(path, 'data',\n 'data_{0}{1}.npy'.format(typ, suffix))\n src = np.load(data_path, mmap_mode='r')\n\n labels_path = os.path.join(path, 'data',\n 'labels_{0}{1}.csv'.format(typ, suffix))\n labels = pd.read_csv(labels_path)\n except IOError:\n raise IOError(\"'data/data_{0}.npy' and 'data/labels_{0}.csv' are not \"\n \"found. 
Ensure you ran 'python download_data.py' to \"\n \"obtain the train/test data\".format(typ))\n\n # convert the dataframe with crater positions to list of\n # list of (x, y, radius) tuples (list of arrays of shape (n, 3) with n\n # true craters on an image\n\n # determine locations of craters for each patch in the labels array\n n_true_patches = labels.groupby('i').size().reindex(\n range(src.shape[0]), fill_value=0).values\n # make cumulative sum to obtain start/stop to slice the labels\n n_cum = np.array(n_true_patches).cumsum()\n n_cum = np.insert(n_cum, 0, 0)\n\n labels_array = labels[['row_p', 'col_p', 'radius_p']].values\n y = [[tuple(x) for x in labels_array[i:j]]\n for i, j in zip(n_cum[:-1], n_cum[1:])]\n # convert list to object array of lists\n y_array = np.empty(len(y), dtype=object)\n y_array[:] = y\n\n return src, y_array", "def load_dummy(path, subset=\"all\", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False):\n\n data = bunch.Bunch()\n if subset in ('train', 'test'):\n data[subset] = load_files(\"{0}/{1}\".format(path, subset), charset=\"latin1\", load_content=True, random_state=rnd)\n elif subset == \"all\":\n data[\"train\"] = load_files(\"{0}/{1}\".format(path, \"train\"), charset=\"latin1\", load_content=True,\n random_state=rnd)\n data[\"test\"] = load_files(\"{0}/{1}\".format(path, \"test\"), charset=\"latin1\", load_content=True, random_state=rnd)\n else:\n raise ValueError(\n \"subset can only be 'train', 'test' or 'all', got '%s'\" % subset)\n if not raw:\n data = process_data(data, fix_k, min_size, vct)\n return data", "def load_dataset(data_dir,data_name, model_params, inference_mode=False):\n data_filepath = os.path.join(data_dir, data_name)\n if six.PY3:\n data = np.load(data_filepath, encoding='latin1', allow_pickle=True)\n else:\n data = np.load(data_filepath, allow_pickle=True)\n train_strokes = data['train']\n valid_strokes = data['valid']\n test_strokes = data['test']\n \n\n eval_model_params = sketch_rnn_model.copy_hparams(model_params)\n\n eval_model_params.use_input_dropout = 0\n eval_model_params.use_recurrent_dropout = 0\n eval_model_params.use_output_dropout = 0\n eval_model_params.is_training = 1\n\n if inference_mode:\n eval_model_params.batch_size = 1\n eval_model_params.is_training = 0\n\n sample_model_params = sketch_rnn_model.copy_hparams(eval_model_params)\n sample_model_params.batch_size = 1 # only sample one at a time\n sample_model_params.max_seq_len = 1 # sample one point at a time\n\n train_set = utils.DataLoader(\n train_strokes,\n model_params.batch_size,\n max_seq_length=model_params.max_seq_len,\n random_scale_factor=0.0,\n augment_stroke_prob=model_params.augment_stroke_prob)\n\n normalizing_scale_factor = train_set.calculate_normalizing_scale_factor()\n train_set.normalize(normalizing_scale_factor)\n\n valid_set = utils.DataLoader(\n valid_strokes,\n eval_model_params.batch_size,\n max_seq_length=eval_model_params.max_seq_len,\n random_scale_factor=0.0,\n augment_stroke_prob=0.0)\n valid_set.normalize(normalizing_scale_factor)\n\n test_set = utils.DataLoader(\n test_strokes,\n eval_model_params.batch_size,\n max_seq_length=eval_model_params.max_seq_len,\n random_scale_factor=0.0,\n augment_stroke_prob=0.0)\n test_set.normalize(normalizing_scale_factor)\n tf.logging.info('normalizing_scale_factor %4.4f.', normalizing_scale_factor)\n result = [\n train_set, valid_set, test_set, model_params, eval_model_params,\n sample_model_params\n ]\n return result", "def coco_raw_data(data_path=None):\n train= 
_read_chars(os.path.join(data_path, \"train_caps.txt\"))\n val = _read_chars(os.path.join(data_path, \"dev_caps.txt\"))\n test = _read_chars(os.path.join(data_path, \"test_caps.txt\"))\n chars = set(train)\n id_2_word = dict(enumerate(chars))\n word_to_id = {i: w for w, i in id_2_word.items()}\n train_data = _file_to_word_ids(train, word_to_id)\n valid_data = _file_to_word_ids(val, word_to_id)\n test_data = _file_to_word_ids(test, word_to_id)\n return train_data, valid_data, test_data, word_to_id, id_2_word", "def load_data(\n self, file_path: str = os.path.join(os.getcwd(), \"data_breast_cancer.p\")\n ) -> None:\n with open(file_path, \"rb\") as file:\n data = pickle.load(file)\n self.x_train, self.y_train = data[\"x_train\"], data[\"y_train\"]\n self.x_test, self.y_test = data[\"x_test\"], data[\"y_test\"]", "def readDataFromFile():\n image_size = 28 # each image is 28x28\n\n num_images = 60000 # there are 60k images\n with gzip.open(r'train-images-idx3-ubyte.gz', 'r') as f: # 60k train & valid\n f.read(16) # reading by 16-byte double\n buffer_Train_Images = f.read(image_size * image_size * num_images)\n f.close()\n data_Train_Images = np.frombuffer(buffer_Train_Images, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n data_Train_Images = data_Train_Images.reshape(num_images,\n image_size * image_size) # Data = 60k x 28 x 28 with 1 value in it\n\n with gzip.open('train-labels-idx1-ubyte.gz', 'r') as f: # 60k train & valid - labels\n f.read(8) # reading by 16-byte double\n buffer_Train_Labels = f.read(num_images)\n data_Train_Labels = np.frombuffer(buffer_Train_Labels, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n num_images = 10000 # there are 10k images\n with gzip.open('t10k-images-idx3-ubyte.gz', 'r') as f: # 10k tests\n f.read(16) # reading by 16-byte double\n buffer_Test_Image = f.read(image_size * image_size * num_images)\n data_Test_Image = np.frombuffer(buffer_Test_Image, dtype=np.uint8).astype(\n np.uint8) # translating into 0 to 255\n data_Test_Image = data_Test_Image.reshape(num_images, image_size * image_size) # Data = 60k x 28 x 28 with\n\n with gzip.open('t10k-labels-idx1-ubyte.gz', 'r') as f: # 10k tests - lbles\n f.read(8) # reading by 16-byte double\n buffer_Test_Label = f.read(num_images)\n data_Test_Labels = np.frombuffer(buffer_Test_Label, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n return data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels", "def load_data(path, rng, epoch, batch_size, x_,y_):\n #global x_,t_,y_,\n #global first_report2 \n #first_report2 = True\n start_time = time()\n v,p,skeleton_feature,l = load_gzip(path)\n v = v[:,:,:res_shape[2]]\n res_shape[0] = v.shape[0]\n v_new = empty(res_shape,dtype=\"uint8\")\n\n for i in xrange(v.shape[0]): #batch\n if p[i] < 10: p[i] = 100\n ofs = p[i]*ratio\n mid = v.shape[-1]/2.\n sli = None\n if ofs < mid:\n start = int(round(mid-ofs))\n end = int(round(mid+ofs))\n sli = slice(start,end)\n\n for j in xrange(v.shape[2]): #maps\n for k in xrange(v.shape[3]): #frames\n #body\n img = v[i,0,j,k]\n img = cut_img(img,5)\n img = misc.imresize(img,(h,h))\n # if j==0: img = 255-misc.imfilter(img,\"contour\")\n v_new[i,0,j,k] = img\n\n #hand\n img = v[i,1,j,k]\n img = img[sli,sli]\n img = misc.imresize(img,(h,h))\n v_new[i,1,j,k] = img\n\n vid, lbl = v_new,l\n\n #if epoch==0: print \"get in\",str(time()-start_time)[:3]+\"s\",\n # shuffle data\n ind = rng.permutation(l.shape[0])\n ind = ind[:batch_size]\n vid = vid[:,:,:,:4,:,:]\n vid, 
skeleton_feature, lbl = vid[ind].astype(floatX), skeleton_feature[ind].astype(floatX),lbl[ind].astype(floatX)\n #vid, skeleton_feature, lbl = vid.astype(floatX), skeleton_feature.astype(floatX),lbl.astype(floatX)\n\n # vid = vid/(255./(scaler*2.))-scaler\n #traj = traj/(255./(scaler_traj*2.))-scaler_traj\n # traj = traj/(255./5.)\n\n # Wudi already made labels start from 0\n #lbl -= 1 \n\n #if first_report2:\n # print \"data range:\",vid.min(),vid.max()\n # print \"traj range:\",skeleton_feature.min(),skeleton_feature.max()\n # print \"lbl range:\",lbl.min(),lbl.max()\n # first_report2 = False\n\n # set value\n x_.set_value(vid, borrow=True)\n #t_.set_value(skeleton_feature, borrow=True)\n y_.set_value(lbl, borrow=True)", "def load_data():\n\n if 'data' not in os.listdir('.'):\n os.mkdir('data') \n \n if 'id_to_word.pkl' not in os.listdir('data'):\n print('Loading data...')\n (x_train, y_train), (x_val, y_val) = imdb.load_data(num_words=max_features, skip_top=20, index_from=3)\n word_to_id = imdb.get_word_index()\n word_to_id ={k:(v+3) for k,v in word_to_id.items()}\n word_to_id[\"<PAD>\"] = 0\n word_to_id[\"<START>\"] = 1\n word_to_id[\"<UNK>\"] = 2\n id_to_word = {value:key for key,value in word_to_id.items()}\n\n print(len(x_train), 'train sequences')\n print(len(x_val), 'test sequences')\n\n print('Pad sequences (samples x time)')\n x_train = sequence.pad_sequences(x_train, maxlen=maxlen)\n x_val = sequence.pad_sequences(x_val, maxlen=maxlen)\n y_train = np.eye(2)[y_train]\n y_val = np.eye(2)[y_val] \n\n np.save('./data/x_train.npy', x_train)\n np.save('./data/y_train.npy', y_train)\n np.save('./data/x_val.npy', x_val)\n np.save('./data/y_val.npy', y_val)\n with open('data/id_to_word.pkl','wb') as f:\n pickle.dump(id_to_word, f) \n\n else:\n x_train, y_train, x_val, y_val = np.load('data/x_train.npy'),np.load('data/y_train.npy'),np.load('data/x_val.npy'),np.load('data/y_val.npy')\n with open('data/id_to_word.pkl','rb') as f:\n id_to_word = pickle.load(f)\n\n return x_train, y_train, x_val, y_val, id_to_word", "def load_data_in_folder(self):\n if self.data_filenames:\n print('removing existing data files')\n for f in tqdm(self.data_filenames):\n os.remove(f)\n print('loading files in data folder')\n n = len(self.filenames)\n idx_max = n // self.batch_size\n for idx in tqdm(range(0, idx_max-1)):\n data = []\n for f in self.filenames[idx:idx+self.batch_size]:\n img = cv2.imread(f, int(self.color))\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n data.append(img)\n data = np.array(data)\n data = data.astype('float32')\n data = (data - 127.5)/127.5\n np.save(op.join(self.data_path, str(idx)), data)\n # TODO last batch ?\n self.data_filenames = sorted(glob(op.join(self.data_path, '*.npy')))", "def load_data(\n path,\n shuffle=False,\n max_traces=None,\n max_trace_len=None,\n filter_by_len=True,\n return_idxs=False,\n verbose=True,\n):\n labels = []\n data = []\n idxs = []\n\n filenames = os.listdir(path)\n if shuffle:\n random.shuffle(filenames)\n else:\n filenames = sorted(filenames)\n\n num_traces = 0\n file_iter = filenames\n if verbose:\n prog_bar = tqdm(\n total=len(filenames) if max_traces is None else max_traces, ascii=True\n )\n file_iter = tqdm(filenames, ascii=True)\n\n for filename in file_iter:\n file_path = os.path.join(path, filename)\n if os.path.isfile(file_path):\n trace = load_cell_data(\n file_path, max_len=max_trace_len, filter_by_len=filter_by_len\n )\n\n # Trace might be None if it was longer than allowed.\n if trace is not None:\n label = 0 if \"-\" in 
str(filename) else 1\n data.append(trace)\n labels.append(label)\n idxs.append(filename)\n\n num_traces += 1\n if verbose:\n prog_bar.update(1)\n if max_traces is not None and num_traces >= max_traces:\n break\n\n labels = np.array(labels)\n data = np.array(data)\n idxs = np.array(idxs)\n if return_idxs:\n return idxs, data, labels\n else:\n return data, labels", "def load_data(path_to_data, raw_times):\n\n loaded_data = {}\n file_names = [\n \"fore_train_ip\",\n \"fore_valid_ip\",\n \"train_ip\",\n \"valid_ip\",\n \"test_ip\",\n \"fore_train_op\",\n \"fore_valid_op\",\n \"train_op\",\n \"valid_op\",\n \"test_op\",\n ]\n for key in file_names:\n with open(os.path.join(path_to_data, key + \".json\"), \"r\") as openfile:\n loaded_data[key] = json.load(openfile)\n fore_train_ip = [np.array(x) for x in loaded_data[\"fore_train_ip\"]]\n fore_valid_ip = [np.array(x) for x in loaded_data[\"fore_valid_ip\"]]\n train_ip = [np.array(x) for x in loaded_data[\"train_ip\"]]\n valid_ip = [np.array(x) for x in loaded_data[\"valid_ip\"]]\n test_ip = [np.array(x) for x in loaded_data[\"test_ip\"]]\n fore_train_op = np.array(loaded_data[\"fore_train_op\"])\n fore_valid_op = np.array(loaded_data[\"fore_valid_op\"])\n train_op = np.array(loaded_data[\"train_op\"])\n valid_op = np.array(loaded_data[\"valid_op\"])\n test_op = np.array(loaded_data[\"test_op\"])\n del loaded_data\n\n if not raw_times:\n # default is False, so times usually WILL be normalized\n # compute mean and variance of times in training set while ignoring padding\n missing_idx = fore_train_ip[3] == 0\n tmp_times = copy.deepcopy(fore_train_ip[1])\n tmp_times[missing_idx] = np.nan\n time_mean = np.nanmean(tmp_times)\n time_stddev = np.nanstd(tmp_times)\n tmp_times = (tmp_times - time_mean) / time_stddev\n tmp_times[missing_idx] = 0\n fore_train_ip = [\n fore_train_ip[0],\n tmp_times,\n fore_train_ip[2],\n fore_train_ip[3],\n ]\n\n # normalize val set times\n missing_idx = fore_valid_ip[3] == 0\n tmp_times = copy.deepcopy(fore_valid_ip[1])\n tmp_times[missing_idx] = np.nan\n tmp_times = (tmp_times - time_mean) / time_stddev\n tmp_times[missing_idx] = 0\n fore_valid_ip = [\n fore_valid_ip[0],\n tmp_times,\n fore_valid_ip[2],\n fore_valid_ip[3],\n ]\n\n # normalize labeled datasets\n for tmp_ip in [train_ip, valid_ip, test_ip]:\n missing_idx = tmp_ip[3] == 0\n tmp_times = copy.deepcopy(tmp_ip[1])\n tmp_times[missing_idx] = np.nan\n tmp_times = (tmp_times - time_mean) / time_stddev\n tmp_times[missing_idx] = 0\n tmp_ip[1] = tmp_times\n else:\n time_mean = time_stddev = None\n\n return (\n fore_train_ip,\n fore_train_op,\n fore_valid_ip,\n fore_valid_op,\n train_ip,\n train_op,\n valid_ip,\n valid_op,\n test_ip,\n test_op,\n time_mean,\n time_stddev,\n )", "def read_data_from_dataset(data_dir_path: str):\n data_list = []\n for fname in ['X_train', 'y_train', 'X_test', 'y_test']:\n with open(f'{data_dir_path}/{fname}.pkl', 'rb') as f:\n data = pickle.load(f)\n data_list.append(data)\n return tuple(data_list)", "def inputs(data_dir, batch_size,num_data_files=FLAGS.num_data_files):\n\n \n filenames = [os.path.join(data_dir, 'fc6pool4mask_batch_%d' % i)\n for i in xrange(1,num_data_files+1)]\n for f in filenames:\n if not tf.gfile.Exists(f):\n raise ValueError('Failed to find file: ' + f)\n\n\n\n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer(filenames)\n\n # Read examples from files in the filename queue.\n read_input = read_record(filename_queue)\n\n # Subtract off the mean and divide by the 
variance of the pixels??\n \n # Ensure that the random shuffling has good mixing properties.\n min_fraction_of_examples_in_queue = 0.1\n min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *\n min_fraction_of_examples_in_queue)\n\n #print(min_queue_examples)\n print ('Filling queue with %d bottlenecked inputs before starting to train. '\n 'This will take a few minutes.' % min_queue_examples)\n\n # Generate a batch of images and labels by building up a queue of examples.\n return _generate_bottlenecked_batch(read_input.fc6, read_input.pool,read_input.mask,\n min_queue_examples, batch_size,\n shuffle=True)", "def get_train_examples(self, data_path):\r\n return self.create_examples(self.read_data(data_path), 'train')", "def _load_unlabeled(self, path):\n signal, info = wfdb.rdsamp(path)\n self.fs = 250\n self.lead_match = ['anonymous1', 'anonymous2']\n self.raw_data = np.transpose(np.array([signal]), (2, 0, 1))\n self.symbol = []\n self.coords = []\n self.label_name = None\n self._generate_beatlabel_from_estimation()", "def load_data(path, train=True):\n COLUMNS = ['utterance_ID', 'dialog_act', 'utterance_t-3', \n 'utterance_t-2', 'utterance_t-1', 'utterance_t']\n\n if not train:\n COLUMNS.remove('dialog_act')\n \n df = (pd.read_csv(path, sep='\\t|;', engine='python', names=COLUMNS)\n .set_index('utterance_ID')\n .astype(str))\n df[COLUMNS[2:]] = df[COLUMNS[2:]].apply(preprocess)\n return df", "def convert_data(DataPath, labeldict):\n\n inputlist, inputnamelist = ark_parser(DataPath, 'train.ark')\n \n label = []\n assert len(inputnamelist) == len(labeldict.keys())\n\n for name in inputnamelist:\n label.append(labeldict[name])\n\n convert_label_to_int(DataPath, '/48phone_char.map', label)\n\n with open('./train_data.pkl', 'wb') as train_data:\n pickle.dump(inputlist, train_data)", "def process_data(fileprefix=DEFAULT_FILE_PREFIX):\n\n # TODO wow this is uggo code\n FILE_PREFIX = fileprefix\n\n MAX_SAMP=1500\n\n # Get data from file\n tf_record_file_names = [join(FILE_PREFIX, f) for f in listdir(FILE_PREFIX) if isfile(join(FILE_PREFIX, f)) and 'tfrecord' in f]\n assert len(tf_record_file_names) > 0\n\n dataset_it = iter(tf.data.TFRecordDataset(tf_record_file_names, compression_type='').take(MAX_SAMP))\n\n # Run the computation !\n with tqdm_joblib(tqdm(desc=\"My calculation\", total=MAX_SAMP)) as progress_bar:\n results = Parallel(n_jobs=-1)(\n delayed(_process_single)(data) for data in dataset_it\n )", "def load_csv_data(data_path):\n print(\"LOADING CSV FILE FROM {}\".format(data_path))\n y = np.genfromtxt(data_path, delimiter=\",\", skip_header=1, dtype=str, usecols=[1])\n x = np.genfromtxt(data_path, delimiter=\",\", skip_header=1)\n ids = x[:, 0].astype(np.int)\n input_data = x[:, 2:]\n\n # convert class labels from strings to binary (-1,1)\n yb = np.ones(len(y))\n yb[np.where(y == 'b')] = -1\n\n return yb, input_data, ids", "def build_data_loader(txt_path, in_vocab_path, out_vocab_path,\n batch_size=1, drop_last=False, num_workers=0):\n dataset = PuncDataset(txt_path, in_vocab_path, out_vocab_path)\n batch_sampler = RandomBucketBatchSampler(dataset,\n batch_size=batch_size,\n drop_last=drop_last)\n collate_fn = TextAudioCollate()\n data_loader = DataLoader(dataset, batch_sampler=batch_sampler,\n collate_fn=collate_fn, num_workers=num_workers)\n return data_loader", "def __init__(self, path='data'):\r\n self.nb_data = 3\r\n self.path = path\r\n self.data_train_name = 'Xtr'\r\n self.data_test_name = 'Xte'\r\n self.features_name = '_mat100'\r\n self.label_train_name = 'Ytr'\r\n 
self.label_test_name = 'Ytr'\r\n # load raw data\r\n self.raw_data = {'train': self.load_data(self.data_train_name),\r\n 'test': self.load_data(self.data_test_name)}\r\n # load data features\r\n self.data_features = {'train': self.load_data(self.data_train_name, self.features_name, type_='features'),\r\n 'test': self.load_data(self.data_test_name, self.features_name, type_='features')}\r\n # load labels\r\n self.labels = {'train': self.load_data(self.label_train_name),\r\n 'test': self.load_data(self.label_test_name)}\r\n\r\n # toy data\r\n self.toy_data_functions = {\r\n 'blobs': blobs,\r\n 'two_moons': two_moons\r\n }\r\n self.toy_data = dict()", "def load_data_and_labels():\n # Load data from files\n positive_examples = []\n for file in os.listdir('with_datarace'):\n filename = os.fsdecode(file)\n ast_file = open('with_datarace\\\\' + filename, 'r')\n token_vector = ast_file.read()\n positive_examples.append(token_vector)\n file_names.append(filename)\n\n negative_examples = []\n for file in os.listdir('without_datarace\\\\'):\n filename = os.fsdecode(file)\n ast_file = open('without_datarace\\\\' + filename, 'r')\n token_vector = ast_file.read()\n negative_examples.append(token_vector) # List of lists\n file_names.append(filename)\n\n positive_examples = [s.strip() for s in positive_examples]\n negative_examples = [s.strip() for s in negative_examples]\n\n # Split by words\n x_text = positive_examples + negative_examples # why we didn't cobine it from the beginning?\n x_text = [clean_str(sent) for sent in x_text]\n x_text = [s.split(\" \") for s in x_text]\n\n # Generate labels\n positive_labels = [[0, 1] for _ in positive_examples]\n negative_labels = [[1, 0] for _ in negative_examples]\n y = np.concatenate([positive_labels, negative_labels], 0)\n\n return [x_text, y]", "def tokenize(self, path):\n assert os.path.exists(path)\n tokens = 0\n maxLen = 0\n # Find code path and create dictionary\n with open(path, 'r') as f:\n for i, line in enumerate(f):\n filename = line.strip()\n code_path = RAW_DATA_PATH + filename\n assert os.path.exists(code_path)\n try:\n with open(code_path, 'r') as code_f:\n code = code_f.read()\n if len(code) > 100000:\n continue\n kwargs = {'vocab':self.vocab}\n words = tokenizer.tokenize_wrapper(code, **kwargs)\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n except:\n pass\n # Tokenize file content\n with open(path, 'r') as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in f:\n filename = line.strip()\n code_path = RAW_DATA_PATH + filename\n assert os.path.exists(code_path)\n try:\n with open(code_path, 'r') as code_f:\n code = code_f.read()\n if len(code) > 100000:\n continue\n kwargs = {'vocab':self.vocab}\n words = tokenizer.tokenize_wrapper(code, **kwargs)\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n self.tic_marks.append(len(words))\n except Exception as e:\n #raise e\n pass\n return ids", "def prepare_data(self, *args, **kwargs):\n # get paths to train and test splits\n _split_paths = [os.path.join(self.path_to_data, split)\n for split in os.listdir(self.path_to_data)]\n\n # for each split [train, test]\n for _path in _split_paths:\n _img_classes = os.listdir(_path) # get subfolders representing each class\n self.splits[os.path.basename(_path)] = []\n\n # get the images in pairs with its corresponding class\n for _class in _img_classes:\n _data = self.get_img_text_pair(os.path.join(_path, _class))\n\n if os.path.basename(_path) == 'train':\n 
self.weights[self.encode_label(_class)] = len(_data)\n self.splits[os.path.basename(_path)].extend(_data)", "def _read(self, path: str):\n num_samples, length = [int(x) for x in path.split(\":\")]\n random.seed(self.seed)\n for _ in range(num_samples):\n tokens, tags = self._sample(length)\n yield self.text_to_instance(tokens, tags)", "def preprocess(data_path, glove_path, embed_size):\n train_data = read_imdb(data_path, 'train')\n test_data = read_imdb(data_path, 'test')\n\n train_tokenized = []\n test_tokenized = []\n for review, _ in train_data:\n train_tokenized.append(tokenizer(review))\n for review, _ in test_data:\n test_tokenized.append(tokenizer(review))\n\n vocab = set(chain(*train_tokenized))\n vocab_size = len(vocab)\n print(\"vocab_size: \", vocab_size)\n\n word_to_idx = {word: i + 1 for i, word in enumerate(vocab)}\n word_to_idx['<unk>'] = 0\n\n train_features = np.array(pad_samples(encode_samples(train_tokenized, word_to_idx))).astype(np.int32)\n train_labels = np.array([score for _, score in train_data]).astype(np.int32)\n test_features = np.array(pad_samples(encode_samples(test_tokenized, word_to_idx))).astype(np.int32)\n test_labels = np.array([score for _, score in test_data]).astype(np.int32)\n\n weight_np = collect_weight(glove_path, vocab, word_to_idx, embed_size)\n return train_features, train_labels, test_features, test_labels, weight_np, vocab_size", "def prepare(self):\n\n # step 0: load only when not loaded yet\n if TRAINING in self.data \\\n and VALIDATION in self.data:\n return\n\n # step 1: load the file names\n file_list = sorted(glob.glob(self.location+\"*.mhd\"))\n # count the number of data points\n\n # make a stratified validation set\n # note, the seed decides the validation set, but it is deterministic in the names\n random.seed(317070)\n patient_names = [self.patient_name_from_file_name(f) for f in file_list]\n validation_patients = random.sample(patient_names, int(VALIDATION_SET_SIZE*len(patient_names)))\n\n # make the static data empty\n for s in self.datasets:\n self.data[s] = []\n self.labels[s] = []\n self.names[s] = []\n\n # load the filenames and put into the right dataset\n labels_as_dict = defaultdict(list)\n\n with open(paths.LUNA_LABELS_PATH, 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n next(reader) # skip the header\n for row in reader:\n label = (float(row[1]), float(row[2]), float(row[3]), float(row[4]))\n labels_as_dict[str(row[0])].append(label)\n\n for patient_file in file_list:\n patient_name = self.patient_name_from_file_name(patient_file)\n\n if patient_name in validation_patients:\n s = VALIDATION\n else:\n s = TRAINING\n label = labels_as_dict[str(patient_name)]\n if self.only_positive and not label:\n continue\n self.data[s].append(patient_file)\n \n if self.pick_nodule:\n self.labels[s].append([random.choice(label)]) \n else:\n self.labels[s].append(label)\n \n \n self.names[s].append(patient_name)\n\n # give every patient a unique number\n last_index = -1\n for s in self.datasets:\n self.indices[s] = range(last_index+1,last_index+1+len(self.data[s]))\n if len(self.indices[s]) > 0:\n last_index = self.indices[s][-1]\n print s, len(self.indices[s]), \"samples\"", "def _load_single_data(self, dataset_path):\n dataset_file = os.path.join(dataset_path, 'corpus.txt')\n group_text_data = self._load_multi_data(dataset_file)\n self.group_text_data = split_data([text_data for text_data in group_text_data], self.split_ratio)", "def load_data(path_to):\n Y = np.load(path_to + \"Y.npy\")\n 
path_source_token_idxs = np.load(path_to + \"path_source_token_idxs.npy\")\n path_idxs = np.load(path_to + \"path_idxs.npy\")\n path_target_token_idxs = np.load(path_to + \"path_target_token_idxs.npy\")\n context_valid_masks = np.load(path_to + \"context_valid_masks.npy\")\n X = path_source_token_idxs, path_idxs, path_target_token_idxs, context_valid_masks\n\n return X, Y", "def load_data():\n\n dump_path = dump_base + '/micro_poi/mpoi_info/'\n\n assert os.path.exists(dump_path)\n\n dpath = dump_path + 'shortest_path.pickle'\n paths = joblib.load(dpath)\n\n dpath = dump_path + 'path_list.pickle'\n path_list = joblib.load(dpath)\n\n dpath = dump_path + 'gain.pickle'\n gain = joblib.load(dpath)\n\n dpath = dump_path + 'stay.pickle'\n stay_time = joblib.load(dpath)\n\n dpath = dump_path + 'reach.pickle'\n reach_time = joblib.load(dpath)\n\n spath = dump_base + '/micro_poi/model_params.list'\n model_params = np.loadtxt(spath)\n\n return np.array(paths), path_list, gain, stay_time, reach_time, model_params", "def load_python_data(path):\n data = []\n with codecs.open(path, encoding='UTF-8', mode='r') as fi:\n for line in fi:\n data.append(eval(line))\n return data", "def prep_data(self, num_processes):\n filenames = os.listdir(self.beatmaps_root)\n processes = []\n for i in range(num_processes):\n start = i * (len(filenames) // num_processes)\n end = None\n if i != num_processes - 1:\n end = (i + 1) * (len(filenames) // num_processes)\n else:\n end = len(filenames)\n processes.append(Process(target=self._prep_data_worker,\n args=(start, end, filenames)))\n for p in processes:\n p.start()\n for p in processes:\n p.join()" ]
[ "0.67146957", "0.6536038", "0.6505355", "0.6482044", "0.62835264", "0.62004197", "0.61886907", "0.6138994", "0.6122752", "0.6041423", "0.6030121", "0.5910836", "0.58546567", "0.5850749", "0.5726605", "0.5721663", "0.5707934", "0.5683992", "0.5656485", "0.5635938", "0.56157595", "0.55814546", "0.5576371", "0.5552021", "0.55249715", "0.55188847", "0.55072373", "0.5505226", "0.5488263", "0.54665494", "0.54597825", "0.54463786", "0.5421215", "0.5406557", "0.5383287", "0.53825504", "0.5372302", "0.536051", "0.53481174", "0.5342412", "0.5337283", "0.5337232", "0.53326696", "0.5332572", "0.53261006", "0.53186595", "0.5314862", "0.5270989", "0.5268719", "0.52651507", "0.52614933", "0.52558875", "0.5255647", "0.5255233", "0.5245169", "0.5233916", "0.52244", "0.5223971", "0.5221365", "0.5198569", "0.5195152", "0.51924086", "0.51912785", "0.51888716", "0.51831394", "0.5180424", "0.51786566", "0.51783204", "0.5166652", "0.516333", "0.5162036", "0.5152765", "0.51504177", "0.51483184", "0.51478815", "0.5135315", "0.5131913", "0.5130617", "0.51304793", "0.5121066", "0.51194865", "0.51145136", "0.51132977", "0.5106877", "0.5105901", "0.5105529", "0.5105428", "0.51054156", "0.5101773", "0.51013076", "0.50981027", "0.50948423", "0.5092632", "0.50920385", "0.5090154", "0.50884235", "0.50875926", "0.5085582", "0.5081013", "0.50785226" ]
0.6501877
3
Load raw data from the data directory "data_path".
def europarl_raw_data( data_path='bigdata/training', lang1='de-en-german.txt', lang2='de-en-english.txt', max_train_len=32, train_size=1600000, val_size=160000, ): lang1_path = os.path.join(data_path, lang1) lang2_path = os.path.join(data_path, lang2) split_data = _train_val_test_split( [_read_lines(lang1_path), _read_lines(lang2_path)], train_size, val_size ) lang1_train, lang1_val, lang1_test = split_data[0] lang2_train, lang2_val, lang2_test = split_data[1] lang1_idx2word, lang1_word2idx = _build_vocab_from_sentences(lang1_train) lang2_idx2word, lang2_word2idx = _build_vocab_from_sentences(lang2_train) lang1_train_vectorized = _convert_sentences_to_ids( lang1_train, lang1_word2idx ) lang1_val_vectorized = _convert_sentences_to_ids( lang1_val, lang1_word2idx ) lang1_test_vectorized = _convert_sentences_to_ids( lang1_test, lang1_word2idx ) lang2_train_vectorized = _convert_sentences_to_ids( lang2_train, lang2_word2idx ) X_train, y_train = _convert_to_numpy_by_length( lang1_train_vectorized, lang2_train_vectorized, max_train_len, ) X_val = _convert_to_numpy(lang1_val_vectorized) X_test = _convert_to_numpy(lang1_test_vectorized) return { 'vocab': { 'lang1_idx2word': lang1_idx2word, 'lang1_word2idx': lang1_word2idx, 'lang2_idx2word': lang2_idx2word, 'lang2_word2idx': lang2_word2idx, }, 'train': { 'X': X_train, 'y': y_train, }, 'val': { 'X': X_val, 'y': lang2_val, }, 'test': { 'X': X_test, 'y': lang2_test, }, }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_data(self, path):\n with open(self.path_to_file, \"r\") as f:\n data = f.read()\n\n return data", "def load_data(path):\n input_file = os.path.join(path)\n with open(input_file, 'r', encoding='utf-8') as f:\n return f.read()", "def load_data(path):\n with open(path) as f:\n return f.readlines()", "def load_data(path):\n ns = {}\n execfile(path, ns)\n return ns['data']", "def load_data(self, data):\n self._load_raw_data = data", "def get_data(path):\n root = os.path.abspath(os.path.dirname(__file__))\n return os.path.join(root, 'data', path)", "def get_data(path):\n return os.path.join(_ROOT, 'data', path)", "def load_pkl_data(path):\n with open(path, 'rb') as fi:\n data = pickle.load(fi)\n return data", "def load_data(self) -> None:", "def read_data(self, file_path):\n raise NotImplementedError('should be overridden with specific data reader')", "def load_data(data_path):\n if '.csv' in data_path:\n return pd.read_csv(data_path, header=None)\n else:\n with open(data_path, 'r') as handle:\n data = [seq.strip() for seq in handle]\n return data", "def load_data(path_dataset):\n data = read_txt(path_dataset)[1:]\n return preprocess_data(data)", "def _get_data_file(self, data_path):\n\n return json.load(open(data_path))", "def load_data(path):\n try:\n data = pd.read_csv(path, sep='\\t')\n except FileNotFoundError:\n logger.exception(\"Traceback of data file '{}' not found.\".format(path))\n else:\n return data", "def load_data_file(path):\n with open(path, encoding='utf-8') as f:\n return json.load(f)", "def _load_dataset(self, path):\n\t\twhile True:\n\t\t\t\n\t\t\ttry:\n\t\t\t\tX_test = np.load(\"data/X_test.npy\")\n\t\t\t\tY_test = np.load(\"data/Y_test.npy\")\n\t\t\t\tbreak\n\n\t\t\texcept FileNotFoundError:\n\n\t\t\t\tX_test = np.zeros((10000,64,64,3))\n\t\t\t\tY_test = []\n\n\t\t\t\t\n\t\t\t\twith open(path, 'rb') as fo:\n\t\t\t\t\ttemp_element = pickle.load(fo, encoding='bytes')\n\n\t\t\t\ttemp_data = temp_element[b'data']\n\t\t\t\tY_test.extend(temp_element[b'labels'])\n\n\t\t\t\tfor j in range(10000):\n\t\t\t\t\tX_test[j] = self._reshape(temp_data[j])\n\n\t\t\t\tY_test = np.eye(10)[np.array(Y_test)]\n\t\t\t\t\n\t\t\t\tnp.save(\"./data/X_test\", X_test)\n\t\t\t\tnp.save(\"./data/Y_test\", Y_test)\n\n\t\t\t\tbreak\n\n\n\t\treturn X_test, Y_test", "def load(self, path):\n pass", "def load(self, path):\n pass", "def pickle_load(path):\n data = pickle.load(open(os.path.join(os.getcwd(), path), 'rb'))\n return data", "def load_training_data(file_path):\n return load_data(file_path)", "def loadRaw(self, path, preprocfunc=None):\n # Only for 8 and 32 bit images\n depth = self.getDepth()\n if depth==1:\n mamba.raiseExceptionOnError(mambaCore.ERR_BAD_DEPTH)\n \n # Loading the file\n f = file(path, 'rb')\n data = f.read()\n f.close()\n \n # Preprocessing the data if a function was given\n if preprocfunc:\n data = preprocfunc(data)\n \n # Verification over data size\n (w,h) = self.getSize()\n im_size = w*h*(depth/8)\n assert(len(data)==im_size*self.length)\n \n # Loading the data\n for i,im in enumerate(self.seq):\n err = mambaCore.MB_Load(im.mbIm, data[i*im_size:(i+1)*im_size], im_size)\n mamba.raiseExceptionOnError(err)\n self.name = path", "def load(path):\n pass", "def load_data(self):", "def load_CSV_data(path):\n return np.genfromtxt(os.path.join('data/traffic_data', path))", "def loadFromFile(self, path):\n\n if \"~\" in path:\n path = os.path.expanduser(path)\n f = open(path)\n body = f.read()\n f.close()\n self._path = path\n self.loadFromString(body)", "def load(self, path: 
str):\n pass", "def load_data(path=DEFAULT_PICKLE_FILE, pickle=True):\n if type(path) is list:\n dfs = [load_data(p, pickle='%s.pickle' % os.path.basename(p))\n for p in path]\n return pd.concat(dfs)\n elif type(path) is str:\n print(\"Loading\", path)\n if os.path.isfile(path):\n df = pd.read_pickle(path)\n else:\n df = parse_directory(path)\n dataset = basename(normpath(path))\n PICKLE_FILE = join(DATA_DIR, '%s.pickle' % dataset)\n if pickle:\n pickle_path = PICKLE_FILE\n if type(pickle) is str:\n pickle_path = pickle\n print(\"Pickling to\", pickle_path)\n df.to_pickle(pickle_path)\n return df", "def loadData(self,dataPath,resetChannelSelector=True):\r\n self.dataPath = dataPath\r\n self.loadFNIRS(dataPath)\r\n if resetChannelSelector:# Remove all dataplayers and import new channel configuration\r\n self.deleteAllDataplayers()\r\n self.channelSelector.loadData(dataPath)\r\n for dp in self.dataPlayers:\r\n dp.loadData()\r\n dp.draw()", "def read_data(path):\n data = pd.read_csv(path)\n return data", "def read_data(path):\n data = pd.read_csv(path)\n return data", "def _from_file(self, path):\r\n\r\n file = pathlib.Path(path)\r\n log.info(\"Extracting data from file: {}\".format(file))\r\n\r\n with open(file, 'r') as f:\r\n lines = f.readlines()\r\n\r\n self._extract_raw_data(lines)\r\n\r\n return", "def _load_dataset(self, path):\n\t\twhile True:\n\t\t\t\n\t\t\ttry:\n\t\t\t\tX_train = np.load(\"data/X_train.npy\")\n\t\t\t\tX_val = np.load(\"data/X_val.npy\")\n\t\t\t\tY_train = np.load(\"data/Y_train.npy\")\n\t\t\t\tY_val = np.load(\"data/Y_val.npy\")\n\t\t\t\tbreak\n\n\t\t\texcept FileNotFoundError:\n\n\t\t\t\tdata_temp = np.zeros((50000,64,64,3))\n\t\t\t\tlabel_temp = []\n\n\t\t\t\tfor i in range(5):\n\n\t\t\t\t\tfile = path + str(i+1)\n\t\t\t\t\twith open(file, 'rb') as fo:\n\t\t\t\t\t\ttemp_element = pickle.load(fo, encoding='bytes')\n\n\t\t\t\t\ttemp_data = temp_element[b'data']\n\t\t\t\t\tlabel_temp.extend(temp_element[b'labels'])\n\n\t\t\t\t\tfor j in range(10000):\n\t\t\t\t\t\tdata_temp[j+(i*10000)] = self._reshape(temp_data[j])\n\n\t\t\t\tlabel_temp = np.eye(10)[np.array(label_temp)]\n\n\t\t\t\tnp.random.seed(123)\n\t\t\t\tpermutations = list(np.random.permutation(50000))\n\t\t\t\tX = data_temp[permutations, :, : , :] \n\t\t\t\tY = label_temp[permutations, :]\n\t\t\t\tX_train = X[0:40000, :, :, :] \n\t\t\t\tY_train = Y[0:40000, :]\n\t\t\t\tX_val = X[40000:50000, :, :, :] \n\t\t\t\tY_val = Y[40000:50000, :]\n\n\t\t\t\tnp.save(\"./data/X_train\", X_train)\n\t\t\t\tnp.save(\"./data/X_val\", X_val)\n\t\t\t\tnp.save(\"./data/Y_train\", Y_train)\n\t\t\t\tnp.save(\"./data/Y_val\", Y_val)\n\t\t\t\tbreak\n\n\t\treturn X_train, X_val, Y_train, Y_val", "def _read_data(self, path: str) -> T:\n raise NotImplementedError", "def download_and_load(self, data_path=None):\n if data_path is None:\n data_path = 'data'\n\n if not self.check_files(data_path + '/cifar-10-batches-py'):\n self.download_and_extract(data_path=data_path)\n\n self.load_cifar10_data(data_path=data_path + '/cifar-10-batches-py')", "def load(self, path):\n self.df = pd.read_csv(path)\n print(\"Loaded data from {}\".format(path))", "def _load_data(self, imagepath):\n im = cv2.imread(imagepath)\n self.net.blobs['data'].data[...] 
= self.transformer.preprocess('data', im)", "def __init__(self, data_path=root.joinpath(\"data\")):\n self.data_path = data_path", "def _load_dataset(self, data_path, augmentation, batch_size):\n if path.split(data_path)[1] == \"\":\n # Deal with edge case where there's a \"/\" at the end of the path.\n data_path = path.split(data_path)[0]\n\n if path.split(data_path)[1].endswith(\"training\"):\n dataset_name = \"training dataset\"\n else:\n dataset_name = \"validation dataset\"\n\n start_time = time.time()\n self._update_status(\"Loading {}.\".format(dataset_name))\n\n\n dataset = MapillaryDataset(data_path, augmentation, self.iaa)\n data_loader = DataLoader(dataset,\n batch_size,\n shuffle=True)\n\n self._update_status(\"{} loaded. ({} ms)\".format(\n dataset_name.capitalize(),\n int((time.time() - start_time) * 1000)))\n\n return data_loader", "def load_data_filenames_from_path(path: str) -> List[str]:\n return load_filenames_from_path(path, extension='.bin')", "def data_file(self, path):\n return open(os.path.join(self.resource_path, path)).read()", "def load_data(\n self, file_path: str = os.path.join(os.getcwd(), \"data_breast_cancer.p\")\n ) -> None:\n with open(file_path, \"rb\") as file:\n data = pickle.load(file)\n self.x_train, self.y_train = data[\"x_train\"], data[\"y_train\"]\n self.x_test, self.y_test = data[\"x_test\"], data[\"y_test\"]", "def load(self, path):\n\n missing_files = self.check_for_missing_files(path)\n\n if missing_files is not None:\n raise IOError('Invalid dataset of type {}: files {} not found at {}'.format(self.type(), ' '.join(missing_files), path))\n\n loading_dataset = dataset.Dataset(path, loader=self)\n\n self._load(loading_dataset)\n\n return loading_dataset", "def read_data_from_dataset(data_dir_path: str):\n data_list = []\n for fname in ['X_train', 'y_train', 'X_test', 'y_test']:\n with open(f'{data_dir_path}/{fname}.pkl', 'rb') as f:\n data = pickle.load(f)\n data_list.append(data)\n return tuple(data_list)", "def load_data(file_path):\n data = pandas.read_csv(file_path)\n\n return data", "def load_data(path):\n # Training Images Details\n IMG_SIZE = 224 # Size of images used for training\n IMG_MEAN = [0.485, 0.456, 0.406] # image normalization mean\n IMG_SDEV = [0.229, 0.224, 0.225] # image normalization standard deviation\n\n # Training phases\n phases = ['train', 'valid', 'test']\n\n # Define data locations\n data_dir = {n: path + n for n in phases}\n\n # Define transforms for the training, validation, and testing sets\n data_transforms = {\n 'train':\n transforms.Compose([\n transforms.RandomRotation(30),\n transforms.RandomResizedCrop(IMG_SIZE),\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.ToTensor(),\n transforms.Normalize(IMG_MEAN, IMG_SDEV)]),\n 'valid':\n transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(IMG_SIZE),\n transforms.ToTensor(),\n transforms.Normalize(IMG_MEAN, IMG_SDEV)]),\n 'test':\n transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(IMG_SIZE),\n transforms.ToTensor(),\n transforms.Normalize(IMG_MEAN, IMG_SDEV)])\n }\n\n # Load the datasets\n image_datasets = {n: datasets.ImageFolder(\n data_dir[n], transform=data_transforms[n])\n for n in phases}\n\n # Create the PyTorch dataloaders\n dataloaders = {n: torch.utils.data.DataLoader(\n image_datasets[n], batch_size=64, shuffle=True)\n for n in phases}\n\n # mapping of classes to training indices\n class_to_idx = image_datasets['train'].class_to_idx\n\n return dataloaders, class_to_idx", "def _loadData(self, data):\n 
Clip._loadData(self, data)\n PlexSession._loadData(self, data)", "def _read_dataset(self, dataset_path):\n dataset = pd.read_pickle(dataset_path)\n return dataset", "def _load_data(self):\n path = os.path.join(self._cache_path, '%s.data' % self._name)\n\n if not os.path.exists(path):\n raise IOError('Data cache missing at %s' % path)\n\n f = bz2.BZ2File(path)\n data = pickle.loads(f.read())\n f.close()\n\n return data", "def load_data(self):\n self.data = self.read_var(self.datavar)\n self.test_shape(self.datavar, self.data.shape, 2)", "def load_raw_data(path: str) -> pd.DataFrame:\n data = []\n with open(path) as file:\n for line in file:\n data.append(line)\n data_df = pd.DataFrame(data, columns = {'tweet'})\n return data_df", "def load_data(path='./data/train'):\n print(\"Loading IMDB Data...\")\n data = []\n\n dir = os.path.dirname(__file__)\n file_list = glob.glob(os.path.join(dir, path + '/pos/*'))\n file_list.extend(glob.glob(os.path.join(dir, path + '/neg/*')))\n print(\"Parsing %s files\" % len(file_list))\n for i, f in enumerate(file_list):\n with open(f, \"r\", encoding=\"utf8\") as openf:\n s = openf.read()\n data.append(imp.preprocess(s)) # NOTE: Preprocessing code called here on all reviews\n return data", "def load_data(self):\n raise NotImplementedError()", "def load_data(data_path, input_shape):\n # load the original data.\n orig_data = pickle.load(open(data_path, 'rb'), encoding='iso-8859-1')\n\n # Get the set of snr & modulations\n mode_snr = list(orig_data.keys())\n mods, snrs = [sorted(list(set(x[i] for x in mode_snr))) for i in [0, 1]]\n mods.remove('AM-SSB')\n mods.remove('WBFM')\n mods.remove('8PSK')\n mods.remove('BPSK')\n\n # Build the train set.\n samples = []\n labels = []\n samples_snr = []\n mod2cate = dict()\n cate2mod = dict()\n for cate in range(len(mods)):\n cate2mod[cate] = mods[cate]\n mod2cate[mods[cate]] = cate\n\n for snr in snrs:\n for mod in mods:\n samples.extend(orig_data[(mod, snr)])\n labels.extend(1000 * [mod2cate[mod]])\n samples_snr.extend(1000 * [snr])\n\n shape = [len(labels), height, width, 1]\n samples = np.array(samples).reshape(shape)\n samples_snr = np.array(samples_snr)\n labels = np.array(labels)\n return samples, labels, mod2cate, cate2mod, snrs, mods, samples_snr", "def load(self, path):\n\n # Extract files to temporary directory and load content\n with TemporaryDirectory() as directory:\n # Unpack files\n archive = ArchiveFactory.create(directory)\n archive.load(path, \"tar\")\n\n # Load graph backend\n self.loadgraph(f\"{directory}/graph\")\n\n # Load categories, if necessary\n path = f\"{directory}/categories\"\n if os.path.exists(path):\n with open(path, \"rb\") as handle:\n self.categories = pickle.load(handle)\n\n # Load topics, if necessary\n path = f\"{directory}/topics\"\n if os.path.exists(path):\n with open(path, \"rb\") as handle:\n self.topics = pickle.load(handle)", "def load_data(self):\n\n self._load_train_data()\n self._load_test_data()", "def loadData(path):\n try:\n return pd.read_csv(path)\n except Exception as e:\n raise Exception(\"Could not read df, possbily incorrect path: {}\".format(e))", "def load_python_data(path):\n data = []\n with codecs.open(path, encoding='UTF-8', mode='r') as fi:\n for line in fi:\n data.append(eval(line))\n return data", "def load_data(filepath):\n with open(filepath, 'r') as f:\n data = f.read()\n return data", "def load_data(self, data_path):\n data = []\n with open(data_path, \"r\") as f:\n data = [line.split(\"\\t\") for line in f if len(line.strip()) > 0 and\n line.strip()[0] 
!= '#']\n return data", "def load_data():\n with open('data.pickle', 'rb') as f:\n data = pickle.load(f)\n return data", "def load_data_from_files(self):\n # separated method to allow mock easier\n logger.info(\"Loading data...\")\n parent = Path(__file__).parent\n path = parent / \"resources\" / \"scores.txt\"\n self.scorer.load_from_file(path)\n path = parent / \"resources\" / \"american-english-large.txt\"\n self.trie.load_from_file(path)\n path = parent / \"resources\" / \"reels.txt\"\n self.reels = Reel.get_from_file(path)\n logger.info(\"Data loaded!\")", "def load_data(path='mnist.npz'):\n origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'\n path = get_file(\n path,\n origin=origin_folder + 'mnist.npz',\n file_hash=\n '731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1')\n print('############################################' + path) \n with np.load(path, allow_pickle=True) as f: # pylint: disable=unexpected-keyword-arg\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n\n return (x_train, y_train), (x_test, y_test)", "def read_data(path):\n with h5py.File(path, 'r') as hf:\n data = np.array(hf.get('data'))\n return data", "def read_and_save_raw_data(dataPath, filename):\n # Read in Raw data\n datafileNm = os.path.join(dataPath, 'nyt-ingredients-snapshot-2015.csv')\n nytData = pd.read_csv(datafileNm, index_col=None)\n nytData.drop(columns='index', inplace=True)\n\n # Generate training data from NY Times Ingredient Tagging Model\n cleaned_dat = reshape_data(Cli(nytData).df)\n cleaned_dat.to_pickle(os.path.join(dataPath, filename))", "def load_dataset(file_path):\n return Dataset.load(file_path)", "def _load_data(path, sep=sep):\n filename, file_extension = os.path.splitext(path)\n\n if file_extension in [\".csv\", \".tsv\"]:\n df = pd.read_csv(path, index_col=0, sep=sep)\n else:\n df = pd.read_excel(path, index_col=0)\n\n return df", "def changeDataPath(self,path):\n self.dataPath = path", "def __init__(self,path):\n self.path = path\n self.data = {}\n self.hasChanged = False\n #--Load\n if os.path.exists(self.path):\n ins = open(self.path)\n inData = compat.uncpickle(ins)\n self.data.update(inData)", "def load_data(path_to_data, raw_times):\n\n loaded_data = {}\n file_names = [\n \"fore_train_ip\",\n \"fore_valid_ip\",\n \"train_ip\",\n \"valid_ip\",\n \"test_ip\",\n \"fore_train_op\",\n \"fore_valid_op\",\n \"train_op\",\n \"valid_op\",\n \"test_op\",\n ]\n for key in file_names:\n with open(os.path.join(path_to_data, key + \".json\"), \"r\") as openfile:\n loaded_data[key] = json.load(openfile)\n fore_train_ip = [np.array(x) for x in loaded_data[\"fore_train_ip\"]]\n fore_valid_ip = [np.array(x) for x in loaded_data[\"fore_valid_ip\"]]\n train_ip = [np.array(x) for x in loaded_data[\"train_ip\"]]\n valid_ip = [np.array(x) for x in loaded_data[\"valid_ip\"]]\n test_ip = [np.array(x) for x in loaded_data[\"test_ip\"]]\n fore_train_op = np.array(loaded_data[\"fore_train_op\"])\n fore_valid_op = np.array(loaded_data[\"fore_valid_op\"])\n train_op = np.array(loaded_data[\"train_op\"])\n valid_op = np.array(loaded_data[\"valid_op\"])\n test_op = np.array(loaded_data[\"test_op\"])\n del loaded_data\n\n if not raw_times:\n # default is False, so times usually WILL be normalized\n # compute mean and variance of times in training set while ignoring padding\n missing_idx = fore_train_ip[3] == 0\n tmp_times = copy.deepcopy(fore_train_ip[1])\n tmp_times[missing_idx] = np.nan\n time_mean = np.nanmean(tmp_times)\n 
time_stddev = np.nanstd(tmp_times)\n tmp_times = (tmp_times - time_mean) / time_stddev\n tmp_times[missing_idx] = 0\n fore_train_ip = [\n fore_train_ip[0],\n tmp_times,\n fore_train_ip[2],\n fore_train_ip[3],\n ]\n\n # normalize val set times\n missing_idx = fore_valid_ip[3] == 0\n tmp_times = copy.deepcopy(fore_valid_ip[1])\n tmp_times[missing_idx] = np.nan\n tmp_times = (tmp_times - time_mean) / time_stddev\n tmp_times[missing_idx] = 0\n fore_valid_ip = [\n fore_valid_ip[0],\n tmp_times,\n fore_valid_ip[2],\n fore_valid_ip[3],\n ]\n\n # normalize labeled datasets\n for tmp_ip in [train_ip, valid_ip, test_ip]:\n missing_idx = tmp_ip[3] == 0\n tmp_times = copy.deepcopy(tmp_ip[1])\n tmp_times[missing_idx] = np.nan\n tmp_times = (tmp_times - time_mean) / time_stddev\n tmp_times[missing_idx] = 0\n tmp_ip[1] = tmp_times\n else:\n time_mean = time_stddev = None\n\n return (\n fore_train_ip,\n fore_train_op,\n fore_valid_ip,\n fore_valid_op,\n train_ip,\n train_op,\n valid_ip,\n valid_op,\n test_ip,\n test_op,\n time_mean,\n time_stddev,\n )", "def load(self, path):\n with open(path, \"rt\") as open_file:\n data = json.load(open_file)\n return data", "def read_data(self, content_path):\n\n if not os.path.basename(content_path).endswith(\".dat\"):\n raise ValueError(\"this content path is not a data file\")\n\n try:\n # read binary data\n data = self._zip_file.read(content_path)\n\n # decode using big-endian integer\n result = []\n for i in range(int(len(data) / 4)):\n result.append(unpack('!i', data[i * 4:(i + 1) * 4]))\n\n # returning integer-encoded raw data vector\n return np.array(result)\n except IOError:\n print(\"can't read data file\")", "def loaddata(path):\n if path.endswith(\".tiff\") or path.endswith(\".tif\"):\n try:\n from vigra.impex import readVolume\n except ImportError:\n raise ImportError(\"Vigra is needed to read/write TIFF volumes, but could not be imported.\")\n\n volume = readVolume(path)\n return volume\n\n elif path.endswith(\".h5\"):\n try:\n from Antipasti.netdatautils import fromh5\n except ImportError:\n raise ImportError(\"h5py is needed to read/write HDF5 volumes, but could not be imported.\")\n\n volume = fromh5(path)\n return volume\n\n else:\n raise NotImplementedError(\"Can't load: unsupported format. 
Supported formats are .tiff and .h5\")", "def load_binarized(path, params):\n assert path.endswith('.pth')\n assert os.path.isfile(path), path\n logger.info(\"Loading dataOld from %s ...\" % path)\n data = torch.load(path)\n # dataOld = process_binarized(dataOld, params)\n return data", "def load_data(path):\n with open(path) as f:\n content = f.readlines()\n\n data = []\n for i, row in enumerate(content[1:]):\n fields = row.split(';')\n time = fields[1].split(':')\n time = 60*float(time[0]) + float(time[1]) + np.random.rand()\n try:\n data.append(np.array([float(v) for v in fields[2:]] + [time]))\n except ValueError:\n pass\n\n data = np.array(data)\n return data", "def load(self, path):\n self.q = np.load(path)", "def loadData(dataPath):\n\n if not os.path.isdir(dataPath):\n api = KaggleApi()\n api.authenticate()\n api.dataset_download_files(\n dataset='sumanthvrao/daily-climate-time-series-data',\n path=dataPath,\n quiet=True,\n unzip=True\n )\n\n filepath1 = os.path.join(dataPath, 'DailyDelhiClimateTrain.csv')\n df1 = DatasetUtility.sortByDate(pd.read_csv(filepath1, header='infer'))\n\n filepath2 = os.path.join(dataPath, 'DailyDelhiClimateTest.csv')\n df2 = DatasetUtility.sortByDate(pd.read_csv(filepath2, header='infer'))\n\n return pd.concat([df1, df2], axis=0, ignore_index=True)", "def json_data_loader(path):\n res = open(path, 'r').read()\n logging.info(\"Loading file using a pyspark.read.json\")\n data_rdd = Spark.instance.sc().parallelize([res])\n return Spark.instance.spark().read.json(data_rdd)", "def load_data(path):\n train = pd.read_csv(os.path.join(path,'train.csv'))\n test = pd.read_csv(os.path.join(path,'test.csv'))\n \n return train, test", "def __load_raw_data(path: str,\n filename: str):\n filepath = os.path.join(path, filename)\n f = open(filepath)\n data = f.read()\n f.close()\n\n lines = data.split('\\n')\n header = lines[0].split(',')\n lines = lines[1:]\n\n float_data = np.zeros((len(lines), len(header) - 1))\n for i, line in enumerate(lines):\n values = [float(x) for x in line.split(',')[1:]]\n float_data[i, :] = values\n\n return float_data", "def load_data(self, cpath):\n print \"Loading data: %s\" %(time.strftime(ISOTIMEFORMAT, time.localtime()))\n self.click = np.loadtxt(cpath, dtype=np.str, delimiter='|')\n self.click[:, 3] = np.array([time.localtime(float(x))[3] if x.count('.') == 0 else 0 for x in self.click[:, 3]])\n self.click = self.click[:, indices]", "def text8_raw_data(data_path=None):\n text8 = _read_chars(data_path)\n train = text8[:int(9e7)]\n val = text8[int(9e7):int(95e6)]\n test = text8[int(95e6):]\n word_to_id, id_2_word = _build_vocab(train)\n train_data = _file_to_word_ids(train, word_to_id)\n valid_data = _file_to_word_ids(val, word_to_id)\n test_data = _file_to_word_ids(test, word_to_id)\n return train_data, valid_data, test_data, word_to_id, id_2_word", "def load_datamodule(cls, path: Union[str, Path]):\n if isinstance(path, str):\n path = Path(path)\n if not path.exists():\n raise FileNotFoundError(f\"{path} does not exist.\")\n datamodule = joblib.load(path)\n return datamodule", "def load(path, reset=False):\n pass", "def load_data_pickle(path):\n with open(path, 'rb') as f:\n documents = pickle.load(f, encoding=\"bytes\")\n print(\"Loaded: {}\".format(path))\n\n return documents", "def load_pickle(path):\n with open(path, 'rb') as f:\n data = pickle.load(f)\n return data", "def load(self, path):\n if isinstance(path, str) and path.startswith(\"s3://\"):\n raise NotImplementedError(\"TODO: Implement loading from s3\")\n \n # Make a 
folder to save things into\n path = Path(path).resolve()\n \n if path.suffix == \".zip\":\n folder = path.parent / f\"tmp_{path.stem}\"\n folder.mkdir(parents=True, exist_ok=False)\n \n zf = ZipFile(path, mode='r')\n zf.extractall(folder)\n else:\n folder = path\n \n # Load the trace - ugly hack needed due to multiindex not being supported yet\n ifd = az.from_netcdf(folder / \"ifd.nc\")\n ifd.constant_data = ifd.constant_data.set_index({\"idx\": id_cols})\n ifd.observed_data = ifd.observed_data.set_index({\"idx\": id_cols})\n ifd.prior_predictive = ifd.prior_predictive.set_index({\"idx\": id_cols})\n ifd.posterior_predictive = ifd.posterior_predictive.set_index({\"idx\": id_cols})\n ifd.log_likelihood = ifd.log_likelihood.set_index({\"idx\": id_cols})\n self.ifd_ = ifd\n \n # Load other data\n self.C_mean_ = pd.read_csv(folder / \"C_mean_.csv\", index_col=0)[\"C_mean_\"]\n self.C_std_ = pd.read_csv(folder / \"C_std_.csv\", index_col=0)[\"C_std_\"]\n \n # Ideally we would save parameters as YAML or something, but no time\n if path.suffix == \".zip\":\n shutil.rmtree(folder)", "def load_data(self):\n try:\n self.manager.load()\n except error:\n show_error_message(title='Initialization error!',\n message='File lords.sdb was not found!')\n else:\n self.update_widgets_values()", "def load_data(data_config):\n return tfds.load(data_config.path, with_info=data_config.load_with_info)", "def load_data(data_config):\n return tfds.load(data_config.path, with_info=data_config.load_with_info)", "def load_data(\n path: str,\n) -> Union[np.ndarray, sparse.csr.csr_matrix]:\n if osp.splitext(path)[-1] == \".npy\":\n print(\"Assuming sparse matrix...\")\n X_raw = np.load(path, allow_pickle=True)\n X_raw = X_raw.item()\n elif osp.splitext(path)[-1] == \".csv\":\n X_raw = np.loadtxt(path, delimiter=\",\")\n elif osp.splitext(path)[-1] == \".h5ad\":\n adata = sc.read_h5ad(path)\n X_raw = utils.get_adata_asarray(adata=adata)\n elif osp.splitext(path)[-1] == \".loom\":\n adata = sc.read_loom(path)\n X_raw = utils.get_adata_asarray(adata=adata)\n else:\n raise ValueError(\n \"unrecognized file type %s for counts\" % osp.splitext(path)[-1]\n )\n\n return X_raw", "def load_data(root=dataset_dir):\n assert(os.path.exists(root))\n if not os.path.exists(save_file):\n init_data(root)\n with open(save_file, \"rb\") as f:\n dataset = pickle.load(f)\n return dataset", "def _load_data(self):\n\n # This allows a simulated dataset to use the same constructor.\n if self.input_file is None:\n return\n\n logging.info(f\"Loading data from file {self.input_file}\")\n\n # Load the dataset.\n if os.path.isdir(self.input_file):\n self.data = get_matrix_from_mtx(self.input_file)\n else:\n self.data = get_matrix_from_h5(self.input_file)", "def load_data(self):\n\t\ti = 0\n\n\t\tpaths = glob.glob(self.file_path+'/rollout_*')\n\t\tself.rollouts = []\n\n\n\t\tfor path in paths:\n\t\t\tdata_point = np.load(path,encoding='latin1')\n\t\t\tself.rollouts.append(data_point)\n\n\t\treturn paths", "def setDataPath(_path_data_bundle, _path_bin_data, preload=True, verbose=True):\n global path_bin_data\n global path_data_bundle\n path_data_bundle = _path_data_bundle\n path_bin_data = _path_bin_data\n if preload:\n loadExistent(verbose)", "def load_data_from_fold(data_path):\r\n print(\"\\nLoading data from json folder {}\".format(data_path))\r\n\r\n SAMPLES_TO_CONSIDER = 22050\r\n\r\n data = preprocess_dataset(data_path, SAMPLES_TO_CONSIDER)\r\n\r\n X = np.array(data[\"MFCCs\"])\r\n y = np.array(data[\"labels\"])\r\n print(\"Training sets 
loaded!\")\r\n print(\"data size :\", X.shape, \"labels size: \", y.shape)\r\n print(\"release the 'data' for memories\")\r\n del data\r\n\r\n return X, y", "def _load_static_data(module_path):\n equi7_data = None\n fname = os.path.join(os.path.dirname(module_path), \"data\", \"equi7grid.dat\")\n with open(fname, \"rb\") as f:\n equi7_data = pickle.load(f)\n return equi7_data", "def get_raw(path_data=path_data):\n folder = path.join(path_data, CIFAR10H.base_folder_cifarh, \"data\")\n file = path.join(\n folder,\n \"cifar10h-raw.csv\",\n )\n if not path.isfile(file):\n with zipfile.ZipFile(\n path.join(\n folder,\n \"cifar10h-raw.zip\",\n ),\n \"r\",\n ) as zip_ref:\n zip_ref.extractall(folder)\n df = pd.read_csv(file)\n return df", "def load_data(self):\n self.tif_file = self._find_tif_file()\n if self.with_labeling is not None:\n self.colabel_file = self._find_colabeled_file()\n self.colabel_stack = self._load_colabeled_img()\n self.dff, self.indices = self._populate_dff_data()\n self.loaded = True", "def _load_data(self):\n\n if not self._cache.exists(config.DATAFRAME_SONG_DATA):\n source_path = os.path.join(config.S3_SONG_DATA, 'A/A/A/*.json') # Note: song database is way big, so we get only a slice of it.\n dataframe = self._get_spark_session().read.json(source_path)\n self._cache.set_source(config.DATAFRAME_SONG_DATA, dataframe)", "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def _loadData(self, data):\n Clip._loadData(self, data)\n PlexHistory._loadData(self, data)" ]
[ "0.7591733", "0.7125923", "0.70399314", "0.70078355", "0.69444233", "0.6751344", "0.67399246", "0.6689886", "0.6673148", "0.66665107", "0.66659683", "0.6657956", "0.6638113", "0.654055", "0.64056915", "0.63831306", "0.6358966", "0.6358966", "0.63364625", "0.6292081", "0.62728083", "0.62694573", "0.6243247", "0.6221181", "0.6215229", "0.6214256", "0.62113607", "0.61864364", "0.6186269", "0.6186269", "0.6181808", "0.6174439", "0.6163494", "0.6161362", "0.61609805", "0.6145942", "0.61373645", "0.6129515", "0.6127996", "0.61149466", "0.6096118", "0.6086653", "0.60820854", "0.6071469", "0.606466", "0.6060999", "0.60601586", "0.6052799", "0.60447556", "0.6043322", "0.6039579", "0.6034171", "0.6032088", "0.60239434", "0.60154873", "0.6009859", "0.60043216", "0.60009366", "0.5995813", "0.5992812", "0.5985529", "0.5981116", "0.59697354", "0.5968292", "0.5967471", "0.5950496", "0.5948508", "0.5945634", "0.5938311", "0.5931439", "0.5918855", "0.5915558", "0.5913087", "0.59056294", "0.59022015", "0.58993876", "0.58960575", "0.5890205", "0.588929", "0.58798176", "0.58772886", "0.58705044", "0.58696014", "0.5865556", "0.58585554", "0.58557755", "0.58448935", "0.58373666", "0.58373666", "0.58365196", "0.5829604", "0.5827436", "0.58205116", "0.5816756", "0.5807458", "0.5804753", "0.5799527", "0.57903874", "0.5789583", "0.5782676", "0.57816666" ]
0.0
-1
Iterate on the raw PTB data. This chunks up raw_data into batches of examples and returns Tensors that are drawn from these batches.
def ptb_producer(raw_data, batch_size, num_steps, name=None): with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]): raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32) data_len = tf.size(raw_data) batch_len = data_len // batch_size data = tf.reshape(raw_data[0 : batch_size * batch_len], [batch_size, batch_len]) epoch_size = (batch_len - 1) // num_steps assertion = tf.assert_positive( epoch_size, message="epoch_size == 0, decrease batch_size or num_steps") with tf.control_dependencies([assertion]): epoch_size = tf.identity(epoch_size, name="epoch_size") i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue() x = tf.strided_slice(data, [0, i * num_steps], [batch_size, (i + 1) * num_steps]) x.set_shape([batch_size, num_steps]) y = tf.strided_slice(data, [0, i * num_steps + 1], [batch_size, (i + 1) * num_steps + 1]) y.set_shape([batch_size, num_steps]) return x, y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ptb_iterator(raw_data, batch_size, num_steps):\n raw_data = np.array(raw_data, dtype=np.int32)\n\n data_len = len(raw_data)\n batch_len = data_len // batch_size\n data = np.zeros([batch_size, batch_len], dtype=np.int32)\n for i in range(batch_size):\n data[i] = raw_data[batch_len * i:batch_len * (i + 1)]\n\n epoch_size = (batch_len - 1) // num_steps\n\n if epoch_size == 0:\n raise ValueError(\"epoch_size == 0, decrease batch_size or num_steps\")\n\n for i in range(epoch_size):\n x = data[:, i*num_steps:(i+1)*num_steps]\n y = data[:, i*num_steps+1:(i+1)*num_steps+1]\n yield (x, y)", "def ptb_iterator(raw_data, batch_size, num_steps, augment=False):\n raw_data = np.array(raw_data, dtype=np.int32)\n\n if augment:\n # https://github.com/cooijmanstim/recurrent-batch-normalization/blob/master/penntreebank.py#L93\n offset = np.random.randint(num_steps)\n raw_data = raw_data[offset:]\n\n data_len = len(raw_data)\n batch_len = data_len // batch_size\n data = np.zeros([batch_size, batch_len], dtype=np.int32)\n for i in range(batch_size):\n data[i] = raw_data[batch_len * i:batch_len * (i + 1)]\n\n\n epoch_size = (batch_len - 1) // num_steps\n\n if epoch_size == 0:\n raise ValueError(\"epoch_size == 0, decrease batch_size or num_steps\")\n\n for i in range(epoch_size):\n x = data[:, i*num_steps:(i+1)*num_steps]\n y = data[:, i*num_steps+1:(i+1)*num_steps+1]\n yield (x, y)", "def get_data(self):\n if self.random_seeds: \n self._validate_random_seeds()\n seed_iter = list(map(iter,self.random_seeds))\n nsamples = len(self.random_seeds[0])\n else:\n seed_iter = None\n nsamples = self.numsamples\n self._set_meta_features()\n for _ in tqdm(range(nsamples)):\n self._update_meta_features(seed_iter)\n self._sample()\n yield self._extract_features()", "def example_generator(data_path, single_pass, device_rank,data_as_tf_example=True):\n\t\t\n\trandom.seed(device_rank+1)\n\tif data_as_tf_example:\n\t\tepoch = 0\n\t\twhile True:\n\t\t\tfilelist = glob.glob(data_path) # get the list of datafiles\n\t\t\tassert filelist, ('Error: Empty filelist at %s' % data_path) # check filelist isn't empty\n\t\t\tif single_pass:\n\t\t\t\tfilelist = sorted(filelist)\n\t\t\telse:\n\t\t\t\trandom.shuffle(filelist)\n\t\t\t#tf.logging.info(filelist)\t\t\t\t\n\t\t\tfor file_no, f in enumerate(filelist):\n\t\t\t\treader = open(f, 'rb')\n\t\t\t\tall_examples = []\n\t\t\t\twhile True:\n\t\t\t\t\tlen_bytes = reader.read(8)\n\t\t\t\t\tif not len_bytes: \n\t\t\t\t\t\tif not single_pass:\n\t\t\t\t\t\t\trandom.shuffle(all_examples)\n\t\t\t\t\t\tfor k in all_examples:\n\t\t\t\t\t\t\tyield example_pb2.Example.FromString(k), epoch\n\t\t\t\t\t\tbreak # finished reading this file\n\t\t\t\t\t\n\t\t\t\t\tstr_len = struct.unpack('q', len_bytes)[0]\n\t\t\t\t\texample_str = struct.unpack('%ds' % str_len, reader.read(str_len))[0]\n\t\t\t\t\tall_examples.append(example_str)\n\n\t\t\tif single_pass:\n\t\t\t\tprint \"example_generator completed reading all datafiles. 
No more data.\"\n\t\t\t\tbreak\n\n\t\t\n\telse:\n\t\t#pickle format\n\t\twhile True:\n\t\t\tif single_pass:\n\t\t\t\tfor data_ in data_path:\n\t\t\t\t\tfor i in data_:\n\t\t\t\t\t\tyield i\n\t\t\telse:\n\t\t\t\trandom.shuffle(data_path)\n\t\t\t\tfor data_ in data_path:\n\t\t\t\t\tnew_data = data_\n\t\t\t\t\tx = np.arange(len(new_data))\n\t\t\t\t\tnp.random.shuffle(x)\n\t\t\t\t\t# random.shuffle(new_data)\n\t\t\t\t\tfor i in x:\n\t\t\t\t\t\tyield new_data[i]\n\t\t\tif single_pass:\n\t\t\t\tbreak", "def process_data(\n raw_samples: List[Dict[str, Any]],\n relation_vocab: Dict[str, int],\n spacy_model: Any,\n tokenizer: Any,\n) -> Tuple[List[Dict[str, Any]], Dict[str, int]]:\n\n skipped = 0\n processed_samples = []\n for sample in tqdm.tqdm(raw_samples):\n processed_sample, relation_vocab = process_sample(\n sample=sample,\n relation_vocab=relation_vocab,\n spacy_model=spacy_model,\n tokenizer=tokenizer,\n )\n if processed_sample is not None:\n processed_samples.append(processed_sample)\n else:\n skipped += 1\n\n logging.info('Skipped %s samples', skipped)\n\n return processed_samples, relation_vocab", "def get_raw_batch(data, min_index=0, max_index=None, \r\n batch_size=16, n_steps=150, step_length=1000):\r\n\r\n if max_index is None:\r\n #max_index = len(data) - 1\r\n max_index = len(data)\r\n\r\n # Pick indices of ending positions\r\n #rows = np.random.randint(min_index + n_steps * step_length, max_index, size=batch_size)\r\n rows = np.random.randint(min_index, max_index/(n_steps*step_length), size=batch_size) + 1\r\n rows = rows * n_steps * step_length\r\n \r\n # Initialize feature matrices and targets\r\n samples = np.zeros((batch_size, n_steps, n_features))\r\n data_list = []\r\n\r\n for j, row in enumerate(rows):\r\n data_list.append([X_preprocessor(data=data[:, 0], last_index=row, \r\n n_steps=n_steps, step_length=step_length), # may be modified\r\n data[row - 1, 1]])\r\n\n #watch_dog.feeding(os.popen(\"free -h\").read()) # debugging\r\n\r\n return data_list", "def testData(self, ):\n count = 0\n while count < len(self.RAD_sequences_test):\n RAD_filename = self.RAD_sequences_test[count] \n RAD_complex = loader.readRAD(RAD_filename)\n if RAD_complex is None:\n raise ValueError(\"RAD file not found, please double check the path\")\n ### NOTE: Gloabl Normalization ###\n RAD_data = helper.complexTo2Channels(RAD_complex)\n RAD_data = (RAD_data - self.config_data[\"global_mean_log\"]) / \\\n self.config_data[\"global_variance_log\"]\n ### load ground truth instances ###\n gt_filename = loader.gtfileFromRADfile(RAD_filename, \\\n self.config_data[\"test_set_dir\"])\n gt_instances = loader.readRadarInstances(gt_filename)\n if gt_instances is None:\n raise ValueError(\"gt file not found, please double check the path\")\n\n ### NOTE: decode ground truth boxes to YOLO format ###\n gt_labels, has_label, raw_boxes = self.encodeToLabels(gt_instances)\n\n if has_label:\n yield (RAD_data, gt_labels, raw_boxes)\n count += 1", "def get_data():\n samples = []\n for fn in files:\n samples.extend(_json.load(open(fn, \"r\")))\n for sample in samples:\n graph = _nx.readwrite.json_graph.node_link_graph(sample)\n _edges = graph.edges(data=True)\n _nodes = dict(graph.nodes(data=True)).values()\n sources, targets, edges = zip(*[(src, tgt, edge) for src, tgt, edge in _edges])\n edge_features = _tf.constant(_np.array([\n [edge[k] for k in edge_feature_names if k in edge] for edge in edges\n ]))\n edge_sources = _tf.squeeze(_tf.constant(_np.array(sources)))\n edge_targets = 
_tf.squeeze(_tf.constant(_np.array(targets)))\n node_features = _tf.constant(_np.array([\n [node[k] for k in node_feature_names if k in node]\n for node in _nodes\n ]))\n additional_inputs = (\n _tf.constant(_np.array([\n [node[k] for k in additional_inputs_names if k in node]\n for node in _nodes\n ]))\n if local else\n _tf.constant(_np.array([\n graph.graph[additional_input] for additional_input in additional_inputs_names\n if additional_input in graph.graph\n ]))\n )\n data = GNNInput(\n edge_features=edge_features,\n edge_sources=edge_sources,\n edge_targets=edge_targets,\n node_features=node_features,\n additional_inputs=additional_inputs,\n )\n if local:\n y = _tf.squeeze(_tf.constant(_np.array([\n [node[k] for k in target if k in node] for node in _nodes\n ])))\n else:\n y = _tf.constant(_np.array([\n graph.graph[_target] for _target in target if _target in graph.graph\n ]))\n yield data, y", "def get_data_gen_datasets(raw_data,normalize_data=True):\n\n training_set,validation_set,test_set,class_labels = raw_data\n print \"Loading data succeeded\"\n\n if normalize_data:\n training_set_samples = normalize(training_set[:,0:-1])\n validation_set_samples = normalize(validation_set[:,0:-1])\n testing_set_samples = normalize(test_set[:,0:-1])\n\n print \"Normalized data\"\n\n # assume square patches\n inp_shape = int(numpy.sqrt(training_set_samples.shape[1]))\n\n training_set_samples = theano.shared(training_set_samples.astype(theano.config.floatX))\n validation_set_samples = theano.shared(validation_set_samples.astype(theano.config.floatX))\n test_set_samples = theano.shared(testing_set_samples.astype(theano.config.floatX))\n\n training_set_labels = theano.shared(training_set[:,-1].astype('int32')) # FIXME: uint8 should be sufficient\n validation_set_labels = theano.shared(validation_set[:,-1].astype('int32'))\n test_set_labels = theano.shared(test_set[:,-1].astype('int32'))\n\n print \"Unpacked data\"\n\n return ((training_set_samples, training_set_labels), \\\n (validation_set_samples, validation_set_labels), \\\n (test_set_samples, test_set_labels), \\\n inp_shape,class_labels)", "def __iter__(self):\n for sample in self.data:\n yield sample", "def data_generator(self, data):\n X, y = [], []\n while 1:\n np.random.shuffle(data)\n for line in data:\n img = Image.open(line[0])\n img = img.resize((32, 16))\n img = np.asarray(img, dtype=np.float32)\n img = img / 128. 
- 1.\n img = np.transpose(img, (2, 0, 1)) \n X.append(img)\n y.append(line[1])\n if len(X) == self.config.batch_size:\n batch = (np.asarray(X), np.asarray(y))\n X = []\n y = []\n yield batch", "def process(self, data_batch: Any, data_samples: Sequence[dict]) -> None:", "def process(self, data_batch: Sequence[Dict],\n data_samples: Sequence[Dict]) -> None:\n for data_sample in data_samples:\n pred_labels = data_sample.get('pred_instances').get(self.key).cpu()\n gt_labels = data_sample.get('gt_instances').get(self.key).cpu()\n\n result = dict(\n pred_labels=pred_labels.flatten(),\n gt_labels=gt_labels.flatten())\n self.results.append(result)", "def iter_raw(self, ftcols=['x', 'y']):\n self.makeTree()\n data = self.frametracks[ftcols].values\n coords = self.coords\n sd = selfdistance\n for i in self.loopindices:\n dists, inds = self.nntree.query(coords[i], self.nnmaxcount,\n distance_upper_bound=self.nncutoff)\n yield data[i], data[inds.compress((dists > sd) & ~np.isinf(dists))]", "def sample(self,\n data: Sequence[Sequence[torch.Tensor]],\n n_epochs: int = 1) -> Tuple[List[List[int]], List[List[int]], List[int]]:\n\n all_queries = []\n all_targets = []\n for q, t in data:\n all_queries.append(q)\n all_targets.append(t)\n\n print(f'sampler size: {len(all_queries)}')\n\n\n self.n_batch = int(np.ceil(data.__len__() / self.batch_size))\n print(\"n_batch:\", self.n_batch)\n\n for i in range(self.n_batch):\n # position = i * self.batch_size\n # queries = all_queries[position:position + self.batch_size]\n # targets = all_targets[position:position + self.batch_size]\n sample_index = np.random.choice(len(all_queries), self.batch_size)\n queries = [all_queries[i] for i in sample_index]\n targets_label = [all_targets[i] for i in sample_index]\n\n # targets = self.transform_label(targets_label)\n\n # labels = np.arange(len(queries))\n\n # queriess = np.array(queries)\n all_targets_text = self.all_targets\n queries = pad_sequence(queries, batch_first=self.batch_first, padding_value=0)\n\n # targets, queries, labels = torch.tensor(targets), torch.tensor(labels)\n # print(queries[:5])\n # print(len(all_targets_text))\n\n\n targets_label = torch.tensor(targets_label)\n yield (queries, all_targets_text, targets_label)", "def prepare_typerec_dataset(self, data_raw):\n\n self._logger.info(f'Preparing Wikidata-TypeRec dataset ({len(data_raw)} lines)...')\n data = []\n line_count = 0\n sample_count = 0\n sample_count_failed = 0\n\n for line in tqdm(data_raw):\n line_count += 1\n\n try:\n sample = self.prepare_typerec_sample(line)\n data.append(sample)\n sample_count += 1\n except Exception as e:\n self._logger.info(str(e))\n sample_count_failed += 1\n\n self._logger.info(f'Prepared {sample_count} samples from {line_count} lines (skipped {sample_count_failed} failed)')\n\n return data", "def prepare_dataset(self, data_raw):\n\n self._logger.debug(f'Preparing dataset ({len(data_raw)} lines)...')\n data = []\n line_count = 0\n sample_count = 0\n sample_count_failed = 0\n\n for line in tqdm(data_raw):\n line_count += 1\n #self._logger.debug(f'Line {line_count}/{len(data_raw)}')\n\n try:\n # TODO Call prepare_sample() here?\n sample = {}\n\n sample['text'] = line['text']\n sample['text_tokenized'] = None # set by add_tokens()\n sample['text_attention_mask'] = None # set by add_tokens()\n sample['item_name'] = line['string']\n self.add_tokens(sample)\n sample['text_mention_mask'] = None # set by add_mention_mask()\n self.add_mention_mask(sample)\n\n # Once for correct Wikidata item\n sample['item_id'] = 
line['correct_id']\n sample['item_pbg'] = self._pbg.get_item_embedding(line['correct_id'])\n sample['item_glove'] = np.empty((1, 900)) # TODO\n sample['answer'] = True\n data.append(sample)\n sample_count += 1\n\n # Once for wrong Wikidata item\n sample['item_id'] = line['wrong_id']\n sample['item_pbg'] = self._pbg.get_item_embedding(line['wrong_id'])\n sample['item_glove'] = np.empty((1, 900)) # TODO\n sample['answer'] = False\n data.append(sample)\n sample_count += 1\n\n except ValueError as e: # skip sample when there is no embedding found\n self._logger.info(str(e))\n sample_count_failed += 1\n continue\n\n self._logger.debug(f'Prepared {sample_count} samples from {line_count} lines (skipped {sample_count_failed} failed)')\n\n return data", "def generate_batches(data, labels, batch_size):\n for start in range(0, len(data), batch_size):\n yield Tensor(data[start:start+batch_size, ...]), Tensor(labels[start:start+batch_size, ...])", "def data_gen(\n v: int, batch: int, nbatches: int, device: torch.device = torch.device(\"cpu\")\n) -> Iterator[Batch]: # TODO bad name\n for i in range(nbatches):\n data = np.random.randint(1, v, size=(batch, 10))\n data[:, 0] = 1\n src: LongTensorType = torch.from_numpy(data)\n tgt: LongTensorType = torch.from_numpy(data)\n src, tgt = src.to(device), tgt.to(device)\n yield Batch(src, tgt, 0)", "def pi_iterator(raw_data, batch_size, reshuffle=True):\n\n prems, hyps, prem_len, hyp_len, labels = raw_data\n data_len = len(labels)\n\n if reshuffle:\n shuffler = list(range(data_len))\n random.shuffle(shuffler)\n\n prems = [prems[i] for i in shuffler]\n hyps = [hyps[i] for i in shuffler]\n prem_len = [prem_len[i] for i in shuffler]\n hyp_len = [hyp_len[i] for i in shuffler]\n labels = [labels[i] for i in shuffler]\n\n num_epoch = data_len // batch_size\n\n prems = np.array(prems, dtype=np.int32)\n premmasks = np.zeros((data_len, max(prem_len) + 1)) # need + 1 because of \"<eos>\"\n premmasks[np.arange(data_len), np.array(prem_len, dtype=np.int32)] = 1.0\n\n hyps = np.array(hyps, dtype=np.int32)\n hypmasks = np.zeros((data_len, max(hyp_len) + 1)) # need + 1 because of \"<eos>\"\n hypmasks[np.arange(data_len), np.array(hyp_len, dtype=np.int32)] = 1.0\n\n labels = np.array(labels, dtype=np.int32)\n\n for i in range(num_epoch):\n prem = prems[i * batch_size: (i+1) * batch_size]\n premmask = premmasks[i * batch_size: (i+1) * batch_size]\n\n hyp = hyps[i * batch_size: (i+1) * batch_size]\n hypmask = hypmasks[i * batch_size: (i+1) * batch_size]\n\n label = labels[i * batch_size: (i+1) * batch_size]\n prem_lens = []\n for mask in premmask:\n print(mask)\n print(len(mask))\n prem_len.append(len(mask))\n\n hyp_lens = []\n for mask in hypmask:\n hyp_len.append(len(mask))\n yield (prem, hyp, prem_lens, hyp_lens, label)", "def batch_data(cls, train_data, train_labels, batch_size):\n for batch in range(int(np.ceil(train_data.shape[0] / batch_size))):\n start = batch_size * batch\n end = start + batch_size\n if end > train_data.shape[0]:\n yield batch, (train_data[start:train_data.shape[0]], \\\n train_labels[start:train_data.shape[0]])\n else:\n yield batch, (train_data[start:end], \\\n train_labels[start:end])", "def data_iterator(self, ithFileReader):\n print('data_iterator', ithFileReader, threading.current_thread())\n while True:\n sampX, sampY = self.sampleTrain(ithFileReader) if self.config.is_train else self.sampleValid(ithFileReader)\n yield sampX, sampY", "def data_generator(batch_size, preprocessor, x, y):\n num_examples = len(x)\n examples = zip(x, y)\n examples = 
sorted(examples, key = lambda x: x[0].shape[0])\n end = num_examples - batch_size + 1\n batches = [examples[i:i+batch_size]\n for i in range(0, end, batch_size)]\n random.shuffle(batches)\n while True:\n for batch in batches:\n x, y = zip(*batch)\n yield preprocessor.process(x, y)", "def ptb_producer(raw_data, batch_size, num_steps, word_to_id):\n x = []\n y = []\n n_batches = len(raw_data) // batch_size\n for sentence in raw_data:\n mask_index = get_mask_index(sentence)\n current_label = sentence[mask_index]\n sentence[mask_index] = word_to_id['<mask>']\n y.append(current_label)\n x.append(sentence)\n x = np.array(x)\n x = x[:n_batches*batch_size]\n x = np.reshape(x, [n_batches, batch_size, num_steps])\n y = np.array(y)\n y = y[:n_batches * batch_size]\n y = np.reshape(y, [n_batches, batch_size])\n return x, y", "def next(self):\n with self.lock:\n index_array = next(self.index_generator)\n # The transformation of images is not under thread lock\n # so it can be done in parallel\n return self._get_batches_of_transformed_samples(index_array)", "def _yield_testing(self, batch_index):\n samples_start = batch_index % self.num_samples\n samples_end = (batch_index+1) % self.num_samples\n if samples_start < samples_end:\n batch_samples = self.test_data[samples_start:samples_end]\n else:\n batch_samples = self.test_data[samples_start:]\n batch_samples.extend(self.test_data[:samples_end])\n images = []\n rois = []\n for sample in batch_samples:\n # 'sample' has this structure:\n # {path: {\n # 'roi_origin_x': test_sample[1]['roi_origin_x'],\n # 'roi_origin_y': test_sample[1]['roi_origin_y'],\n # 'roi_width': test_sample[1]['roi_width'],\n # 'roi_height': test_sample[1]['roi_height'] \n # } \n # }\n img_path = os.path.join(self.dataset_root_path, list(sample.keys())[0])\n img = cv2.imread(img_path) # watch out for slashes (/)\n # if the path does not exist or there are problems while reading the image\n if img is None:\n print('[DATA LOADER ERROR] cannot find image at path: ', img_path)\n continue\n roi_data = list(sample.values())[0]\n roi = {\n 'upper_left_x': roi_data['roi_origin_x'],\n 'upper_left_y': roi_data['roi_origin_y'],\n 'width': roi_data['roi_width'],\n 'height': roi_data['roi_height']\n }\n img = img.astype('float32')\n images.append(img)\n rois.append(roi)\n return images, rois", "def _creatExamplesTensorData(self, examples):\n\n images = []\n \n images2 = []\n images3 = []\n images4 = []\n images5 = [] \n labels = []\n for (img_idx, label) in examples:\n img = self.dataset[img_idx][0]\n #print(img)\n ##exit(0)\n if self.load:\n img = Image.fromarray(img)\n else:\n img = read_image(img)\n #print(img.size)\n #print(np.array(img).shape)\n #exit(0)\n if self.transform is not None:\n img1 = self.transform(img)\n\n img2 = self.transform_test(img)\n img3 = self.transform_test(img)\n img4 = self.transform_test(img)\n img5 = self.transform_test(img) \n #print((img2-img1).abs().sum(),(img3-img1).abs().sum(),(img2-img3).abs().sum())\n #print(img.shape,'located in test_loader.py at 146')\n #exit(0)\n images.append(img1)\n \n images2.append(img2)\n images3.append(img3)\n images4.append(img4)\n images5.append(img5) \n labels.append(label)\n images = torch.stack(images, dim=0)\n\n images2 = torch.stack(images2, dim=0)\n images3 = torch.stack(images3, dim=0)\n images4 = torch.stack(images4, dim=0)\n images5 = torch.stack(images5, dim=0) \n labels = torch.LongTensor(labels)\n return images, images2,images3,images4,images5,labels", "def ptb_raw_data(data_path=None, prefix=\"ptb\"):\n\n train_path = 
os.path.join(data_path, prefix + \".train.txt\")\n valid_path = os.path.join(data_path, prefix + \".valid.txt\")\n test_path = os.path.join(data_path, prefix + \".test.txt\")\n train_w = _read_words(train_path)\n valid_w = _read_words(valid_path)\n test_w = _read_words(test_path)\n word_to_id, id_2_word = _build_vocab(train_w)\n train_data = _file_to_word_ids(train_w, word_to_id)\n valid_data = _file_to_word_ids(valid_w, word_to_id)\n test_data = _file_to_word_ids(test_w, word_to_id)\n return train_data, valid_data, test_data, word_to_id, id_2_word", "def __iter__(self):\n batch = []\n for sample in self.dataset:\n batch.append(sample)\n if len(batch) == self.size:\n yield self.transform(batch)\n batch = []\n if batch:\n # the last batch may be less then batch size.\n yield self.transform(batch)", "def __iter__(self):\n for sample in self.samples:\n yield sample", "def _generate_examples(self, filepath):\n # Simultaneously iterating through the different data sets in the hdf5\n # file is >100x slower and the data set is small (26.7MB). Hence, we first\n # load everything into memory before yielding the samples.\n with tfds.core.lazy_imports.h5py.File(filepath, \"r\") as h5dataset:\n image_array = np.array(h5dataset[\"imgs\"])\n class_array = np.array(h5dataset[\"latents\"][\"classes\"])\n values_array = np.array(h5dataset[\"latents\"][\"values\"])\n\n for i, (image, classes, values) in enumerate(moves.zip(\n image_array, class_array, values_array)):\n record = dict(\n image=np.expand_dims(image, -1),\n label_shape=classes[1],\n label_scale=classes[2],\n label_orientation=classes[3],\n label_x_position=classes[4],\n label_y_position=classes[5],\n value_shape=values[1],\n value_scale=values[2],\n value_orientation=values[3],\n value_x_position=values[4],\n value_y_position=values[5])\n if self.version > \"2.0.0\":\n record[\"id\"] = \"{:06d}\".format(i)\n yield i, record", "def trainDataGenerator(num_epochs):\r\n samples, all_files = get_filenames()\r\n for num in range(num_epochs):\r\n for i in range(len(samples)):\r\n sample = samples[i]\r\n for file in all_files[i]:\r\n ohvs, Y = prepData(sample, file)\r\n if (ohvs == []):\r\n continue\r\n X = np.array([ohvs[:800]])\r\n yield X, Y\r\n # for i in range(0, len(ohvs), 400):\r\n # X = np.array([ohvs[i : i+400]])\r\n # print(\"\\tX shape =\", X.shape)\r\n # yield X, Y\r", "def test_data() -> Iterator[Tuple[Label, ChanneledImage]]:\n return zip(*get_data(TEST_FILES, 10000))", "def gen_batches(data, batch_size=2048):\n indices = torch.randperm(len(data))\n indices = indices.cuda()\n\n for idx in range(0, len(data) - batch_size + 1, batch_size):\n sample = indices[idx:idx + batch_size]\n l_words, r_words = data.L_words[sample], data.R_words[sample]\n l_vecs = data.l_vecs[l_words]\n r_vecs = data.r_vecs[r_words]\n l_bias = data.l_biases[l_words]\n r_bias = data.r_biases[r_words]\n weight = data.weights[sample]\n y = data.y[sample]\n yield weight, l_vecs, r_vecs, y, l_bias, r_bias", "def _generate_examples(self, data_dir_path):\n\n for class_name in tf.io.gfile.listdir(data_dir_path):\n class_dir_path = os.path.join(data_dir_path, class_name)\n for image_name in tf.io.gfile.listdir(class_dir_path):\n image = os.path.join(class_dir_path, image_name)\n yield image, {\n \"image\": image,\n \"label\": class_name,\n }", "def train_data() -> Iterator[Tuple[Label, ChanneledImage]]:\n return zip(*get_data(TRAIN_FILES, 60000))", "def next(self):\n # Keeps under lock only the mechanism which advances\n # the indexing of each batch.\n with self.lock:\n 
index_array = next(self.index_generator)\n # The transformation of images is not under thread lock\n # so it can be done in parallel\n return self._get_batches_of_transformed_samples(index_array)", "def trainData(self,):\n count = 0\n while count < len(self.RAD_sequences_train):\n RAD_filename = self.RAD_sequences_train[count] \n RAD_complex = loader.readRAD(RAD_filename)\n if RAD_complex is None:\n raise ValueError(\"RAD file not found, please double check the path\")\n ### NOTE: Gloabl Normalization ###\n RAD_data = helper.complexTo2Channels(RAD_complex)\n RAD_data = (RAD_data - self.config_data[\"global_mean_log\"]) / \\\n self.config_data[\"global_variance_log\"]\n ### load ground truth instances ###\n gt_filename = loader.gtfileFromRADfile(RAD_filename, \\\n self.config_data[\"train_set_dir\"])\n gt_instances = loader.readRadarInstances(gt_filename)\n if gt_instances is None:\n raise ValueError(\"gt file not found, please double check the path\")\n\n ### NOTE: decode ground truth boxes to YOLO format ###\n gt_labels, has_label, raw_boxes = self.encodeToLabels(gt_instances)\n\n if has_label:\n yield (RAD_data, gt_labels, raw_boxes)\n count += 1\n if count == len(self.RAD_sequences_train) - 1:\n # np.random.seed() # should I add seed here ?\n np.random.shuffle(self.RAD_sequences_train)", "def process(self, data_batch: Sequence[dict],\n data_samples: Sequence[dict]) -> None:\n for data_sample in data_samples:\n # predicted keypoints coordinates, [1, K, D]\n pred_coords = data_sample['pred_instances']['keypoints']\n # ground truth data_info\n gt = data_sample['gt_instances']\n # ground truth keypoints coordinates, [1, K, D]\n gt_coords = gt['lifting_target']\n # ground truth keypoints_visible, [1, K, 1]\n mask = gt['lifting_target_visible'].astype(bool).reshape(1, -1)\n # instance action\n img_path = data_sample['target_img_path']\n _, rest = osp.basename(img_path).split('_', 1)\n action, _ = rest.split('.', 1)\n\n result = {\n 'pred_coords': pred_coords,\n 'gt_coords': gt_coords,\n 'mask': mask,\n 'action': action\n }\n\n self.results.append(result)", "def ptb_raw_data(data_path=None, prefix=\"ptb\"):\n\n train_path = os.path.join(data_path, prefix + \".train.txt\")\n valid_path = os.path.join(data_path, prefix + \".valid.txt\")\n test_path = os.path.join(data_path, prefix + \".test.txt\")\n\n word_to_id, id_2_word = _build_vocab(train_path)\n train_data = _file_to_word_ids(train_path, word_to_id)\n valid_data = _file_to_word_ids(valid_path, word_to_id)\n test_data = _file_to_word_ids(test_path, word_to_id)\n return train_data, valid_data, test_data, word_to_id, id_2_word", "def __data_generation(self, rows):\n samples = np.zeros((rows, self.image_width, self.image_height, self.image_depth))\n targets = np.zeros((rows, self.image_width, self.image_height, self.num_classes))\n for j in range(rows):\n for row1, row2 in zip(self.reader1, self.reader2):\n array_row1 = np.array(row1, dtype=np.float)\n samples[j,:,:,:] = preprocess_feature(array_row1,\n self.image_width, self.image_height, self.image_depth)\n try:\n next(self.reader1)\n except StopIteration:\n print(\"CSV iteration end for feature. Calling 'break'.\")\n break\n\n array_row2 = np.array(row2, dtype=np.int)\n targets[j,:,:,:] = preprocess_label(array_row2,\n self.image_width, self.image_height, self.num_classes)\n try:\n next(self.reader2)\n except StopIteration:\n print(\"CSV iteration end for label. 
Calling 'break'.\")\n break\n\n return samples, targets", "def data(self, train=True, batch_size=2):\n if train:\n elements = self.prepare_batch(self.training_albums)\n else:\n elements = self.prepare_batch(self.validation_albums)\n\n while len(elements) > 0:\n # Collect the batch\n batch = []\n for _ in range(min(batch_size, len(elements))):\n batch.append(elements.pop())\n\n # Get same sequence size for all elements of the batch\n albums, labels = self.batchify(batch)\n yield albums, labels", "def mock_raw_data(tmp_dir, raw_dim=1024, num_channels=3, num_images=1):\n\n tf.gfile.MakeDirs(tmp_dir)\n\n for image_id in range(num_images):\n\n raw_image_path = os.path.join(tmp_dir, \"%s.jpg\" % image_id)\n\n mock_raw_image(x_dim=raw_dim, y_dim=raw_dim,\n num_channels=num_channels,\n output_path=raw_image_path)", "def fill_example_queue(self):\n\t\tinput_gen = self.text_generator(data.example_generator(self._data, self._single_pass,self._device_id, data_as_tf_example=self._data_as_tf_example))\n\t\tcount = 0\n\t\tquery = None\n\t\tword_edge_list = None\n\t\tquery_edge_list = None\n\t\tif self._data_as_tf_example:\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\t article, abstract, word_edge_list, query, query_edge_list, epoch_num = input_gen.next() # read the next example from file. article and abstract are both strings.\n\t\t\t\t\t #tf.logging.info(random.randint(1,101))\n\t\t\t\texcept StopIteration: # if there are no more examples:\n\t\t\t\t\ttf.logging.info(\"The example generator for this example queue filling thread has exhausted data.\")\n\t\t\t\t\tif self._single_pass:\n\t\t\t\t\t\ttf.logging.info(\"single_pass mode is on, so we've finished reading dataset. This thread is stopping.\")\n\t\t\t\t\t\tself._finished_reading = True\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\traise Exception(\"single_pass mode is off but the example generator is out of data; error.\")\n\t\t\t\tabstract_sentences = [sent.strip() for sent in data.abstract2sents(abstract)] # Use the <s> and </s> tags in abstract to get a list of sentences.\n\t\t\t\texample = Example(article, abstract_sentences, self._vocab, self._hps, word_edge_list=word_edge_list, query=query, query_edge_list=query_edge_list, epoch_num=epoch_num, bert_vocab=self.bert_vocab)\n\t\t\t\tself._example_queue.put(example)\n\t\telse:\n\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\tcurr_data = input_gen.next()\n\t\t\t\t\tcount = count + 1\n\t\t\t\t\tarticle = curr_data['article']\n\t\t\t\t\tabstract = curr_data['abstract'].strip()\n\t\t\t\t\tif self._hps.word_gcn.value:\n\t\t\t\t\t\tword_edge_list = curr_data['word_edge_list']\n\t\t\t\t\tif self._hps.query_encoder.value:\n\t\t\t\t\t\tquery = curr_data['query']\n\t\t\t\t\tif self._hps.query_gcn.value:\n\t\t\t\t\t\tquery_edge_list = curr_data['query_edge_list']\n\t\t\t\texcept Exception as e: # if there are no more examples:\n\t\t\t\t\ttf.logging.info(\"The example generator for this example queue filling thread has exhausted data.\")\n\t\t\t\t\tif self._single_pass:\n\t\t\t\t\t\ttf.logging.info(\n\t\t\t\t\t\t\t\"single_pass mode is on, so we've finished reading dataset. 
This thread is stopping.\")\n\t\t\t\t\t\tself._finished_reading = True\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\ttf.logging.info(e)\n\t\t\t\t\t\traise Exception(\"single_pass mode is off but the example generator is out of data; error.\")\n\n\t\t\t\tabstract_sentences = [sent.strip() for sent in data.abstract2sents(abstract)] # Use the <s> and </s> tags in abstract to get a list of sentences.\n\t\t\t\texample = Example(article, abstract_sentences, self._vocab, self._hps, word_edge_list=word_edge_list, query=query, query_edge_list=query_edge_list, epoch_num=epoch_num)\n\t\t\t\tself._example_queue.put(example) # place the Example in the example queue.", "def next(self):\n #print('next')\n batch_size = self.batch_size\n batch_data = nd.empty((batch_size,)+self.data_shape)\n batch_label = nd.empty((batch_size,)+self.label_shape)\n i = 0\n #self.cutoff = random.randint(800,1280)\n try:\n while i < batch_size:\n #print('N', i)\n data, label, annot = self.next_sample()\n R = self.get_data(data, label, annot)\n if R is None:\n continue\n data_out, label_out, flip_data_out, flip_label_out = R\n if not self.use_coherent:\n data = nd.array(data_out)\n data = nd.transpose(data, axes=(2, 0, 1))\n label = nd.array(label_out)\n #print(data.shape, label.shape)\n batch_data[i][:] = data\n batch_label[i][:] = label\n i += 1\n else:\n data = nd.array(data_out)\n data = nd.transpose(data, axes=(2, 0, 1))\n label = nd.array(label_out)\n data2 = nd.array(flip_data_out)\n data2 = nd.transpose(data2, axes=(2, 0, 1))\n label2 = nd.array(flip_label_out)\n #M = nd.array(M)\n #print(data.shape, label.shape)\n batch_data[i][:] = data\n batch_label[i][:] = label\n #i+=1\n j = i+self.per_batch_size//2\n batch_data[j][:] = data2\n batch_label[j][:] = label2\n i += 1\n if j%self.per_batch_size==self.per_batch_size-1:\n i = j+1\n except StopIteration:\n if i<batch_size:\n raise StopIteration\n\n #return {self.data_name : batch_data,\n # self.label_name : batch_label}\n #print(batch_data.shape, batch_label.shape)\n return mx.io.DataBatch([batch_data], [batch_label], batch_size - i)", "def _compute_samples(self, samples):\n return samples", "def generator(data_dir, samples, batch_size=32):\n num_samples = len(samples)\n while 1:\n sklearn.utils.shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n\n images = []\n measurements = []\n for batch_sample in batch_samples:\n filename = csv_log_to_image_filename(data_dir,\n batch_sample[0])\n image = cv2.imread(filename)\n if image is not None:\n images.append(image)\n measurements.append(batch_sample[1])\n else:\n print(\"File \" + filename + \" is missing.\")\n\n X_data = np.array(images)\n y_data = np.array(measurements)\n yield sklearn.utils.shuffle(X_data, y_data)", "def generate_samples(data, root_path, batch_size=128):\n while True:\n # Generate random batch of indices\n indices = np.random.permutation(data.count()[0])\n\n for batch in range(0, len(indices), batch_size):\n batch_i = indices[batch:(batch + batch_size)]\n\n x = np.empty([0, img_h, img_w, img_c], dtype=np.float32)\n y = np.empty([0], dtype=np.float32)\n\n x, y = augment_data(x, y, data, root_path, batch_i)\n x, y = flip_images(x, y)\n\n yield (x, y)", "def generate_samples(t, num_samples=1000):\n\n i = tf.Variable(0)\n result = tf.TensorArray(t().dtype, num_samples)\n\n def cond(b_i, result):\n return tf.less(b_i, num_samples)\n\n def body(b_i, b_result):\n b_result = b_result.write(b_i, t())\n return b_i + 1, b_result\n\n i, result = 
tf.while_loop(cond, body, (i, result))\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n return sess.run(result.stack())", "def ptb_raw_data(data_path=None):\n\n\t# train_path = os.path.join(data_path, \"ptb.train.txt\")\n\t# valid_path = os.path.join(data_path, \"ptb.valid.txt\")\n\t# test_path = os.path.join(data_path, \"ptb.test.txt\")\n\n\tdata = np.load(data_path)\n\t# data = np.load(data_path).item()\n\t# f = open(data_path)\n\t# data = f.readlines()\n\tword_to_id, id_to_word, wordList = build_vocab_(data)\n\t# word_to_id = _build_vocab(train_path)\n\ttrain_data = _file_to_word_ids(wordList[int(len(wordList)*0.3):int(len(wordList)*1.0)], word_to_id)\n\tvalid_data = _file_to_word_ids(wordList[int(len(wordList)*0.2):int(len(wordList)*0.3)], word_to_id)\n\ttest_data = _file_to_word_ids(wordList[int(len(wordList)*0):int(len(wordList)*0.2)], word_to_id)\n\tvocabulary = len(word_to_id)\n\treturn train_data, valid_data, test_data, vocabulary", "def test(batch_size=1, num_sample=16):\n return paddle.batch(_read_creater(num_sample=num_sample), batch_size)", "def data_gen(voc_size, batch, nbatches, seq_len = 15):\r\n for i in range(nbatches):\r\n # (batch_size, seq_len)\r\n data = torch.from_numpy(\r\n np.random.randint(1, voc_size, size=(batch, seq_len)))\r\n data[:, 0] = 1 # add start token\r\n src = Variable(data, requires_grad=False)\r\n tgt = Variable(data, requires_grad=False)\r\n yield Batch(src, tgt, 0) # Accessed by next function one by one\r", "def next_simple_dataset(dataset, batch_size: int, datatype):\n while True:\n x_batch = []\n y_batch = []\n for i in range(batch_size):\n try:\n x, y, data_unit, index = create_xy(dataset, datatype)\n # x = normalize(x)\n x_batch.append(x)\n y_batch.append(y)\n except StopIteration:\n break\n x_batch, y_batch = np.array(x_batch), np.array(y_batch)\n if datatype != DataType.test:\n x_batch = SEQ_CVXTZ.augment_images(x_batch).astype(\"float32\")\n x_batch = np.array([normalize(x) for x in x_batch])\n # org_shape = x_batch.shape\n # org_width = x_batch.shape[1]\n # corner = int((org_width - ROI_IMAGE_SIZE) // 2)\n # print(f\"0: org_shape:{org_shape} x_batch:{x_batch.shape} corner:{corner}\")\n # x_batch = x_batch[:, corner:(org_width - corner), corner:(org_width - corner), :]\n # resized_x_batch = []\n # for x in x_batch:\n # img = Image.fromarray(np.uint8(x))\n # img = img.resize((IMAGE_SIZE, IMAGE_SIZE), Image.LANCZOS)\n # resized_x_batch.append(normalize(np.array(img)))\n # print(f\"1: org_shape:{org_shape} corner:{corner} x_batch:{x_batch.shape}\")\n # yield np.array(resized_x_batch), y_batch\n yield np.array(x_batch), y_batch", "def test_generate_data_produces_examples_of_correct_shape(self):\n\n with TemporaryDirectory() as tmp_dir:\n mock_raw_data(tmp_dir, raw_dim=256, num_images=100)\n with TemporaryDirectory() as data_dir:\n for problem_obj in self.all_problems:\n problem_object = problem_obj()\n\n problem_object.generate_data(data_dir, tmp_dir)\n\n for mode in [Modes.TRAIN, Modes.EVAL]:\n\n dataset = problem_object.dataset(mode, data_dir)\n example = tfe.Iterator(dataset).next()\n\n num_channels = problem_object.num_channels\n\n # Check that the input tensor has the right shape\n input_dim = problem_object.input_dim\n self.assertEqual(example[\"inputs\"].numpy().shape,\n (input_dim, input_dim, num_channels))\n\n # Check that the targets tensor has the right shape\n output_dim = problem_object.output_dim\n self.assertEqual(example[\"targets\"].numpy().shape,\n (output_dim, output_dim, num_channels))", "def 
tfrecord_iterator(\n data_path: str,\n index_path: typing.Optional[str] = None,\n shard: typing.Optional[typing.Tuple[int, int]] = None,\n compression_type: typing.Optional[str] = None,\n) -> typing.Iterable[memoryview]:\n if compression_type == \"gzip\":\n file = gzip.open(data_path, \"rb\")\n elif compression_type is None:\n file = io.open(data_path, \"rb\")\n else:\n raise ValueError(\"compression_type should be either 'gzip' or None\")\n length_bytes = bytearray(8)\n crc_bytes = bytearray(4)\n datum_bytes = bytearray(1024 * 1024)\n\n def read_records(start_offset=None, end_offset=None):\n nonlocal length_bytes, crc_bytes, datum_bytes\n\n if start_offset is not None:\n file.seek(start_offset)\n if end_offset is None:\n end_offset = os.path.getsize(data_path)\n while file.tell() < end_offset:\n if file.readinto(length_bytes) != 8:\n raise RuntimeError(\"Failed to read the record size.\")\n if file.readinto(crc_bytes) != 4:\n raise RuntimeError(\"Failed to read the start token.\")\n length, = struct.unpack(\"<Q\", length_bytes)\n if length > len(datum_bytes):\n datum_bytes = datum_bytes.zfill(int(length * 1.5))\n datum_bytes_view = memoryview(datum_bytes)[:length]\n if file.readinto(datum_bytes_view) != length:\n raise RuntimeError(\"Failed to read the record.\")\n if file.readinto(crc_bytes) != 4:\n raise RuntimeError(\"Failed to read the end token.\")\n yield datum_bytes_view\n\n if index_path is None:\n yield from read_records()\n else:\n index = np.loadtxt(index_path, dtype=np.int64)[:, 0]\n if shard is None:\n offset = np.random.choice(index)\n yield from read_records(offset)\n yield from read_records(0, offset)\n else:\n num_records = len(index)\n shard_idx, shard_count = shard\n start_index = (num_records * shard_idx) // shard_count\n end_index = (num_records * (shard_idx + 1)) // shard_count\n start_byte = index[start_index]\n end_byte = index[end_index] if end_index < num_records else None\n yield from read_records(start_byte, end_byte)\n\n file.close()", "def parse_raw(data):\n for sample in data:\n assert \"src\" in sample\n json_line = sample[\"src\"]\n obj = json.loads(json_line)\n assert \"key\" in obj\n assert \"wav\" in obj\n assert \"txt\" in obj\n key = AishellKeyMapper.encode(obj[\"key\"])\n wav_file = obj[\"wav\"]\n txt = obj[\"txt\"]\n try:\n if \"start\" in obj:\n assert \"end\" in obj\n sample_rate = torchaudio.backend.sox_io_backend.info(wav_file).sample_rate\n start_frame = int(obj[\"start\"] * sample_rate)\n end_frame = int(obj[\"end\"] * sample_rate)\n waveform, _ = torchaudio.backend.sox_io_backend.load(\n filepath=wav_file, num_frames=end_frame - start_frame, frame_offset=start_frame\n )\n else:\n waveform, sample_rate = torchaudio.load(wav_file)\n example = dict(key=key, txt=txt, wav=waveform, sample_rate=sample_rate)\n yield example\n except Exception as ex:\n logging.warning(\"Failed to read {}\".format(wav_file))", "def numericalize(examples, n):\n\n assert n*BATCH_SIZE <= len(examples)\n\n for i in range(n):\n\n #get the raw data\n \n raw_batch_name, raw_batch_body, batch_lengths = zip(*examples[BATCH_SIZE*i:BATCH_SIZE*(i+1)])\n \n #create a tensor to store the batch\n \n tensor_n = torch.zeros(BATCH_SIZE).long() #name\n tensor_l = torch.zeros((BATCH_SIZE, MAX_LENGTH)).long() #left node\n tensor_p = torch.zeros((BATCH_SIZE, MAX_LENGTH)).long() #path\n tensor_r = torch.zeros((BATCH_SIZE, MAX_LENGTH)).long() #right node\n mask = torch.ones((BATCH_SIZE, MAX_LENGTH)).float() #mask\n \n #for each example in our raw data\n \n for j, (name, body, length) in 
enumerate(zip(raw_batch_name, raw_batch_body, batch_lengths)):\n \n #convert to idxs using vocab\n #use <unk> tokens if item doesn't exist inside vocab\n temp_n = target2idx.get(name, target2idx['<unk>'])\n temp_l, temp_p, temp_r = zip(*[(word2idx.get(l, word2idx['<unk>']), path2idx.get(p, path2idx['<unk>']), word2idx.get(r, word2idx['<unk>'])) for l, p, r in body])\n \n #store idxs inside tensors\n tensor_n[j] = temp_n\n tensor_l[j,:] = torch.LongTensor(temp_l)\n tensor_p[j,:] = torch.LongTensor(temp_p)\n tensor_r[j,:] = torch.LongTensor(temp_r) \n \n #create masks\n mask[j, length:] = 0\n\n yield tensor_n, tensor_l, tensor_p, tensor_r, mask", "def get_data(self):\n if self.random_seeds: \n self._validate_random_seeds()\n seed_iter = list(map(iter,self.random_seeds))\n nsamples = len(self.random_seeds[0])\n else:\n seed_iter = None\n nsamples = self.numsamples\n progress_bar = tqdm(range(nsamples))\n self._set_meta_features()\n task_dict = {}\n finished_tasks = 0\n for _ in range(min(nsamples,self.numworkers)): \n self._prepare_and_start_task(task_dict,seed_iter)\n while finished_tasks < nsamples: \n done_ids, pending_ids = ray.wait(list(task_dict.keys()))\n if done_ids:\n id = done_ids[0]\n finished_tasks += 1\n try:\n data, times, pid = ray.get(id)\n except Exception as exception:\n self.logger.info(\"task with id %s failed with Traceback:\" %task_dict[id], exc_info=True)\n raise exception\n times[-1] = time() # add getter time\n data['idx'] = task_dict.pop(id)\n self.logger.info('id %i on pid %i: finished task.' %(data['idx'],pid))\n self._log_execution_time(data['idx'], times, pid)\n if (nsamples - self._idx) > 0: # directly _schedule next task\n self._prepare_and_start_task(task_dict,seed_iter)\n progress_bar.update()\n yield data", "def get_data(self):\n if self.with_encoder:\n for i in count():\n batchdata = pd.read_csv(SEQUENTIAL_TRAIN_PATH,\n nrows=GAN_BATCH_SIZE,\n skiprows=i * GAN_BATCH_SIZE + 1,\n names=SEQUENTIAL_COLUMN_NAMES.keys(),\n dtype=SEQUENTIAL_COLUMN_NAMES)\n if len(batchdata) < GAN_BATCH_SIZE:\n yield None\n batchdata = batchdata['seq_contents'].values\n yield get_data_for_lstm_ae(batchdata)\n else:\n # shuffles data\n self.encoded_data = self.encoded_data[np.random.permutation(self.encoded_data.shape[0])]\n for i in count():\n result = self.encoded_data[i*GAN_BATCH_SIZE:(i+1)*GAN_BATCH_SIZE,:]\n if result.shape[0] < GAN_BATCH_SIZE:\n yield None\n yield result", "def enhancer_iterator(self, data, labels, batch_size, num_steps):\n def seq_to_ints(seq):\n return [self.vocab.word_to_index[c] for c in seq]\n\n # Map raw data to array of ints. 
if all sequences are the same length L, \n # raw_data will be N-by-L\n mdata = np.array([seq_to_ints(i) for i in data], dtype=np.int32)\n num_batches = len(mdata) // batch_size\n \n # data will have batch_len elements, each of size batch_size\n # ASSUME FIXED SEQUENCE LENGTHS OFF 1000 FOR NOW (5/20/16)\n # Just grab middle self.config.num_steps nucleotides\n a = int(len(mdata[0,:])/2-self.config.num_steps/2)\n b = int(len(mdata[0,:])/2+self.config.num_steps/2)\n for i in range(num_batches):\n x = mdata[batch_size*i:batch_size*(i+1),a:b]\n if labels is not None:\n y = labels[batch_size*i:batch_size*(i+1)]\n else:\n y = None\n yield(x,y)", "def process_data(self, spec):\n with torch.no_grad():\n \n assert(len(spec) == 5), 'dataloader should return (spec_masked, pos_enc, mask_label, attn_mask, spec_stacked)'\n # Unpack and Hack bucket: Bucketing should cause acoustic feature to have shape 1xBxTxD'\n spec_masked = spec[0].squeeze(0)\n pos_enc = spec[1].squeeze(0)\n mask_label = spec[2].squeeze(0)\n attn_mask = spec[3].squeeze(0)\n spec_stacked = spec[4].squeeze(0)\n\n spec_masked = spec_masked.to(device=self.device)\n pos_enc = torch.FloatTensor(pos_enc).to(device=self.device)\n mask_label = torch.ByteTensor(mask_label).to(device=self.device)\n attn_mask = torch.FloatTensor(attn_mask).to(device=self.device)\n spec_stacked = spec_stacked.to(device=self.device)\n\n return spec_masked, pos_enc, mask_label, attn_mask, spec_stacked # (x, pos_enc, mask_label, attention_mask. y)", "def ptb_producer(raw_data, unigrams, batch_size, num_steps, num_true, num_sampled, vocab_size, name=None):\n with tf.name_scope(name, \"PTBProducer\", [raw_data, batch_size, num_steps]):\n raw_data = tf.convert_to_tensor(raw_data, name=\"raw_data\", dtype=tf.int64)\n\n data_len = tf.size(raw_data)\n batch_len = data_len // batch_size\n data = tf.reshape(raw_data[0 : batch_size * batch_len],\n [batch_size, batch_len])\n\n epoch_size = (batch_len - 1) // num_steps\n assertion = tf.assert_positive(\n epoch_size,\n message=\"epoch_size == 0, decrease batch_size or num_steps\")\n with tf.control_dependencies([assertion]):\n epoch_size = tf.identity(epoch_size, name=\"epoch_size\")\n\n i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()\n x = tf.strided_slice(data, [0, i * num_steps],\n [batch_size, (i + 1) * num_steps])\n x.set_shape([batch_size, num_steps])\n y = tf.strided_slice(data, [0, i * num_steps + 1],\n [batch_size, (i + 1) * num_steps + 1])\n y.set_shape([batch_size, num_steps])\n ns = None\n\n psw_list = get_split_weights_for_perp(tf.reshape(y, [-1]), unigrams, vocab_size, batch_size*num_steps)\n \n for i in range(len(psw_list)):\n psw_list[i] = tf.reshape(psw_list[i], [batch_size, num_steps])\n\n '''\n if num_sampled > 0:\n y_list = tf.unpack(y, axis=1)\n ns_list = []\n for i in range(num_steps):\n ns = get_neg_samples(batch_size, num_true, num_sampled, vocab_size, y_list[i], unigrams)\n ns_list.append(ns)\n else:\n ns = None\n '''\n\n return x, y, ns, psw_list", "def get_GP_samples(self):\n\n Z = tf.zeros([0, self.max_x_len, self.input_dim])\n\n # setup tf while loop (have to use this bc loop size is variable)\n def cond(i, Z):\n return i < self.N\n\n def body(i, Z):\n Yi = tf.reshape(tf.slice(self.Y, [i, 0], [1, self.num_obs_values[i]]), [-1])\n Ti = tf.reshape(tf.slice(self.T, [i, 0], [1, self.num_obs_times[i]]), [-1])\n ind_kfi = tf.reshape(tf.slice(self.ind_kf, [i, 0], [1, self.num_obs_values[i]]), [-1])\n ind_kti = tf.reshape(tf.slice(self.ind_kt, [i, 0], [1, self.num_obs_values[i]]), 
[-1])\n Xi = tf.reshape(tf.slice(self.X, [i, 0], [1, self.num_rnn_grid_times[i]]), [-1])\n X_len = self.num_rnn_grid_times[i]\n #T_len = self.num_obs_times[i]\n\n GP_draws = self.draw_GP(Yi, Ti, Xi, ind_kfi, ind_kti)\n pad_len = self.max_x_len - X_len # pad by this much\n padded_GP_draws = tf.concat([GP_draws, tf.zeros((self.n_mc_smps, pad_len, self.num_features))], 1)\n\n if self.use_med_cov:\n medcovs = tf.slice(self.med_cov_grid, [i, 0, 0], [1, -1, -1])\n tiled_medcovs = tf.tile(medcovs, [self.n_mc_smps, 1, 1])\n padded_GP_draws = tf.concat([padded_GP_draws, tiled_medcovs], 2)\n\n Z = tf.concat([Z, padded_GP_draws], 0)\n\n return i + 1, Z\n\n i = tf.constant(0)\n i, Z = tf.while_loop(cond, body, loop_vars=[i, Z],\n shape_invariants=[i.get_shape(), tf.TensorShape([None, None, None])])\n\n Z.set_shape([None, None, self.input_dim]) # somehow lost shape info, but need this\n\n return Z", "def sample(self, num_samples, current_device):\n z = torch.randn(num_samples, self.latent_dim).to(current_device)\n samples = self.decode(z)\n return samples", "def batch_generator(self, num_epochs=1, shuffle=False):\n def parse_fn(tfrecord):\n return parse_mnist_tfrec(\n tfrecord, self.name, self.features_shape, True\n )\n dataset = tf.data.TFRecordDataset(\n self.filenames_list, compression_type=self.compression_type\n )\n if shuffle:\n dataset = dataset.shuffle(buffer_size=256)\n dataset = dataset.repeat(num_epochs)\n dataset = dataset.map(parse_fn).prefetch(self.batch_size)\n dataset = dataset.batch(self.batch_size)\n iterator = dataset.make_one_shot_iterator()\n batch_features, batch_labels = iterator.get_next()\n return batch_features, batch_labels", "def get_tensor_examples_from_custom_input(self, samples):\n tensorizer = Tensorizer(self.config, self.tokenizer)\n tensor_samples = [tensorizer.tensorize_example(sample, False) for sample in samples]\n tensor_samples = [(doc_key, self.convert_to_torch_tensor(*tensor)) for doc_key, tensor in tensor_samples]\n return tensor_samples, tensorizer.stored_info", "def _create_test_iterator(self):\n input_ids = tf.range(self.left_images.shape[0])\n dataset = tf.data.Dataset.from_tensor_slices(input_ids)\n # NOTE: Loads 1 sample, i.e. 
batch mode not implemented yet.\n dataset = dataset.map(self._test_parse_function)\n iterator = dataset.make_one_shot_iterator()\n\n return iterator", "def _train_batch(self):\n\n # start epoch\n for i, (source, target) in enumerate(self.train_dataset):\n result = self._batch_iter(source, target, i)\n\n # yield\n yield result", "def data_iter(data, vocab, batch_size, shuffle=True, cuda=False):\n\n buckets = defaultdict(list)\n for pair in data:\n src_sent = pair[0]\n buckets[len(src_sent)].append(pair)\n\n batched_data = []\n for src_len in buckets:\n tuples = buckets[src_len]\n if shuffle: np.random.shuffle(tuples)\n batched_data.extend(list(HNCMDataLoader.batch_slice(tuples, batch_size)))\n\n if shuffle:\n np.random.shuffle(batched_data)\n for src_sents, trg_sents, fact_sents in batched_data:\n num_trg_word = sum(len(s[:-1]) for s in trg_sents)\n src_lengths = [len(s) for s in src_sents]\n src_seqs_var = to_input_var(src_sents, vocab.src, cuda)\n trg_seqs_var = to_input_var(trg_sents, vocab.trg, cuda)\n fact_lengths = [[len (s) for s in fact_sent] for fact_sent in fact_sents]\n fact_seqs_var = to_input_var_2d(fact_sents, vocab.src, cuda)\n\n yield {\n 'src_seq': src_seqs_var, 'src_lengths': src_lengths,\n 'fact_seq': fact_seqs_var, 'fact_lengths': fact_lengths,\n 'trg_seq': trg_seqs_var[:, :-1],\n 'target': trg_seqs_var[:, 1:],\n 'num_trg_word': num_trg_word, 'num_trg_seq': len(trg_sents)\n }", "def get_samples(self) -> McmcPtResult:", "def get_iter_data(dataset):\n num_samples = dataset.num_examples\n\n handle = dataset.open()\n features = []\n targets = []\n for i in xrange(num_samples):\n data = dataset.get_data(handle)\n features.append(data[0])\n targets.append(data[1])\n\n dataset.close(handle)\n\n targets_arr = targets[0]\n for i in xrange(1, num_samples):\n targets_arr = np.vstack((targets_arr, targets[i]))\n\n return features, targets_arr", "def batch_iter(X, *tensors, batch_size=256):\n idxs = torch.randperm(X.size(0))\n if X.is_cuda:\n idxs = idxs.cuda()\n for batch_idxs in idxs.split(batch_size):\n res = [X[batch_idxs]]\n for tensor in tensors:\n res.append(tensor[batch_idxs])\n yield res", "def get_examples(data_dir, mode, task_id, shard_id):\n file_path = get_full_filename(data_dir, mode, task_id, shard_id)\n relative_path = \"/\".join(file_path.split(\"/\")[3:])\n tf.logging.info(\"Reading file: %s\" % (file_path))\n print(relative_path)\n #client = storage.Client(projectname, credentials=credentials)\n #bucket = client.get_bucket(bucket_name)\n blob = storage_bucket.blob(relative_path)\n if not blob.exists():\n tf.logging.info(\"Path doesn't exist\")\n return None\n nq_data = extract_nq_data(file_path)\n tf.logging.info(\"NQ data Size: \" + str(len(nq_data.keys())))\n\n tf.logging.info(\"Performing entity extraction\")\n fact_extracted_data = entity_link_nq(nq_data)\n return fact_extracted_data", "def data_generator(delta=1, batch_size=32):\n while True:\n yield generate_samples(delta=delta, n=batch_size)", "def make_data_iter_fn(filename, batch_size, is_train):\n def parse_fn(example_proto):\n features = {\n 'input': tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),\n 'target': tf.VarLenFeature(tf.int64),\n 'input_length': tf.FixedLenFeature([1], tf.int64),\n 'target_length': tf.FixedLenFeature([1], tf.int64)\n }\n parsed_features = tf.parse_single_example(example_proto, features=features)\n\n parsed_features['input'] = tf.cast(parsed_features['input'], tf.int32)\n t = parsed_features['target']\n parsed_features['target'] = tf.SparseTensor(indices=t.indices, 
\n values=tf.cast(t.values, tf.int32),\n dense_shape=t.dense_shape)\n \n parsed_features['input_length'] = tf.cast(parsed_features['input_length'], tf.int32)\n parsed_features['target_length'] = tf.cast(parsed_features['target_length'], tf.int32)\n\n return parsed_features\n\n def input_fn():\n dataset = tf.data.TFRecordDataset(filename)\n dataset = dataset.map(parse_fn)\n \n if is_train:\n dataset = dataset.shuffle(500).repeat()\n \n dataset = dataset.batch(batch_size).prefetch(2)\n features = dataset.make_one_shot_iterator().get_next()\n return features, features['target']\n\n return input_fn", "def data_parallel(self, batch_size, inputs):\n inputs = list(inputs)\n\n # quick path: only one device, do not slice\n if len(self.work_devices) == 1:\n assert(self.main_device == self.work_devices[0])\n yield self.main_device, False, tuple(inputs)\n\n # slow path: multi-GPUs\n else:\n # the GPUs are not in the same group, place variables on CPU\n if self.main_device not in self.work_devices:\n yield self.main_device, True, tuple(inputs)\n\n # build the paralleled computation graph for each device\n with tf.name_scope('data_parallel') as ns:\n pass # generate a name scope to place our data slicing ops\n\n k = len(self.work_devices)\n for i, device in enumerate(self.work_devices):\n dev_inputs = []\n with tf.name_scope(ns + 'tower_gpu_{}'.format(i)):\n for inp in inputs:\n slice_len = (batch_size + k - 1) // k\n low, high = slice_len * i, slice_len * (i + 1)\n dev_inputs.append(inp[low: high])\n yield device, False, tuple(dev_inputs)", "def process(self, example):\n self.get_counter(\"examples-total\").inc()\n label = example_util.get_bytes_feature(example, _LABEL_COLUMN)[0]\n self.get_counter(\"examples-{}\".format(label)).inc()\n yield example", "def example_loader(\n data_path: str,\n index_path: typing.Union[str, None],\n description: typing.Union[typing.List[str], typing.Dict[str, str], None] = None,\n shard: typing.Optional[typing.Tuple[int, int]] = None,\n compression_type: typing.Optional[str] = None,\n) -> typing.Iterable[typing.Dict[str, np.ndarray]]:\n\n typename_mapping = {\n \"byte\": \"bytes_list\",\n \"float\": \"float_list\",\n \"int\": \"int64_list\"\n }\n\n record_iterator = tfrecord_iterator(\n data_path=data_path,\n index_path=index_path,\n shard=shard,\n compression_type=compression_type,\n )\n\n for record in record_iterator:\n example = example_pb2.Example()\n example.ParseFromString(record)\n\n yield extract_feature_dict(example.features, description, typename_mapping)", "def batch_iter(data, labels, lengths, batch_size, num_epochs):\n assert len(data) == len(labels) == len(lengths)\n # print(f'The length of the data: {len(data)} input samples')\n\n data_size = len(data)\n epoch_length = int(data_size / batch_size)\n # print(f'Total number of batches per epoch: {epoch_length}')\n\n for _ in range(num_epochs):\n for batch_num in range(epoch_length):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n\n xdata = data[start_index: end_index]\n ydata = labels[start_index: end_index]\n ldata = lengths[start_index: end_index]\n\n yield xdata, ydata, ldata", "def batch_iter(data, batch_size, shuffle=False):\n batch_num = math.ceil(len(data) / batch_size)\n index_array = list(range(len(data)))\n\n if shuffle:\n np.random.shuffle(index_array)\n\n for i in range(batch_num):\n indices = index_array[i * batch_size: (i + 1) * batch_size]\n examples = [data[idx] for idx in indices]\n\n examples = sorted(examples, key=lambda e: len(e[0]), 
reverse=True)\n src_sents = [e[0] for e in examples]\n tgt_sents = [e[1] for e in examples]\n\n yield src_sents, tgt_sents", "def batch_iter(data, batch_size, shuffle=False):\n batch_num = math.ceil(len(data) / batch_size)\n index_array = list(range(len(data)))\n\n if shuffle:\n np.random.shuffle(index_array)\n\n for i in range(batch_num):\n indices = index_array[i * batch_size: (i + 1) * batch_size]\n examples = [data[idx] for idx in indices]\n\n examples = sorted(examples, key=lambda e: len(e[0]), reverse=True)\n src_sents = [e[0] for e in examples]\n tgt_sents = [e[1] for e in examples]\n\n yield src_sents, tgt_sents", "def walk(self):\n data = open(self.data_file_path, 'rb')\n read_metric = globals()[\"ProtoDefinition\"].Payload()\n read_metric.ParseFromString(data.read())\n\n # One record for the whole file\n self.payload_metadata = read_metric.payloadMetadata\n self.device = read_metric.device\n\n # Get list of all *repeated* field types\n field_names = []\n for field_desc in read_metric.DESCRIPTOR.fields:\n field_name = field_desc.name\n\n if field_desc.label == field_desc.LABEL_REPEATED:\n field_names.append(field_name)\n\n # For each repeated field type, get the data and yield one item at a time\n for field_name in field_names:\n stream_samples = getattr(read_metric, field_name)\n for sample in stream_samples:\n yield self.device, sample", "def batch(data_path):\n train, _, _ = get_datasets(\n data_path=data_path,\n nb_nodes=7,\n task_type=\"classification\",\n nb_classes=2,\n split=None,\n k_fold=None,\n seed=1234,\n )\n for batch in torch.utils.data.DataLoader(\n train, shuffle=False, batch_size=25, drop_last=False\n ):\n return batch", "def next_sample(self, batch_size=1):\n\n X = []\n y = []\n\n for count in range(batch_size):\n #check for abrupt drift\n if count % self.abrupt_drift_rate == 0:\n dimfaks = [round(np.random.rand() * 4, 1) for _ in range(self.dims)]\n dimpots = [1 + round(np.random.rand() * 2) for _ in range(self.dims)]\n dimvars = [np.random.rand() * self.var for _ in range(self.dims)]\n dimmeans = [5 + np.random.rand() * 10 for _ in range(self.dims)]\n print(\"Random Polynomconcept: \", end=\"\")\n for i in range(self.dims):\n print(dimfaks[i],\" * x\", i+1, \"^\", dimpots[i], \" + \",end=\"\", sep=\"\")\n print()\n\n value = 0\n sample = []\n for i in range(self.dims):\n sample.append(np.random.normal(loc=dimmeans[i], scale=dimvars[i]))\n value += dimfaks[i] * (sample[i] ** dimpots[i])\n \n X.append(sample)\n y.append(value)\n\n self._x_idx += batch_size\n\n return (X, y)", "def batch_generator(data_frame_encoded):\n labels = data_frame_encoded[-1]\n # data = np.delete(data_frame_encoded, -1, axis=0)\n data = data_frame_encoded[:-1]\n\n num_features = len(data)\n num_batches = len(data[0])\n for i in range(num_batches):\n batch_compiled = []\n for j in range(num_features):\n if type(data[j][i]) is np.ndarray:\n batch_compiled.extend(data[j][i])\n else:\n batch_compiled.extend([data[j][i]])\n yield batch_compiled, labels[i]", "def ptb_raw_data(data_path, simple):\n\n train_path = os.path.join(data_path, \"ptb.train.txt\")\n valid_path = os.path.join(data_path, \"ptb.valid.txt\")\n test_path = os.path.join(data_path, \"ptb.test.txt\")\n\n word_to_id, probs = _build_vocab(train_path)\n train_data = _file_to_word_ids(train_path, word_to_id, simple)\n valid_data = _file_to_word_ids(valid_path, word_to_id, simple)\n test_data = _file_to_word_ids(test_path, word_to_id, simple)\n return train_data, valid_data, test_data, probs", "def generate_data(self, pict_ids):\n 
indx = 0\n to = indx + self.chunk_size\n while indx <= len(pict_ids):\n images, errs = self.get_images_online(pict_ids[indx: to])\n if len(errs) > 0:\n # get only labels for images which were correctly loade\n img_labels = self.get_encoded_labels(\n [name for name in pict_ids[indx: to] if name not in errs])\n else:\n img_labels = self.get_encoded_labels(pict_ids[indx: to])\n # get next boundaries\n to += self.chunk_size\n indx += self.chunk_size\n if to != len(pict_ids) and (indx + self.chunk_size) > len(pict_ids):\n # chunk increase overflow, we need to get the last chunk of data, which is smaller then defined\n to = len(pict_ids)\n\n yield images, img_labels", "def generate(\n self,\n dataset: Tensor,\n labels: Tensor,\n chunk_size: int) -> Tuple[\n int, Iterator[Tuple[Tensor, Tensor]]]:", "def _train(self):\n epoch_training_time = 0\n epoch_metrics_time = 0\n self.epoch_ += 1\n for i_batch, sample_batched in enumerate(self.dataloader):\n self.global_step_ += 1\n batch_start_time = time.time()\n data_sample = sample_batched[0].to(self.device)\n\n # Get model samples, either from replay buffer or noise.\n if self.model_samples_ is None:\n self.model_samples_ = deque(\n [\n self.net_.sample_from_prior(\n data_sample.shape[0], device=self.device\n ).detach()\n ]\n )\n elif len(self.model_samples_) > self.max_replay:\n self.model_samples_.popleft()\n replay_sample = random.choices(\n self.model_samples_,\n # favor more recent samples:\n weights=list(range(1, len(self.model_samples_) + 1)),\n )[0]\n noise_sample = self.net_.sample_from_prior(\n replay_sample.shape[0], device=self.device\n )\n mask = torch.rand(replay_sample.shape[0]) < self.replay_prob\n while len(mask.shape) < len(replay_sample.shape):\n # Add extra feature-dims\n mask.unsqueeze_(dim=-1)\n\n model_sample = torch.where(\n mask.to(self.device), replay_sample, noise_sample\n )\n\n self.net_.eval()\n # Run at least one iteration\n model_sample = self.net_.sample_fantasy(\n model_sample,\n num_mc_steps=self.num_mc_steps,\n mc_dynamics=self.sampler,\n ).detach()\n\n self.model_samples_.append(model_sample)\n\n # Sanity checks:\n assert (\n data_sample.shape[1:] == self.net_.input_shape\n ), \"Data is incompatible with network.\"\n assert (\n model_sample.shape[1:] == data_sample.shape[1:]\n ), \"Model and data samples are incompatible.\"\n\n # Forward gradient:\n self.net_.train()\n self.net_.zero_grad()\n data_energy_mean = self.net_(data_sample).mean()\n model_energy = self.net_(model_sample)\n model_energy_mean = model_energy.mean()\n\n # Estimate the odds of the data's energy based on a normal fitted to\n # model samples:\n data_erf = torch.erf(\n (data_energy_mean - model_energy_mean) / model_energy.std()\n )\n\n objective = data_energy_mean - model_energy_mean\n objective.backward()\n torch.nn.utils.clip_grad.clip_grad_value_(self.net_.parameters(), 1e2)\n self.optimizer_.step()\n\n batch_training_time = time.time() - batch_start_time\n epoch_training_time += batch_training_time\n self.logger_(energy_diff=float(objective))\n self.logger_(data_erf=float(data_erf))\n\n tr_metrics_start_time = time.time()\n for callback in self.step_callbacks:\n callback(\n net=self.net_,\n data_sample=data_sample,\n model_sample=model_sample,\n epoch=self.epoch_,\n global_step=self.global_step_,\n validation=False,\n )\n tr_metrics_time = time.time() - tr_metrics_start_time\n epoch_metrics_time += tr_metrics_time\n if self.verbose:\n print(\n f\"on epoch {self.epoch_}, batch {i_batch}, data erf: {data_erf}, objective: {objective}\"\n )\n 
print(f\"model energy: {model_energy_mean} +- {model_energy.std()}\")\n print(f\"data energy: {data_energy_mean}\")\n print(\n f\"training time: {batch_training_time:0.3f}s, metrics time: {tr_metrics_time:0.3f}s\"\n )\n means = self.logger_.means()\n if self.verbose:\n print(f\"on epoch {self.epoch_}\")\n for k, v in means.items():\n print(f\"{k}: {v}\")\n self.logger_.flush()\n means[\"loss\"] = energy_model.utils.constraints.add_soft_constraint(\n means[\"loss_ais\"], means[\"data_erf\"], lower_bound=-1\n )\n return means", "def _chunk_data(self):\n for n in range(0, len(self.data) + 1, len(self.data) //\n self.num_of_chunks):\n yield self.data[0 + n:len(self.data) // self.num_of_chunks + n]", "def get_batches(self, batch_size):\n if self.data.shape[0] % batch_size != 0:\n raise RuntimeError('num of data tuples is not a multiple of batch size')\n num_batch = self.data.shape[0] // batch_size\n for b in range(num_batch):\n yield self.data[b*batch_size:(b+1)*batch_size, :], \\\n self.target[b*batch_size:(b+1)*batch_size, :]", "def ptb_producer(raw_data, batch_size, num_steps, name=None):\n\twith tf.name_scope(name, \"PTBProducer\", [raw_data, batch_size, num_steps]):\n\t\traw_data = tf.convert_to_tensor(raw_data, name=\"raw_data\", dtype=tf.int32)\n\n\t\tdata_len = tf.size(raw_data)\n\t\tbatch_len = data_len // batch_size\n\t\tdata = tf.reshape(raw_data[0: batch_size * batch_len],\n\t\t [batch_size, batch_len])\n\n\t\tepoch_size = (batch_len - 1) // num_steps\n\t\tassertion = tf.assert_positive(\n\t\t\tepoch_size,\n\t\t\tmessage=\"epoch_size == 0, decrease batch_size or num_steps\")\n\t\twith tf.control_dependencies([assertion]):\n\t\t\tepoch_size = tf.identity(epoch_size, name=\"epoch_size\")\n\n\t\ti = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()\n\t\tx = tf.strided_slice(data, [0, i * num_steps],\n\t\t [batch_size, (i + 1) * num_steps])\n\t\tx.set_shape([batch_size, num_steps])\n\t\ty = tf.strided_slice(data, [0, i * num_steps + 1],\n\t\t [batch_size, (i + 1) * num_steps + 1])\n\t\ty.set_shape([batch_size, num_steps])\n\t\treturn x, y", "def _next_test(self):\n idx = self.it\n self.it = (self.it + 1) % self.n_examples\n\n if self.render_path:\n target_view = data_types.Views(\n rays=jax.tree_map(lambda r: r[idx], self.render_rays),)\n else:\n target_view = data_types.Views(\n rays=jax.tree_map(lambda r: r[idx], self.rays), rgb=self.images[idx])\n\n #--------------------------------------------------------------------------------------\n # Get the reference data\n batch_near_cam_idx = self.sorted_near_cam[idx]\n ref_images = self.train_images[batch_near_cam_idx]\n ref_images = ref_images.reshape(ref_images.shape[0], self.h, self.w, 3)\n\n ref_cameratoworld = self.train_camtoworlds[batch_near_cam_idx]\n ref_worldtocamera = self.train_worldtocamera[batch_near_cam_idx]\n\n #--------------------------------------------------------------------------------------\n # Replicate these so that they may be distributed onto several devices for\n # parallel computaion.\n l_devices = jax.local_device_count()\n reference_views = data_types.ReferenceViews(\n rgb=np.tile(ref_images, (l_devices, 1, 1, 1)),\n ref_worldtocamera=np.tile(ref_worldtocamera, (l_devices, 1, 1)),\n ref_cameratoworld=np.tile(ref_cameratoworld, (l_devices, 1, 1)),\n intrinsic_matrix=np.tile(self.intrinsic_matrix[None, :],\n (l_devices, 1, 1)),\n idx=np.tile(batch_near_cam_idx[None, :], (jax.local_device_count(), 1)),\n )\n\n return_batch = data_types.Batch(\n target_view=target_view, 
reference_views=reference_views)\n\n return return_batch", "def get_dataset_tfrecords(self, device_idx = 0):\n tf_records = os.path.join(self._data_dir, self._data_file)\n f_metadata = open(tf_records + '.metadata', 'r', encoding= 'utf-8')\n metadata = json.load(f_metadata)\n img_shape = metadata['img_shape']\n dtype = 'tf.' + metadata['d_type']\n f_metadata.close()\n\n data_iter = self._service.get_image_from_tfrecords(\n filenames= [tf_records],\n img_shape= img_shape,\n dt= eval(dtype))\n\n LOGGER.debug('----- TFRECORDS DATA ITER {} -----'.format(data_iter))\n\n return data_iter", "def __iter__(self):\n for batch in self.data:\n batch_size = len(batch)\n X, e1, e2, dist1, dist2, e1_pos, e2_pos, y = list(zip(*batch))\n\n x_len = max(len(x) for x in X)\n x_ids = torch.LongTensor(batch_size, x_len).fill_(0)\n dist1_padded = torch.LongTensor(batch_size, x_len).fill_(0)\n dist2_padded = torch.LongTensor(batch_size, x_len).fill_(0)\n for i, doc in enumerate(X):\n x_ids[i, :len(doc)] = torch.LongTensor(doc)\n\n dist1_padded[i, :len(doc)] = torch.LongTensor(dist1[i])\n dist1_padded[i, len(doc):] = torch.LongTensor([pos(e1_pos[i][1] - idx) for idx, _ in enumerate(x_ids[i][len(doc):], start=len(doc))])\n\n dist2_padded[i, :len(doc)] = torch.LongTensor(dist2[i])\n dist2_padded[i, len(doc):] = torch.LongTensor([pos(e2_pos[i][1] - idx) for idx, _ in enumerate(x_ids[i][len(doc):], start=len(doc))])\n\n e1_tensor = torch.LongTensor(e1)\n e2_tensor = torch.LongTensor(e2)\n\n y_tensor = torch.LongTensor(y)\n\n if self.gpu:\n x_ids = x_ids.pin_memory()\n e1_tensor = e1_tensor.pin_memory()\n e2_tensor = e2_tensor.pin_memory()\n dist1_padded = dist1_padded.pin_memory()\n dist2_padded = dist2_padded.pin_memory()\n y_tensor = y_tensor.pin_memory()\n\n yield (x_ids, e1_tensor, e2_tensor, dist1_padded, dist2_padded, y_tensor)", "def split_and_load(batch_data, num_gpus):\n return [batch_data[i].data[0] for i in range(num_gpus)], \\\n [batch_data[i].label[0].as_in_context(mx.gpu(i)) for i in range(num_gpus)]", "def get_batch(self, all_samples, all_labels, batch_size):\n\n # Create a Tensor dataset object for the samples and labels\n samples_dataset = tf.data.Dataset.from_tensor_slices(all_samples)\n labels_dataset = tf.data.Dataset.from_tensor_slices(all_labels)\n\n # Combine the samples dataset with the labels dataset\n combined_dataset = tf.data.Dataset.zip((samples_dataset, labels_dataset))\n\n # Prevent that you run out of samples by repeating the dataset once\n combined_dataset = combined_dataset.repeat()\n\n # Shuffle the data\n combined_dataset = combined_dataset.shuffle(batch_size)\n\n # Create batches of your dataset\n combined_dataset = combined_dataset.batch(batch_size)\n\n # Initialize the dataset for TensorFlow\n iterator = combined_dataset.make_initializable_iterator()\n\n # Get the batch samples and labels operations\n batch_samples, batch_labels = iterator.get_next()\n\n # Convert the samples and labels to type float32 to use them in the convolutional layer\n batch_samples = tf.cast(batch_samples, tf.float32)\n batch_labels = tf.cast(batch_labels, tf.float32)\n\n # Make the iterator object global to initialize it from another function\n self.iter_initializer = iterator.initializer\n\n return batch_samples, batch_labels", "def batch_iter(data: Union[np.ndarray, List[Any]], labels: Union[np.ndarray, List[Any]],\n batch_size: int, num_epochs: int) -> Tuple[Iterable[Any], Iterable[Any]]:\n assert len(data) == len(labels)\n\n for _ in range(num_epochs):\n start_index = 0\n while start_index < 
len(data) - 1:\n end_index = min(len(data) - 1, start_index + batch_size)\n\n xdata = data[start_index: end_index]\n ydata = labels[start_index: end_index]\n\n yield xdata, ydata\n\n start_index += batch_size", "def ptb_raw_data(data_path=None):\n\n train_path = os.path.join(data_path, \"ptb.train.txt\")\n valid_path = os.path.join(data_path, \"ptb.valid.txt\")\n test_path = os.path.join(data_path, \"ptb.test.txt\")\n\n word_to_id, unigrams = _build_vocab(train_path)\n train_data = _file_to_word_ids(train_path, word_to_id)\n valid_data = _file_to_word_ids(valid_path, word_to_id)\n test_data = _file_to_word_ids(test_path, word_to_id)\n vocabulary = len(word_to_id)\n return train_data, valid_data, test_data, vocabulary, unigrams", "def _generate_examples(self,\n split: Text = 'train'\n ) -> Iterator[Tuple[Text, Dict[Text, Any]]]:\n with tf.io.gfile.GFile(self.splits[split]) as split_file: # pytype: disable=attribute-error # gen-stub-imports\n for i, img_class_line in enumerate(split_file.read().split('\\n')):\n if not img_class_line:\n continue\n key = f'{self.builder_config.name}_{split}_{i:08d}'\n\n example_path, example_class = img_class_line.split(' ')\n example_fullpath = os.path.join(self.img_path, example_path) # pytype: disable=attribute-error # gen-stub-imports\n\n yield key, {'image': example_fullpath, 'label': int(example_class)}" ]
[ "0.6978561", "0.6902418", "0.63087994", "0.61956096", "0.61338556", "0.6096684", "0.60834736", "0.60573965", "0.6056362", "0.6045705", "0.60360193", "0.6022146", "0.601624", "0.6003096", "0.59680426", "0.5925938", "0.59207135", "0.5913195", "0.59102833", "0.5889849", "0.5875031", "0.5844307", "0.5833486", "0.58319557", "0.5826164", "0.5775194", "0.5771107", "0.57702017", "0.57584375", "0.57518923", "0.57464427", "0.57387674", "0.57294226", "0.57223725", "0.57083106", "0.5706558", "0.5703361", "0.5697862", "0.5695406", "0.5692489", "0.5689895", "0.5688804", "0.5682997", "0.5664302", "0.5660681", "0.5660676", "0.5626888", "0.56234837", "0.5621027", "0.5607332", "0.56012803", "0.55983645", "0.55938977", "0.558163", "0.55807215", "0.556631", "0.5560526", "0.55605227", "0.5560514", "0.55488074", "0.5545988", "0.5545594", "0.5541786", "0.5531048", "0.55255115", "0.5522568", "0.5521005", "0.5514893", "0.55074185", "0.5487041", "0.54758084", "0.5470946", "0.54688543", "0.54672897", "0.5460959", "0.5459542", "0.54583365", "0.5452571", "0.54476684", "0.5446512", "0.5446512", "0.5445719", "0.5442414", "0.5439692", "0.54383284", "0.5433966", "0.54328173", "0.54275054", "0.5424687", "0.5421767", "0.5420813", "0.54196346", "0.54176784", "0.54162997", "0.5409785", "0.54094374", "0.5408948", "0.5405894", "0.54006064", "0.5396441" ]
0.54554814
77
Return the notify service.
def get_service( hass: HomeAssistant, config: ConfigType, discovery_info: DiscoveryInfoType | None = None, ) -> RocketChatNotificationService | None: username = config.get(CONF_USERNAME) password = config.get(CONF_PASSWORD) url = config.get(CONF_URL) room = config.get(CONF_ROOM) try: return RocketChatNotificationService(url, username, password, room) except RocketConnectionException: _LOGGER.warning("Unable to connect to Rocket.Chat server at %s", url) except RocketAuthenticationException: _LOGGER.warning("Rocket.Chat authentication failed for user %s", username) _LOGGER.info("Please check your username/password") return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_service(hass, config, discovery_info=None):\n\n sender_nr = config[CONF_SENDER_NR]\n recp_nrs = config[CONF_RECP_NR]\n signal_cli_rest_api_url = config[CONF_SIGNAL_CLI_REST_API]\n\n signal_cli_rest_api = SignalCliRestApi(signal_cli_rest_api_url, sender_nr)\n\n return SignalNotificationService(recp_nrs, signal_cli_rest_api)", "def GetNotifyEvent(self):\r\n \r\n return self.notify", "def GetNotifyEvent(self):\r\n \r\n return self.notify", "def GetNotifyEvent(self):\r\n \r\n return self.notify", "def get_service(hass, config, discovery_info=None):\n image_dir = hass.config.path(config[CONF_IMAGE_DIR])\n return TimeBoxNotificationService(config[CONF_MAC],\n image_dir)", "def notifier(self):\n\n return self.config.notifier(self._notifier)", "def get_notifications() -> INotifications:\n notifiers = {\"django\": DjangoNotifier, \"govuk-notify\": GovUKNotifyEmail}\n notifier = getattr(settings, \"NOTIFIER\", \"django\")\n notifier_class = notifiers[notifier]\n return notifier_class()", "def get_service(hass, config):\n return TwitterNotificationService(config[CONF_CONSUMER_KEY],\n config[CONF_CONSUMER_SECRET],\n config[CONF_ACCESS_TOKEN],\n config[CONF_ACCESS_TOKEN_SECRET])", "def get_service(self):\n return self.__service", "def get_service(hass, config):\n\n if not validate_config(config,\n {DOMAIN: ['sender',\n 'password',\n 'recipient']},\n _LOGGER):\n return None\n\n try:\n SendNotificationBot(config[DOMAIN]['sender'] + '/home-assistant',\n config[DOMAIN]['password'],\n config[DOMAIN]['recipient'],\n '')\n except ImportError:\n _LOGGER.exception(\n \"Unable to contact jabber server.\"\n \"Please check your credentials.\")\n\n return None\n\n return XmppNotificationService(config[DOMAIN]['sender'],\n config[DOMAIN]['password'],\n config[DOMAIN]['recipient'])", "def get_service():\n if not hasattr(g, 'service'):\n g.service = Service()\n return g.service", "def create_and_register_service(self, server):\n service = GrpcNotifier(notifier_api=notifier,\n service_config=self.config)\n notifier_pb2_grpc.add_NotifierServicer_to_server(service, server)\n LOGGER.info('Service %s created and registered', service)\n return service", "def get_notification():\n condition.acquire()\n if not notifications:\n ret = condition.wait(2)\n if not ret:\n condition.release()\n raise TimeoutError(\"Timed out while waiting for notification\")\n\n notice = notifications.pop(0)\n condition.release()\n return notice", "def notification(self, sid):\r\n return notifications.Notification(self, sid)", "def service(self):\n return self._service", "def service(self):\n return self._service", "def get_notifier(publisher_id):\n global NOTIFIER\n return NOTIFIER.prepare(publisher_id=publisher_id)", "def notifier(self, immediately_reset=True, name=None):\n with ops.name_scope(name, \"notify_notification\",\n [self._handle]) as name:\n return gen_resource_variable_ops.notify_notification(\n self._handle, immediately_reset=immediately_reset, name=name)", "def service(self) -> interface.BaseService:\n for protocol in DEFAULT_PRIORITIES:\n service = self._config.get_service(protocol)\n if service:\n return service\n\n raise RuntimeError(\"no service (bug)\")", "async def async_get_service(\n hass: HomeAssistant,\n config: ConfigType,\n discovery_info: DiscoveryInfoType | None = None,\n) -> FlockNotificationService:\n access_token = config.get(CONF_ACCESS_TOKEN)\n url = f\"{_RESOURCE}{access_token}\"\n session = async_get_clientsession(hass)\n\n return FlockNotificationService(url, session)", "def notification(self):\n 
return self._notification", "def mock_clicksend_tts_notify():\n with patch(\n \"homeassistant.components.clicksend_tts.notify.get_service\", autospec=True\n ) as ns:\n yield ns", "def notification(message: str):\n # initialize the notification\n notify2.init(\"notifywhenLOAD\")\n notifyObj = notify2.Notification(\"Emergency Alert!\", message)\n notifyObj.set_timeout(12000)\n return notifyObj", "def service_instance(self):\n return self.service_class(self)", "def async_get_service_discovery(hass, discovery_info):\n notification_devices = []\n for device_name in discovery_info[ATTR_DISCOVER_DEVICES]:\n device = hass.data[DATA_KNX].xknx.devices[device_name]\n notification_devices.append(device)\n return (\n KNXNotificationService(notification_devices) if notification_devices else None\n )", "def service(self) -> Optional['outputs.ServiceReferencePatch']:\n return pulumi.get(self, \"service\")", "def notify(self, **kwargs):\n return self.send(kwargs)", "def notify(self, **kwargs):\n return self.send(kwargs)", "def notification():\n # pop-up notification\n notifies = NotifyModel.get_notify(current_user.get_id())\n return jsonify(notifications=notifies)", "def notifications(self):\r\n return notifications.Notifications(self)", "def notifications(self):\r\n return notifications.Notifications(self)", "def getService(self):\n return self.serviceClass", "def _create_notify(knx_module: XKNX, config: ConfigType) -> XknxNotification:\n return XknxNotification(\n knx_module,\n name=config[CONF_NAME],\n group_address=config[CONF_ADDRESS],\n )", "def notification(self, notification_id):\r\n return Notification(self, notification_id)", "def sd_notify(state, logger, unset_environment=False):\n\n\n addr = os.environ.get('NOTIFY_SOCKET')\n if addr is None:\n # not run in a service, just a noop\n return\n try:\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM | socket.SOCK_CLOEXEC)\n if addr[0] == '@':\n addr = '\\0' + addr[1:]\n sock.connect(addr)\n sock.sendall(state.encode('utf-8'))\n except:\n logger.debug(\"Exception while invoking sd_notify()\", exc_info=True)\n finally:\n if unset_environment:\n os.environ.pop('NOTIFY_SOCKET')\n sock.close()", "def notifications(id):\n return core.query(schema.notify, id)", "def get(self, notifier_id):\n return self.registry.get(notifier_id)", "def get_service(self):", "def notify(self, notification):\n topic = 'notify.' 
+ notification['subject']\n payload = serializer.dumps(notification, use_bin_type=True)\n self.socket.send_string(topic, flags=zmq.SNDMORE)\n self.socket.send(payload)\n return self.socket.recv_string()", "def notify(message):\n context = nova.context.get_admin_context()\n message['method'] = 'notify'\n priority = message.get('priority',\n FLAGS.default_notification_level)\n priority = priority.lower()\n rpc.cast(context, FLAGS.notification_topic, {'method':'notify','args':{'message':message}})", "def notify(self):\n\n def remind():\n \"\"\"\n this function shows a pop-up using windows notification\n \"\"\"\n ntftion.notify('reminder', f\"{self.notification}:\\n{self.work_name}\\n{self.work_datetime.hour}: \"\n f\"{self.work_datetime.minute} \", app_icon='reminder.ico', timeout=3)\n\n self.eisenhower_priority()\n if self.priority:\n while dt.now().day <= self.time_ntf.day and self.status != \"done\":\n if self.priority == 1 and dt.now().time() >= self.time_ntf.time():\n remind()\n time.sleep(5*60)\n\n elif (self.priority == 2) and ((dt.now().hour == self.time_ntf.hour)\n and (dt.now().time().minute == self.time_ntf.time().minute)):\n remind()\n break\n elif self.priority == 3 and dt.now().time().hour == 18:\n remind()\n time.sleep(24 * 3600)\n elif self.priority == 4 and dt.now().weekday() == 6:\n remind()\n time.sleep(7 * 24 * 3600)\n else:\n pass", "def get_service():\r\n creds = None\r\n # The file token.json stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('/var/jail/home/team28/final_project/python/EmailApp/token.json'):\r\n creds = Credentials.from_authorized_user_file('/var/jail/home/team28/final_project/python/EmailApp/token.json', SCOPES)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file('/var/jail/home/team28/final_project/python/EmailApp/credentials.json', SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('/var/jail/home/team28/final_project/python/EmailApp/token.json', 'w') as token:\r\n token.write(creds.to_json())\r\n\r\n service = build('gmail', 'v1', credentials=creds)\r\n return service", "def service(self):\n return self.__stackdriver", "def alert_new_service_notification(hirer, worker, service):\n\n domain = Site.objects.get_current().domain\n url = \"http://\" + domain + \"/worker/\"\n\n message = loader.get_template(\n 'alerts/new_service_notification.txt').render(\n {'worker': worker, 'hirer': hirer, 'service': service, 'url':url})\n\n return message", "def events_service() -> Events.Service:\n return Container.get_service('events_service')", "def notifier(self, name):\n\n # Look up the notifier\n notifier = self.notifiers.get(name, self.notifiers.get(None))\n\n # Return the driver\n return notifier.driver", "def service(self) -> str:\n return pulumi.get(self, \"service\")", "def on_notify(self, name):\r\n pass", "def service(self) -> Optional['outputs.ServiceReference']:\n return pulumi.get(self, \"service\")", "async def notify(self, message: str) -> None:\n\n pass", "def get_notifications(self):\n return self.ws.events['notifications']", "def get_service(self):\n if 'service' in self._data:\n return self._data['service']\n else:\n raise ClskError('Network %s does not have service 
confgiured' % \n self.name)", "def service(self):\n pass", "def email_service_used(cls):\n return EMAIL_SERVICE_MAILJET", "def create_gunicorn_worker():\n note_worker = NotificationWorker(\n notification_queue, poll_period_seconds=10, reservation_seconds=30, retry_after_seconds=30\n )\n worker = GunicornWorker(__name__, app, note_worker, True)\n return worker", "def notify(cls, state):\r\n return PlatformMessage(method=\"__reply__\", kwargs={\"state\": state})", "def notify_count(self):\n return self._notify_count", "def clean_notify(self):\n return self.cleaned_data.get(self.Fields.NOTIFY, self.NotificationTypes.DEFAULT)", "def svn_client_ctx_t_notify_func_get(svn_client_ctx_t_self): # real signature unknown; restored from __doc__\n pass", "def getServiceSupplier(self):\n return self._ServiceSupplier", "def get_service(self, name):\n return self.app.container.get(name)", "def service(self, service_id):\r\n return services.Service(self, service_id)", "def base_service(self):\n return self", "def _get_k8s_service(self):\n return kubernetes.client.V1Service(\n api_version=\"v1\",\n kind=\"Service\",\n metadata=kubernetes.client.V1ObjectMeta(name=self._k8s_service_name),\n spec=kubernetes.client.V1ServiceSpec(\n selector={\"app.kubernetes.io/name\": self.app.name},\n ports=[\n kubernetes.client.V1ServicePort(\n name=\"tcp-{}\".format(2222),\n port=2222,\n target_port=2222,\n )\n ],\n ),\n )", "def notification_config(self) -> 'outputs.NotificationConfigResponse':\n return pulumi.get(self, \"notification_config\")", "def get_notification(self, id):\n url = \"https://api.imgur.com/3/notification/{0}\".format(id)\n resp = self._send_request(url)\n return Notification(resp, self)", "def notify(self):\n return _MethodCall(self._proto)", "def __get_service(self):\n \n service_basic_info = self.__get_service_basicinfo()\n contact_info = self.__get_service_contactinfo()\n\n service = service_basic_info\n service['contact_info'] = contact_info\n return service", "def service(self):\n payload = {\n 'time_zone': self.timezone,\n 'query': self._service_key,\n 'include[]': 'escalation_policies'\n }\n r = self._get_url(payload, 'services')\n return r['services'][0]", "def getService(name):\n return Service.getService(name)", "def service_signal(self, service):\n signal = \"{}_{}_{}\".format(DOMAIN, service, self.unique_id.replace(\".\", \"_\"))\n return signal", "def notify_processing(self, **kwargs):\n return self.notify(\"notify_processing\", **kwargs)", "def get_notifications(self):\n res = self.get_object(\"/integrationServices/v3/notification\")\n return res.get(\"notifications\", [])", "def alert_service_notification(user, service):\n\n message = loader.get_template(\n 'alerts/service_notification.txt').render(\n {'user': user, 'service': service})\n\n return message", "def staff_grading_service():\r\n global _service\r\n if _service is not None:\r\n return _service\r\n\r\n if settings.MOCK_STAFF_GRADING:\r\n _service = MockStaffGradingService()\r\n else:\r\n _service = StaffGradingService(settings.OPEN_ENDED_GRADING_INTERFACE)\r\n\r\n return _service", "def _get_lsp_config_notify_isis(self):\n return self.__lsp_config_notify_isis", "def GetCloudPubSubService(version):\n credentials = _GetCredentials()\n\n service = build('pubsub', version, credentials=credentials)\n\n return service", "async def find_notification(db_session: Session, notification_id: int):\n notification = await NotificaitonCRUD.find_notification_by_id(db_session, notification_id=notification_id)\n return notification", "def 
notify_new(self, **kwargs):\n return self.notify(\"notify_new\", **kwargs)", "def notify(cls, self, message):\n pass", "def _notify(title, message, icon=\"dialog-error\"):\n try:\n import pynotify\n except ImportError:\n return\n pynotify.init(\"moya-doc\")\n n = pynotify.Notification(title, message, icon)\n n.show()", "def get_redis_server():\n return redis_server", "def service(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service\")", "def queue(self):\n return self._notifications", "def get_notifiers(self):\n _notifiers = []\n for notifier in (self.notifiers or\n settings.ACTIVITIES_DEFAULT_NOTIFIERS):\n if isinstance(notifier, str):\n notifier = notifier_registry.get(notifier)\n else:\n notifier = notifier_registry.get_or_register(notifier)\n _notifiers.append(notifier)\n return _notifiers", "def get_service():\n creds = None\n\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n\n service = build('gmail', 'v1', credentials=creds)\n\n return service", "def service_client(self):\n\n return self._service_client", "def service(self, block, service_name):\n declaration = block.service_declaration(service_name)\n if declaration is None:\n raise NoSuchServiceError(f\"Service {service_name!r} was not requested.\")\n service = self._services.get(service_name)\n if service is None and declaration == \"need\":\n raise NoSuchServiceError(f\"Service {service_name!r} is not available.\")\n return service", "def service(self) -> BaseService:", "def Get():\n return ServiceConfig() # Singleton decorator ensures there's only one", "def _get_service_instance(self):\n try:\n smart_stub = SmartStubAdapter(\n host=self._host,\n port=int(self._port),\n sslContext=self.sslContext,\n connectionPoolTimeout=0\n )\n session_stub = VimSessionOrientedStub(\n smart_stub,\n VimSessionOrientedStub.makeUserLoginMethod(self._user, self._password))\n service_instance = vim.ServiceInstance('ServiceInstance', session_stub)\n\n # Ensure connection to server is closed on program exit\n atexit.register(Disconnect, service_instance)\n return service_instance\n except vmodl.MethodFault as error:\n logger.error(f\"Caught vmodl fault : {error.msg}\")\n raise", "def service(self) -> Optional['DistributionConfigurationTargetContainerRepositoryService']:\n return pulumi.get(self, \"service\")", "def get_service(self,name):\n\t\t#forma segura de obtener un servicio\n\t\ttry:\n\t\t\treturn self._services[name]\n\t\texcept Exception:\n\t\t\treturn None", "def notifies(self) -> Iterator[Notify]:\n while 1:\n with self.lock:\n ns = self.wait(notifies(self.pgconn))\n enc = self.client_encoding\n for pgn in ns:\n n = Notify(\n pgn.relname.decode(enc), pgn.extra.decode(enc), pgn.be_pid\n )\n yield n", "def _get_service(self, service_name):\n if self._service:\n return self._service\n res = self._cc.services().get_by_name(service_name, name='label')\n self._service = 
res.resource\n return self._service", "def get_product_caching_service():\n return APP.config['ProductCachingService']", "def stop_notify(self, bus):\n chrc = bus.get(BLUEZ_SVC_NAME, self.chrc_path)\n try:\n chrc.StopNotify()\n except Exception as err:\n print(\"Unable to stop notifying\")", "def notification_preference(self) -> Optional[Sequence['outputs.NotificationPreferenceResponse']]:\n return pulumi.get(self, \"notification_preference\")", "def getServices(self):\n pass", "def _service_task(self):\n pass" ]
[ "0.6690221", "0.6181973", "0.6181973", "0.6181973", "0.6116812", "0.60631627", "0.6032626", "0.6008227", "0.5919631", "0.5903544", "0.5897824", "0.5842087", "0.5824085", "0.5796516", "0.5742724", "0.5742724", "0.57323897", "0.5706646", "0.56921333", "0.5666572", "0.56374615", "0.5550773", "0.5545915", "0.54989165", "0.54889065", "0.5470254", "0.54621744", "0.54621744", "0.5461941", "0.5428253", "0.5428253", "0.5416206", "0.5379972", "0.53740853", "0.532917", "0.5312271", "0.52949035", "0.52555865", "0.522415", "0.5202326", "0.51606053", "0.5152428", "0.51523", "0.51521325", "0.51474273", "0.51147586", "0.5109041", "0.5103518", "0.50573665", "0.5054968", "0.50428915", "0.50369984", "0.50332624", "0.5030604", "0.5000233", "0.4996501", "0.49931353", "0.4992004", "0.49758524", "0.49592012", "0.49576297", "0.4943879", "0.49431112", "0.49408445", "0.49387828", "0.49105144", "0.48932043", "0.48907483", "0.4886623", "0.48839685", "0.4881922", "0.48729593", "0.48591846", "0.48212424", "0.4776841", "0.477242", "0.47595456", "0.47542852", "0.47527432", "0.47388282", "0.47383308", "0.47143722", "0.4709943", "0.46975902", "0.4697158", "0.46961153", "0.4687803", "0.46787015", "0.46767622", "0.4671845", "0.46567246", "0.4653364", "0.4648109", "0.46442217", "0.4638939", "0.46364453", "0.4632895", "0.4629147", "0.4616614", "0.46103522" ]
0.5747234
14
Send a message to Rocket.Chat.
def send_message(self, message="", **kwargs): data = kwargs.get(ATTR_DATA) or {} resp = self._server.chat_post_message(message, channel=self._room, **data) if resp.status_code == HTTPStatus.OK: if not resp.json()["success"]: _LOGGER.error("Unable to post Rocket.Chat message") else: _LOGGER.error( "Incorrect status code when posting message: %d", resp.status_code )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sendChatMessage(self, msg):\n self.transport.write(msg)", "def send_message(self, message):\n \n msgPacket = serverbound.play.ChatPacket()\n msgPacket.message = message\n self.connection.write_packet(msgPacket)", "def send(self, msg):\n self.message('Me', msg)", "def send(self, message):\n _check_message_type(message=message)\n response = requests.post(\n self._server_url + _SEND_URL,\n data={\"id\": self._chat_id, \"msg\": message}\n )", "def sendmessage(user,roomid):\n message = request.form['message']\n channel.send_message(user+roomid,message)", "def sendMsg(self, chat, msg):\n try:\n self.chats[chat].SendMessage(msg)\n return \"Message sent\\n\"\n except KeyError:\n raise RuntimeError(\"No chat %s\" % chat)", "def send_chat_message(self,\n action: str,\n json_data: Any\n ) -> None:\n\n data = {\n 'from': self.player.player_name,\n 'message': json_data['data']['message']\n }\n self.send_all(\n message=UtilityModule.generate_response(\n action=action,\n code=200,\n data=data\n )\n )", "def message(self, msg):\n if msg['type'] in ('chat', 'normal'):\n msg.reply(\"Thanks for sending\\n%(body)s\" % msg).send()", "def send(self, msg: str):\n\t\tself.client.send(msg.encode())", "def send_message(self, message: str):\n self.client.chat_postMessage(\n channel=f\"@{self.username}\", text=message,\n )", "async def chat_message(self, event):\n await self.send(\n {'type': \"websocket.send\",\n 'text': event['response_data']}\n )", "def send_chat_message(self, channel, message):\r\n self._send(\"PRIVMSG #{0} :{1}\".format(channel, message))", "def send(self, message, sender):\n chatclient.receive_chat_message(message, sender)\n return {}", "def sendmessage(user,gameid):\n message = request.form['message']\n channel.send_message(user+gameid,message)", "def send_message(self, chat_id, text):\n self.updater.bot.sendMessage(chat_id=chat_id, text=text)\n log.info(\"Send msg @%s: %s..\", chat_id, text[:20])", "def chat(sock, msg):\r\n message = \"PRIVMSG {} :{}\\r\\n\".format(cfg.CHAN, msg)\r\n #print(\"Sending: \"+message)\r\n sock.send(message.encode(\"utf-8\"))", "def send_chat(self, text, sender, target, whisper=False):\n self.connection.send({'type': 'chat', 'sender': sender, 'target': target,\n 'text': text, 'whisper': whisper})", "def sendchat(self, the_id, msg):\r\n the_id = Client.toroomid(the_id)\r\n self.tx_cmd(FCTYPE.CMESG, the_id, 0, 0, msg)\r\n #@TODO - Emote encoding\r", "def send(self, message):\n if self.connection:\n self.connection.send(message)", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send(self, msg):\n sleep(self.m_to)\n self.conn.send(msg)", "async def send_message(self, message: dict) -> None:\n await self.client.chat_postMessage(channel=self.channel_id, **message)", "async def chat_message(self, event):\n message = event['message']\n await self.send_json({\n 'message': message\n })", "def sendMessage(self, msg):\n # Socket Object\n self.sock.connect((self.host, self.port))\n self.sock.send(msg)\n self.sock.close()", "def send_message(self, message, user, msg_type=MSG_TYPE_MESSAGE):\n final_msg = {'room': str(self.id), 'message': message, 'username': user.username, 'msg_type': msg_type}\n\n # Send out the message to everyone in the room\n self.websocket_group.send(\n {\"text\": json.dumps(final_msg)}\n )", "def send_message(userid):\n\tsc.api_call(\n\t\t\"chat.postMessage\",\n\t\tchannel=userid,\n\t\ttext=\"Hey there, just wanted to remind you to join <#CQCKS8UN6|secret-snowflake-fa19> by Wednesday night, if you want 
to participate in Secret Santa this year. It will be lots of fun!\",\n\t\tusername=\"Reminder\",\n\t\ticon_emoji=\":santa:\"\n\t)", "def send_message(self, message):\n pass", "def send(self):\n url = (\"https://chatbase.com/api/messages?api_key=%s\" % self.api_key)\n return requests.post(url,\n data=self.to_json(),\n headers=Message.get_content_type())", "def send(self, message):\n self.sock.send(message)", "def send(self):\n url = \"https://chatbase.com/api/message\"\n return requests.post(url,\n data=self.to_json(),\n headers=Message.get_content_type())", "def send(self, msg):\n self.__sock.send(msg)", "def send_message(self, channel, text):\n if not channel:\n return\n self.post('chat.postMessage', data={\"channel\": channel, \"text\": text})", "def send(self, msg):\n return self._channel_action(msg, 1)", "def send(message):\n\tmessage = message.encode()\n\tconn.send(message)", "async def chat_message(self, event):\n await self.send_json(\n return_value(\n ACTION_MESSAGE,\n event['label'],\n event['username'],\n MSG_MESSAGE,\n event['message']\n )\n )", "def send(self, message):\n pass", "def send_msg(self, match_id, msg):\n endpoint = '/user/matches/%s' % match_id\n params = {\n \"message\": msg\n }\n return self.post_request(endpoint, params)", "def telegram_send(message):\n\trequests.get(\"https://api.telegram.org/bot\" + TELEGRAM_TOKEN + \"/sendMessage?chat_id=\" + my_telegram_id + \"&text={}\".format(message))", "def send_message(self, to, message):\n\t\tmessage_dict = {\n\t\t\tACTION: MESSAGE,\n\t\t\tSENDER: self.username,\n\t\t\tDESTINATION: to,\n\t\t\tTIME: time.time(),\n\t\t\tMESSAGE_TEXT: message\n\t\t}\n\t\tclient_log.debug(f'Сформирован словарь сообщения: {message_dict}')\n\t\t# Необходимо дождаться освобождения сокета для отправки сообщения\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, message_dict)\n\t\t\tself.process_server_ans(get_message(self.transport))\n\t\t\tclient_log.info(f'Отправлено сообщение для пользователя {to}')", "def send_msg(\n self,\n msg: str,\n peer_id: int\n ) -> NoReturn:\n self.call_method(\n 'messages.send',\n dict(\n message=msg, peer_id=peer_id,\n disable_mentions=1, random_id=0))", "def send_message(self, message):\n source_guid = str(uuid.uuid1())\n date = time.strftime(\"%H:%M:%S\")\n self.api.send_message(\n self.conversation_type,\n self.cid,\n source_guid,\n message[:1000]\n )\n if self.api.send_message(self.conversation_type, self.cid, source_guid, message):\n self.append_message(source_guid, 'me', date, message[:1000])\n if len(message) > 1000:\n self.send_message(message[1000:])", "def send_message(channel, message):\n slack_client = get_client()\n slack_client.chat_postMessage(channel=channel, text=message, as_user=True)", "def send_to_room(self, message, room_name):\r\n room = self.get_room(room_name)\r\n\r\n if room is not None:\r\n room.send_message(message)", "def whisper(sock, user, msg):\r\n chat(sock, \"/w {} {}\".format(user, msg))", "def send_message(self, message):\r\n\t\tself.__tcpSocket.write(message.encode('utf8'))", "def send_message(self, chat, message, **kwargs):\n\n payload = {\n 'chat_id': chat.chat_id,\n 'text': message\n }\n\n if 'parse_mode' in kwargs:\n payload['parse_mode'] = 'Markdown'\n if 'disable_web_page_preview' in kwargs:\n payload['disable_web_page_preview'] = True\n if 'reply_to_message_id' in kwargs:\n payload['reply_to_message_id'] = kwargs['reply_to_message_id']\n if 'reply_markup' in kwargs:\n payload['reply_markup'] = kwargs['reply_markup'].as_dict()\n\n r = requests.post(self.url + 
'sendMessage', json=json.dumps(payload))\n return r", "async def send(self, message):", "def send_message(self, message:str):\r\n msg_send = message.encode()\r\n self.server_connection.send(msg_send)", "def send(self, msg):\n with self._send_lock:\n self._rt.send_message(msg.bytes())", "def sendmsg(msg, target=channel):\n msg = bytes('PRIVMSG ' + target + ' :' + msg + '\\n', 'UTF-8')\n sleep(randint(5, 10) / 10) # to avoid throttling due to flooding\n write(msg)\n ircsocket.send(msg)", "def send_message(self, msg):\n if msg is None:\n raise ValueError('message cannot be None!')\n\n if not isinstance(msg, message.Message):\n raise ValueError('message must be a type of Message')\n\n message_json = json.dumps(msg.__dict__)\n message_length = len(message_json)\n message_length_binary = struct.pack('>I', message_length)\n\n logging.info(\"Send: {0}\".format(message_json))\n\n self.sck.send(message_length_binary)\n self.sck.send(message_json)", "def sendMessage(sock, message):\n messageTemp = \"PRIVMSG \" + channel +\" :\" +message\n sock.send((messageTemp+ \"\\n\").encode())", "async def send_room_message(self, room_id, message):\n print(\"PblicChatConsumer\", \"send_room_message\")\n user = self.scope[\"user\"]\n\n if self.room_id is not None:\n if str(room_id) != str(self.room_id):\n raise ClientError(\"ROOM_ACCESS_DENIED\", \"Room access denied\")\n elif not user.is_authenticated:\n raise ClientError(\"AUTH_ERRO\", \"Not authenticated to join\")\n else:\n raise ClientError(\"ROOM_ACCESS_DENIED\", \"Room access denied\")\n\n room: PublicChatRoom = await get_room_or_error(room_id)\n await create_new_public_room_chat(room, user, message)\n await self.channel_layer.group_send(\n room.group_name,\n {\n \"type\": \"chat.message\", # chat_message\n \"profile_image\": (user.profile_image.url\n if user.profile_image else None),\n \"username\": user.username,\n \"user_id\": user.id,\n \"message\": message,\n }\n )", "def sendMessage(self, message):\n self.connection.sendMessage(self, message.encode('ascii', 'ignore'))", "def chat(sock, msg):\n full_msg = \"PRIVMSG {} :{}\\n\".format('#' + encryption_key.decrypted_chan, msg)\n msg_encoded = full_msg.encode(\"utf-8\")\n print(msg_encoded)\n sock.send(msg_encoded)", "def sendMessage(self, message):\n\t\tm = domish.Element((None, 'message'))\n\t\tm['from'] = self.jid\n\t\tm['to'] = self.room\n\t\tm['type'] = 'groupchat'\n\t\tm.addElement('body', content = message)\n\t\tself.xmlstream.send(m)", "def chat(sock, chan, msg, excuse=False):\n if chan in cfg.ACCEPTED or excuse:\n sock.send(\"PRIVMSG {} :{}\\r\\n\".format(chan, msg).encode(\"utf-8\"))\n console.info(\"TWITCH : {:<11} - {:<10}: {}\".format(chan[:11], \"RyuoBot\", msg))\n else:\n console.error(\"TWITCH : {:<11} - {:<10}: {} - RyuoBot is not allowed to type in {}!\".format(chan[:11], \"RyuoBot\", msg, chan))", "def send_message_to_chat(channel, response):\n\n web_client.chat_postMessage(\n channel=channel,\n text=response\n )", "def send_user_message(self, channel_id, message):\n self.slack_client.api_call('chat.postMessage', as_user='true', channel=channel_id, text=message)", "def send_msg(self, msg):\n self.msg_queue.put(dict(to=settings.IOTTLY_XMPP_SERVER_USER,msg='/json ' + json.dumps(msg)))", "def send_message(msg, settings):\n from_jid = xmpp.protocol.JID(settings['xmpp_jid'])\n passwd = settings['xmpp_password']\n\n client = xmpp.Client(from_jid.getDomain(), debug=[])\n if client.connect():\n if client.auth(from_jid.getNode(), passwd):\n client.send(msg)\n client.disconnect()", "def 
send_message(self, message, socket):\n socket.send(bytes(message, 'UTF-8'))", "def send_message():\n incoming = request.get_json()\n message = Message(\n user_id = session['user_id'],\n room_id = incoming[\"room_id\"],\n sendTime = datetime.now(),\n content = incoming[\"content\"]\n )\n db.session.add(message)\n db.session.commit()\n return jsonify(\n content = incoming[\"content\"]\n )", "def send_message(self, to, sender, message=\"\", subject=\"\"):\n msg = xmpp.Message()\n if to is not None and to != \"\":\n msg.setTo(to)\n msg.setFrom(sender)\n msg.setBody(message)\n msg.setSubject(subject)\n if self.client.connected:\n return self.client.send(msg)\n return -1", "def send_message(self, message):\n\n self.socket.send(message.serialize())", "def send_message(text, chatID=chatID, token=token, time=10):\n url = f'https://api.telegram.org/bot{token}/sendMessage'\n com = f'curl -s --max-time {time} '\n com += f'-d \"chat_id={chatID}&disable_web_page_preview=1&text={text}\" {url}'\n resp = os.popen(com).read().strip()\n return json.loads(resp)", "def send_message(self, message):\n if self.connected:\n self.send(\n json.dumps(message.request))", "def send_command(self, command):\n cmd, arg = command\n logging.debug(f'Sending \"/{cmd} {arg}\" to {self.channel_name}...')\n params = self.params\n params[\"command\"] = f\"/{cmd}\"\n params[\"text\"] = arg\n response = requests.post(self.url + \"chat.command\", params=params)\n if response.ok:\n logging.info(f'Successfully sent \"/{cmd} {arg}\" to {self.channel_name}.')\n logging.debug(response.json())\n else:\n logging.info(f'Failed to send \"/{cmd} {arg}\" to {self.channel_name}.')\n logging.debug(response.json())\n return response.status_code", "def send_message(self, message):\n self.print_debug_message(message)\n self.socket.send(message)", "def sendMessage(self, msg):\r\n binaries, msg = recursiveBinarySearch(msg)\r\n msg = json.dumps(msg)\r\n\r\n if isInIOThread():\r\n self._send(msg, binaries)\r\n else:\r\n self._connection.reactor.callFromThread(self._send, msg, binaries)", "def send(self, msg: str):\n message = msg.encode(HttpClient.FORMAT)\n self.client.send(message)\n print(\"[MESSAGE] message sent:\", msg)", "def new_chat_message(cls, chatroom, text, sender):\n cls.broadcast(\n group=chatroom,\n payload={\"chatroom\": chatroom, \"text\": text, \"sender\": sender},\n )", "def write(self, msg):\n self.sock.send(msg.encode())", "def send_message(self, text):\n self.__telegram_info.message.reply_text(text)", "def send_message(self, data):\n self.transport.write(data)", "def send_message(self, message):\n encoded_message = self.encode_message(message)\n self.socket.send(encoded_message)", "def send_message(self, message):\n\t\tself.logger.send(\"{0} - {1}\".format(self.peerip, str(message)))\n\t\ttry:\n\t\t\tself.socket.sendall(message.get_message(self.coin))\n\t\texcept socket.error as err:\n\t\t\tself.stop(err.errno,'send_message')", "def send_message(self,contato,mensagem):\r\n #Open new chat on whatsapp web\r\n new_msg_button = self.driver.find_element_by_xpath(self.NEW_CHAT)\r\n new_msg_button.click()\r\n sleep(1)\r\n #Search the contact\r\n search_field = self.driver.find_element_by_xpath(self.SEARCH_CONTACT)\r\n search_field.click()\r\n search_field.send_keys(contato)\r\n sleep(1)\r\n #Click on the firts contact with the name that I told \r\n first_contact = self.driver.find_element_by_xpath(self.FIRST_CONTACT)\r\n first_contact.click()\r\n sleep(1.5)\r\n type_field = self.driver.find_element_by_xpath(self.TYPE_MSG)\r\n 
type_field.click()\r\n type_field.send_keys(mensagem)\r\n send_msg= self.driver.find_element_by_xpath(self.SEND_BUTTON)\r\n send_msg.click()\r\n sleep(1)", "def send(self, msg: Message, **kwargs):\n\n pass", "def send(self, msg):\n if self.sock is not None:\n try:\n send_msg(self.sock, msg)\n except socket.error, msg:\n self.sock = None\n print 'Send failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]", "def send_message(self, to, subject, body):\n self.forum.send_message(self.game, Message(to=to, subject=subject, body=body))", "def player_send_msg(self, player_ip, *args):\r\n\t\ttry:\r\n\t\t\tto_ID = args[0] # IndexError\r\n\t\t\tmessage = args[1:len(args)] # IndexError\r\n\t\t\tteam_type = self._teammates[player_ip] # KeyError\r\n\t\texcept IndexError:\t# Invalid arguments\r\n\t\t\tself._comm_server.send_message(player_ip, \"send-to fail\")\r\n\t\texcept KeyError:\t# Invalid player\r\n\t\t\tself._comm_server.send_message(player_ip, \"send-to fail\")\r\n\t\telse:\r\n\t\t\tfrom_ID = self._teams[team_type].get_player_info_by_IP(player_ip).ID\r\n\t\t\tto_info = self._teams[team_type].get_player_info_by_ID(to_ID)\r\n\t\t\tif to_info is not None:\r\n\t\t\t\tmsg_str = \"\"\r\n\t\t\t\tfor msg_block in message:\r\n\t\t\t\t\tmsg_str += \" \" + msg_block\r\n\r\n\t\t\t\tself._comm_server.send_message(to_info.IP, \"send-from {0}{1}\" \\\r\n\t\t\t\t\t.format(from_ID, msg_str))\r\n\t\t\t\tself._comm_server.send_message(player_ip, \"send-to ok\")\r\n\t\t\telse:\r\n\t\t\t\tself._comm_server.send_message(player_ip, \"send-to fail\")", "def send_slack(self, message):\n self.slack_client.api_call('chat.postMessage', channel=self.slack_channel, text=message, username=self.username, icon_emoji=self.slack_icon_emoji)\n print(\"Slack Notification sent\")", "def __send_message(self, data):\n if RemotePlayerProxy.DEBUG:\n print(f'[RPP] [SEND] -> [{self.name}]: {data}')\n\n try:\n self.__socket.sendall(bytes(data, 'ascii'))\n except Exception as e:\n if RemotePlayerProxy.DEBUG:\n print(e)", "def send_message(self, data):\n return self.__json_call('chat.postEphemeral', data)", "def send_message(message: str, bot_client=bot):\n try:\n return bot_client.send_message(chat_id=CHAT_ID, text=message)\n except telegram.error.TelegramError as e:\n logger.exception(e)\n raise", "def send_message(self, message, user, msg_type=MSG_TYPE_MESSAGE):\n final_msg = {'room': str(self.id), 'message': message, 'user_id': str(user.id), 'nombre': user.nombre, 'apellidos': user.apellidos, 'msg_type': msg_type}\n mensaje = Mensaje(mensaje=message, emisor=user, room=self)\n mensaje.save()\n self.websocket_group.send({\n 'text': json.dumps({\n 'mensaje': final_msg,\n 'type': 'message',\n 'msg_type': msg_type\n })\n })", "def send(self, msg):\n self.ws.send(json.dumps(msg))", "def send_protocol_message(self, msg):\n self.conn.send(msg + \"\\0\")", "def _send_msg(self, msg: str, bot: Bot = None, parse_mode: ParseMode = ParseMode.MARKDOWN):\n bot = bot or self._updater.bot\n\n keyboard = [['/status', '/position', '/balance'],\n ['/version', '/help']]\n\n reply_markup = ReplyKeyboardMarkup(keyboard)\n\n try:\n try:\n bot.send_message(\n self.chat_id,\n text=msg,\n parse_mode=parse_mode,\n reply_markup=reply_markup\n )\n except NetworkError as network_err:\n # Sometimes the telegram server resets the current connection,\n # if this is the case we send the message again.\n logger.warning(\n 'Telegram NetworkError: %s! 
Trying one more time.',\n network_err.message\n )\n bot.send_message(\n self.chat_id,\n text=msg,\n parse_mode=parse_mode,\n reply_markup=reply_markup\n )\n except TelegramError as telegram_err:\n logger.warning(\n 'TelegramError: %s! Giving up on that message.',\n telegram_err.message\n )", "def send(self, message):\n self.logger.info(\"Sending to server: %s\" % message)\n self.sendLine(message)", "def _sendMessage(self, msgType, msgData):\r\n if not self._conn:\r\n raise ConnectionError('No connection registered.')\r\n\r\n self._conn.sendMessage({'type':msgType, 'data':msgData})", "def send_to_telegram(text):\n\n bot = telegram.Bot(token='')\n # chat_id = -1001371737931\n chat_id = ''\n bot.send_message(chat_id=chat_id, text=text)\n time.sleep(5)", "def send(self):\r\n if self.connection:\r\n self.connection.send(self.getLine())\r\n else:\r\n print \"(0) message without connection could not be sent\"", "async def chat_message(self, event):\n\n print(\"PublicChatConsumer\", \"chat_message from user\", event[\"user_id\"])\n await self.send_json({\n \"msg_type\": MSG_TYPE_MESSAGE,\n \"profile_image\": event[\"profile_image\"],\n \"username\": event[\"username\"],\n \"user_id\": event[\"user_id\"],\n \"message\": event[\"message\"],\n \"natural_timestamp\": humanize_or_normal(timezone.now())\n })", "async def broadcast(self, msg):\n if not self._session:\n await self._create_session()\n \n if isinstance(msg, str):\n msg = Message(msg)\n assert isinstance(msg, Message)\n msg.set_recipient(-1)\n msg.set_sender(self._network._robot.id)\n await self._session.put(self._network.SERVER_ADDR + '/api/send', json=msg.to_dict())\n return msg", "def _send(self, message):\n self.sock.sendall('%s\\n' % message)", "def send_message(self, message:str):\n self.chat.click()\n text_box = self.chat.find_element_by_xpath(\"//div[@class='_2_1wd copyable-text selectable-text' and @data-tab='6']\")\n text_box.click()\n text_box.send_keys(message)\n time.sleep(0.1)\n send_button = self.chat.find_element_by_xpath(\"//button[@class='_1E0Oz']\")\n send_button.click()" ]
[ "0.79602695", "0.7867005", "0.7461435", "0.74155366", "0.73181975", "0.72749156", "0.72387594", "0.72059983", "0.71983445", "0.71811515", "0.7162376", "0.71472645", "0.7142168", "0.7140404", "0.70665747", "0.7054692", "0.704193", "0.70210105", "0.69757414", "0.69357747", "0.69357747", "0.69357747", "0.69257945", "0.6883542", "0.686552", "0.68562233", "0.6856006", "0.68529034", "0.6845461", "0.6839592", "0.6836895", "0.68104917", "0.6805418", "0.6803435", "0.68002355", "0.6793569", "0.6766834", "0.67641383", "0.67369497", "0.67356366", "0.6733321", "0.6714628", "0.6713918", "0.67063314", "0.6679915", "0.6671596", "0.66677934", "0.6665787", "0.6652186", "0.6631049", "0.6621633", "0.66139644", "0.6609234", "0.66066456", "0.6597372", "0.65917677", "0.6577749", "0.65720534", "0.65694493", "0.6565872", "0.6558836", "0.65587", "0.65562534", "0.65511924", "0.6546647", "0.65390587", "0.6532539", "0.65262264", "0.6517976", "0.6512391", "0.6500789", "0.6500678", "0.6498331", "0.64979196", "0.64974976", "0.6492986", "0.6491123", "0.6484812", "0.6483529", "0.6476978", "0.64750665", "0.64747393", "0.6470342", "0.6470298", "0.6468191", "0.64672", "0.6459623", "0.6457896", "0.645709", "0.64544404", "0.64526296", "0.64492404", "0.64488465", "0.6447004", "0.64423746", "0.64346576", "0.64335895", "0.64181536", "0.64113337", "0.64080405" ]
0.6836778
31
Unit test for Roybal_Student_Analytics constructor.
def test_init(self):
        s = Student_Analytics()
        self.assertEqual(len(s.data),89)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, student):\n pass", "def __init__(self, student, start_date, day_periods):\n self.student = student\n self.start_date = start_date\n self.day_periods = day_periods\n self.student_name = student.full_name_lastname_first(\n show_middle_name=False)\n self.student_gender= student.gender\n self.student_attendance_record = self.student.attendance", "def setUpClass(cls):\n super(TestPatientStatsHistory, cls).setUpClass()\n cls.stats_data = {\n \"num_patients_visited\": 1,\n \"num_patients_home_quarantine\": 2,\n \"num_patients_isolation\": 3,\n \"num_patient_referred\": 4,\n }", "def runTest(self):\n self.setUp()\n self.test_Analytics1()", "def test_constructor(self):\n # Args\n name = 'Mathew'\n age = 13\n grade = 14\n\n # Object construction\n obj = models.Student(name=name, age=age, grade=grade)\n # Constructor class works\n self.assertIsNotNone(obj)\n self.assertEqual(obj.name, name)\n self.assertEqual(obj.grade, grade)", "def __init__(self, *args, **kwargs):\r\n super(UniqueCourseTest, self).__init__(*args, **kwargs)", "def test_constructor(self):\n pass", "def setUp(self):\n program = program_utils.seedProgram()\n self.profile = profile_utils.seedSOCStudent(program)", "def setUp(self):\r\n super(CorrelationStatsTests, self).setUp()\r\n self.cs = CorrelationStats([self.overview_dm, self.overview_dm])", "def test_grade_change(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(int(s.grade_change()),0)", "def test_constructor(self):\r\n self.assertTrue(isinstance(self.res1, RichnessEstimatesResults))\r\n self.assertEqual(self.res1.getSampleCount(), 0)", "def __init__(self):\n\n # sample must be between 0 and 1\n if self.sample <= 0 or self.sample > 1:\n raise Exception('sample {} should be > 0 and <= 1'.format(self.sample))\n\n # sample RDD if sample is specified AND rdd has not been pre-sampled\n if self.sample < 1 and not self.pre_sampled:\n self.rdd = self.rdd.sample(False, self.sample, self.seed)\n\n # Assign each RDD with counter. Reduce and collect.\n collectedCounts = self.rdd.reduceByKey(lambda x,y: x+y) \\\n .collect() # id, number of times that id appears)\n\n # function that re-calculates coverage based on sampling\n approximateCounts = lambda counts, sample: int(counts * 1.0/sample)\n\n # approximate counts based on sampling\n self.collectedCounts = list(map(lambda x: approximateCounts(x[1], self.sample), collectedCounts))", "def __init__(self):\n super().__init__()\n self.metric = 'ACURCY'", "def __init__(self):\n\n # sample must be between 0 and 1\n if self.sample <= 0 or self.sample > 1:\n raise Exception('sample {} should be > 0 and <= 1'.format(self.sample))\n\n # sample RDD if sample is specified AND rdd has not been pre-sampled\n if self.sample < 1 and not self.pre_sampled:\n self.rdd = self.rdd.sample(False, self.sample, self.seed)\n\n # Assign each RDD with counter. 
Reduce and collect.\n collectedCounts = self.rdd.reduceByKey(lambda x,y: x+y) \\\n .collect() # (id, count), number of times that count appears)\n\n # function that re-calculates coverage based on sampling\n approximateCounts = lambda counts, sample: int(counts * 1.0/sample)\n\n # restructure each record so record structure is (key: sampleId, value: (coverage, count))\n x = list(map(lambda x: (x[0][0], (x[0][1], approximateCounts(x[1], self.sample))), collectedCounts))\n\n # create dictionary where keys are the sampleId\n self.collectedCounts = collections.defaultdict(set)\n for k, v in x:\n self.collectedCounts[k].add(v)", "def setUp(self):\n\t\tself.user = create_user()\n\n\t\tself.school_name = 'My Recent School'\n\t\tself.course_name = 'My Course Name'\n\t\tself.start_date = timezone.now()\n\t\tself.end_date = timezone.now() + timedelta(days=365)\n\t\tself.grade_obtained = 'My Grade'", "def __init__(self):\n super().__init__()\n self.metric = 'JACRD'", "def setUp(self):\n super(TestRiskSnapshotting, self).setUp()\n self.api = ExternalApiClient(use_ggrcq_service_account=True)\n self.objgen = ObjectGenerator()", "def __init__(self, name, age, student_id, courses):\n self.name = name\n self.age = age\n self.student_id = student_id\n self.courses = courses\n\n # When adding a student, increment the\n # class variable student_count\n Student.student_count += 1", "def setUp(self):\n self.student = Student(first_name=\"Eva\", last_name=\"Maier\", id=123456)\n self.assessor = Assessor(first_name=\"Peter\", last_name=\"Müller\")\n self.supervisor = Supervisor(first_name=\"Thomas\",\n last_name=\"Smits\", id=\"t.smits\")\n\n self.assessor.save()\n self.supervisor.save()\n self.student.save()", "def setUp(self):\n super().setUp()\n self.report = {\n \"report_uuid\": \"report_uuid\",\n \"title\": \"Report\",\n \"subjects\": {\"subject_uuid\": {\"name\": \"Subject\", \"type\": \"software\", \"metrics\": {}}},\n }", "def setUp(self):\n super().setUp()\n self.subject = {\n \"type\": \"software\",\n \"name\": \"Subject\",\n \"metrics\": {\"metric_uuid\": {\"type\": \"violations\", \"name\": \"Metric\", \"sources\": {}}},\n }", "def __init__(self, first_name, last_name, grad_year):\n super().__init__(first_name, last_name)\n self.grad_year = grad_year # The attribute \"grad_year\" is specific to the \"Student\" class.", "def __init__(self,student_id,lname,fname, major='Computer Science',gpa='0.0'):\n super().__init__(lname,fname) # Call init on parent class\n self._student_id = student_id\n self._major = major\n self._gpa = gpa", "def test_avg_grade(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(s.classify_grade(s.avg_grade(3)),\"B\")", "def setUp(self):\r\n super(CategoryStatsTests, self).setUp()\r\n self.cs_overview = CategoryStats(self.overview_map, [self.overview_dm],\r\n [\"Treatment\", \"DOB\"])", "def __init__(self):\n OWSReport.__init__(self)\n self.stats['type'] = 'OGC:WCS'\n self.stats['operations']['GetCoverage'] = {}\n self.stats['operations']['GetCoverage']['hits'] = 0\n self.stats['operations']['GetCoverage']['resource'] = {}\n self.stats['operations']['GetCoverage']['resource']['param'] = 'coverage'\n self.stats['operations']['GetCoverage']['resource']['list'] = {}\n self.stats['operations']['DescribeCoverage'] = {}\n self.stats['operations']['DescribeCoverage']['hits'] = 0", "def __init__(self):\n\n #call super class's __init__ method\n super(TRiseSampler, self).__init__(name=\"trise\", observed=False)", "def __init__(self, *args, **kwargs):\r\n super(StubOraService, 
self).__init__(*args, **kwargs)\r\n\r\n # Create a dict to map student ID's to their state\r\n self._students = dict()\r\n\r\n # By default, no problems are available for peer grading\r\n # You can add to this list using the `register_location` HTTP end-point\r\n # This is a dict mapping problem locations to problem names\r\n self.problems = dict()", "def __init__(self, dataset_name, teacher_model, students_model):\n self.data_manager = DataManager(dataset_name)\n self.dataset_name = dataset_name\n self.teacher_model = teacher_model\n self.student_model = students_model", "def __init__(self, samples, analysis):\r\n self.samples = samples\r\n self.analysis = analysis", "def __init__(self):\n super().__init__()\n self.metric = 'AUC'", "def __init__(self):\n super(ForceBalanceTestResult,self).__init__()\n self.logger = forcebalance.output.getLogger('forcebalance.test.results')", "def __init__(self):\n OWSReport.__init__(self)\n self.stats['type'] = 'OGC:SOS'\n self.stats['operations']['GetObservation'] = {}\n self.stats['operations']['GetObservation']['hits'] = 0\n self.stats['operations']['GetObservation']['resource'] = {}\n self.stats['operations']['GetObservation']['resource']['param'] = 'observedproperty'\n self.stats['operations']['GetObservation']['resource']['list'] = {}\n self.stats['operations']['DescribeSensor'] = {}\n self.stats['operations']['DescribeSensor']['hits'] = 0", "def __init__(self, test_conditions_4_current_env):\n # Init Gaia Class\n self.test_conditions_4_current_env = test_conditions_4_current_env\n self.gaia = GaiaClass(gaia_tap_server=conf.HOST_URL, gaia_data_server=conf.HOST_URL)", "def test_import(self):\n self.assertTrue(NagiosPerfdataCollector)", "def __init__(self, case, **kwargs):\n SystemTestsCommon.__init__(self, case, **kwargs)", "def setUp(self):\r\n super(TestRawGradeCSV, self).setUp()\r\n\r\n self.instructor = 'view2@test.com'\r\n self.create_account('u2', self.instructor, self.password)\r\n self.activate_user(self.instructor)\r\n CourseStaffRole(self.course.id).add_users(User.objects.get(email=self.instructor))\r\n self.logout()\r\n self.login(self.instructor, self.password)\r\n self.enroll(self.course)\r\n\r\n # set up a simple course with four problems\r\n self.homework = self.add_graded_section_to_course('homework', late=False, reset=False, showanswer=False)\r\n self.add_dropdown_to_section(self.homework.location, 'p1', 1)\r\n self.add_dropdown_to_section(self.homework.location, 'p2', 1)\r\n self.add_dropdown_to_section(self.homework.location, 'p3', 1)\r\n self.refresh_course()", "def initialize_analyticsreporting():\n\n # Build the credentials object\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n KEY_FILE_LOCATION, SCOPES,\n )\n # Build the service object.\n analytics = build('analyticsreporting', 'v4', credentials=credentials)\n return analytics", "def __init__(self):\n super().__init__()\n self.metric = 'MAHLNBS'", "def __init__(self, sample_data):\r\n n = self._calculate_total_individual_count(sample_data)\r\n if n < 1:\r\n raise EmptySampleError(\"Encountered a sample without any recorded \"\r\n \"observations.\")\r\n else:\r\n self._n = n\r\n\r\n self._s_obs = self._calculate_observation_count(sample_data)\r\n self._fk = self._calculate_abundance_frequency_counts(sample_data, n)", "def __init__(self):\n super().__init__()\n self.metric = 'PRCISON'", "def __init__(self, student, major_map):\n self.student = student\n self.major_map = major_map\n\n # matches \n self.general_studies = self.__general_studies_self()\n 
self.subject = self.__subject_self()\n self.exact = self.__exact_self()\n self.low_elective = self.__low_elective_self()\n self.up_elective = self.__up_elective_self()\n\n # expanded matches\n self.graph = self.__build_graph()\n\n # linear optimization lookup tables\n self.classes_from , self.requirements_to = self.build_opt()\n\n # \n self.expanded = self.class_combinations()\n self.all_combos = self.get_all_combinations()\n\n self.matches = self.___matches()", "def __init__(self):\n super().__init__()\n self.metric = 'PROBDST'", "def test_init(self):\n ar = awstats_reader.AwstatsReader('/tmp', 'example.com')\n self.assertTrue(isinstance(ar, awstats_reader.AwstatsReader))", "def test_default_constructor_args(self):\n data = np.random.random(10)\n\n s = SegmentTestData(data)\n\n self._assert_fields_equal_to_data(s, data)\n\n np.testing.assert_array_equal(s.data, data)", "def __init__(self, icurr, Fs, **kwargs):\n\t\tself.eventData=icurr\n\t\tself.Fs=Fs\n\n\t\t# Will throw a key error if not passed\n\t\tself.eStartEstimate=kwargs['eventstart']\n\t\tself.eEndEstimate=kwargs['eventend']\n\n\t\tself.settingsDict=kwargs['algosettingsdict']\n\n\t\tself.absDataStartIndex=kwargs['absdatidx']\n\n\t\t[ self.baseMean, self.baseSD, self.baseSlope ]=kwargs['baselinestats']\n\n\t\tself.saveTS=kwargs['savets']\n\t\t\n\t\t# Optional args. If dataFileHnd is not passed at init, it must be set later\n\t\t# If not set before WriteEvent is called, it will result in a MissingMDIOError\n\t\tself.dataFileHnd=kwargs.pop(\"datafilehnd\", None)\n\t\t# self.dataFileHnd=kwargs['datafileHnd']\n\n\t\t# meta-data attrs that are common to all event processing\n\t\tself.mdProcessingStatus='normal'\n\n\t\t# print self.settingsDict\n\t\t# Call sub-class initialization\n\t\tself._init(**kwargs)", "def test_constructor(self):\n uploader=DummyS3Uploader()\n soap_client = DummyContentCafeSOAPClient()\n api = ContentCafeAPI(self._db, None, \"user_id\", \"password\", \n uploader, soap_client=soap_client)\n provider = ContentCafeCoverageProvider(\n self._db, api=api, uploader=uploader\n )", "def __init__(self, analytics, anal_name: str):\n self.analytics = analytics\n self.anal_name = anal_name\n self.lock = asyncio.Lock()\n self.counter = 0", "def setUp(self):\n\n self.logger_stats = DataScreen()", "def __init__(self, name, skill):\n \n super(Student, self).__init__(name)\n self.grades = []\n self.skill = skill", "def setUpClass(cls):\n # Make the output directory if needed\n if not os.path.isdir(os.path.join(os.path.dirname(__file__), 'output')):\n os.mkdir(os.path.join(os.path.dirname(__file__), 'output'))\n # Create a new directory if needed\n if not os.path.isdir(outdir):\n os.mkdir(outdir)\n # If not, then clear any files already in the output directory so that they don't influence tests\n else:\n for file in os.listdir(outdir):\n os.remove(os.path.join(outdir, file))\n\n # Create sampler\n setup_StandardGCMCSystemSampler()\n\n return None", "def __init__(self, test_resources):\n self.info = None\n self.is_mock = False\n self.cluster = None\n self.bucket = None\n self.bucket_name = None\n self.cluster_version = None\n self.set_test_resources(test_resources)", "def setUp(self):\n self.t = Anomaly()", "def __init__(self, driver, log):\r\n if not driver:\r\n raise Exception('driver not provided')\r\n self.driver = driver\r\n self.log = log", "def test_stats_class_initialisation(self):\n self.assertIsInstance(self.stats,cardutils.Stats)", "def __init__(self):\n super().__init__()\n import sklearn\n import sklearn.linear_model\n 
self.model = sklearn.linear_model.LogisticRegression", "def setUpClass(cls):\n cls.student_f = inspect.getmembers(Student, inspect.isfunction)", "def __init__(self, auto_redirect=True):\n self.redirect = auto_redirect\n self.analytics_interface = AnalyticsInterface(settings.KONTAGENT_API_SERVER,\n settings.KONTAGENT_API_KEY)", "def __init__(self, account_name=None, property_name=None, profile_name=None, ga_settings=None, logging_obj=None):\n if logging_obj is None:\n log_filename = get_log_filepath('Python App')\n logging_obj = Logging(name=__name__, log_filename=log_filename, log_level_str='INFO')\n self.logging_obj = logging_obj\n config_app_util = ConfigAppUtility()\n if ga_settings is None:\n ga_settings = config_app_util.get_settings_dict('GA')\n self.service_old = init('analytics', 'v3', ga_settings)\n self.service = init('analytics', 'v4', ga_settings)\n self.profile_id = None\n\n if account_name is not None and property_name is not None and profile_name is not None:\n (profile, property, account) = self.get_profile_by_name(account_name, property_name, profile_name)\n profile_id = self.get_profile_id(profile)\n self.set_profile_id(profile_id)\n else:\n log_msg = \"message='The profile ID has not been set. This needs to be set prior to executing any queries.'\"\n self.logging_obj.log(self.logging_obj.WARN, log_msg)", "def __init__(self):\n super(VirusTotalAnalysisPlugin, self).__init__(VirusTotalAnalyzer)\n self._api_key = None", "def setUpClass(cls):\n # Make the output directory if needed\n if not os.path.isdir(os.path.join(os.path.dirname(__file__), 'output')):\n os.mkdir(os.path.join(os.path.dirname(__file__), 'output'))\n # Create a new directory if needed\n if not os.path.isdir(outdir):\n os.mkdir(outdir)\n # If not, then clear any files already in the output directory so that they don't influence tests\n else:\n for file in os.listdir(outdir):\n os.remove(os.path.join(outdir, file))\n\n # Create sampler\n setup_NonequilibriumGCMCSystemSampler()\n\n return None", "def setUp(self):\n # construct instance of Roll\n self.roll = Roll('John Smith')", "def test_classify_grade(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(s.classify_grade(5.00),\"A+\")", "def __init__(self, data_source, num_resamples=NUM_RESAMPLES, method=GSEA_RANKING_SNR, case=None, control=None,\n preprocessors=None):\n logger.debug('GSEA initialised with num_resamples=%d and ranking_method=%s' % (num_resamples, method))\n super().__init__(data_source, preprocessors=preprocessors)\n self.num_resamples = num_resamples\n self.method = method\n self.case = case\n self.control = control", "def __init__(self):\n self._profiling_mode = False\n self._total_time_ms = 0.0\n self._traced_records = []\n self._statistical_results = {}", "def setUp(self):\n\n self.client = None\n if conf.options.get_value('runlive') == 'true':\n self.client = gdata.analytics.client.AnalyticsClient()\n self.client.http_client.debug = True\n\n conf.configure_client(\n self.client,\n 'AnalyticsClientTest',\n self.client.auth_service)", "def test_setting_constructor_args(self):\n\n maximum = 1.\n mean = 2.\n minimum = 3.\n shape = (4, 2)\n size = 8\n std = 5.\n\n data = 'nonexisting_test_file.dat'\n\n s = SegmentTestData(data, maximum=maximum, mean=mean, minimum=minimum, shape=shape, size=size, std=std)\n\n self.assertEqual(s.max, maximum)\n self.assertEqual(s.mean, mean)\n self.assertEqual(s.min, minimum)\n self.assertEqual(s.shape, shape)\n self.assertEqual(s.size, size)\n self.assertEqual(s.std, std)\n\n self.assertEqual(s.data, 
data)", "def __init__(self):\n self.students = []\n self.grades = {}\n self.is_sorted = True", "def runAnalytics():\n #gets OAuth from the API\n analytics = get_Analytics_service()\n #get the object return from the API\n #send that object to print out useful fields\n response = get_report(analytics)\n print_response(response)", "def __init__(self, checkpoint_id, student_id, date, title, card):\n self.id = checkpoint_id\n self.student = models.users.User.get_user_by_id(student_id)\n self.date = date\n self.title = title\n self.card = card", "def __init__(self):\n self.students = {}", "def setUp(self):\n self.example = Example()", "def setUp(self):\n super().setUp()\n self.metric = {\n \"name\": \"Metric\",\n \"type\": \"security_warnings\",\n \"sources\": {\"source_uuid\": {\"type\": \"owasp_zap\", \"name\": \"Source\"}},\n }", "def setUpClass(cls):\n super(test_usage_retention, cls).setUpClass()\n cls.mgmt_client = cls.dbaas_provider.mgmt_client.reddwarfclient\n cls.mgmt_client.authenticate()", "def setUp(self) -> None:\n self.database = Mock()\n self.database.reports.distinct.return_value = []\n self.database.datamodels.find_one.return_value = self.DATA_MODEL\n self.report_json = json.dumps(\n {\n \"report_uuid\": \"id\",\n \"subjects\": [\n {\n \"name\": \"name\",\n \"type\": \"software\",\n \"metrics\": [\n {\n \"type\": \"security_warnings\",\n \"sources\": [{\"type\": \"sonarqube\", \"parameters\": {\"url\": {}}}],\n },\n ],\n },\n ],\n },\n )\n self.database.sessions.find_one.return_value = {\"user\": \"jadoe\"}", "def setUpClass(cls):\n\n # Build articles database\n Execute.run(Utils.FILE + \"/data\", Utils.FILE + \"/models\", Utils.STUDY)", "def __init__(self, first_name, last_name, address):\n\n self.first_name = first_name\n self.last_name = last_name\n self.address = address\n\n # Creates dictionary for each student with the label & info.\n\n self.info = {\n 'first name': self.first_name,\n 'last name': self.last_name,\n 'address': self.address,\n }", "def setUpClass(cls):\n # Make the output directory if needed\n if not os.path.isdir(os.path.join(os.path.dirname(__file__), 'output')):\n os.mkdir(os.path.join(os.path.dirname(__file__), 'output'))\n # Create a new directory if needed\n if not os.path.isdir(outdir):\n os.mkdir(outdir)\n # If not, then clear any files already in the output directory so that they don't influence tests\n else:\n for file in os.listdir(outdir):\n os.remove(os.path.join(outdir, file))\n\n # Create sampler\n setup_StandardGCMCSphereSampler()\n\n return None", "def test_get_students_for_contact(self):\n pass", "def __init__(self):\n self.label = \"Agterberg-Cheng CI Test\"\n self.description = \"\"\n self.canRunInBackground = False\n self.category = \"Weights of Evidence\"", "def setUpClass(cls):\n cls._test_dc = DataCleaner(data = create_test_df())", "def setUpClass(cls):\n # Make the output directory if needed\n if not os.path.isdir(os.path.join(os.path.dirname(__file__), 'output')):\n os.mkdir(os.path.join(os.path.dirname(__file__), 'output'))\n # Create a new directory if needed\n if not os.path.isdir(outdir):\n os.mkdir(outdir)\n # If not, then clear any files already in the output directory so that they don't influence tests\n else:\n for file in os.listdir(outdir):\n os.remove(os.path.join(outdir, file))\n\n # Need to create the sampler\n setup_BaseGrandCanonicalMonteCarloSampler()\n\n return None", "def setUpClass(cls):\n # Make the output directory if needed\n if not os.path.isdir(os.path.join(os.path.dirname(__file__), 'output')):\n 
os.mkdir(os.path.join(os.path.dirname(__file__), 'output'))\n # Create a new directory if needed\n if not os.path.isdir(outdir):\n os.mkdir(outdir)\n # If not, then clear any files already in the output directory so that they don't influence tests\n else:\n for file in os.listdir(outdir):\n os.remove(os.path.join(outdir, file))\n\n # Need to create the sampler\n setup_GCMCSystemSampler()\n\n return None", "def initialize_analyticsreporting():\n logging.info(\"Initializing Analytics API...\")\n\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n KEY_FILE_LOCATION, SCOPES)\n\n # Build the service object.\n analytics = build('analyticsreporting', 'v4', credentials=credentials)\n\n return analytics", "def setUpClass(cls):\n super().setUpClass()\n\n cls.accessor = OCPReportDBAccessor(cls.schema)\n cls.report_schema = cls.accessor.report_schema\n cls.all_tables = list(OCP_REPORT_TABLE_MAP.values())\n cls.creator = ReportObjectCreator(cls.schema)\n cls.date_accessor = DateHelper()\n cls.manifest_accessor = ReportManifestDBAccessor()\n cls.dh = DateHelper()", "def __init__(self):\n super().__init__()\n self.metric = 'SNSVTY'", "def __init__(self):\n\n self.start_datetime_wallclock = None;\n self.end_datetime_wallclock = None;\n self.action_count = 0;\n self.reward_cumulative = 0;\n self.mission_xml = None;\n self.mission_type = None;\n self.mission_seed = None;\n self.student_guid = None;\n self.mission_xml_as_expected = None;\n self.is_goal = None;\n self.is_timeout = None;", "def setUpClass(cls):\n\n # initialize Pulsar class\n cls.psr = Pulsar(datadir + \"/B1855+09_NANOGrav_9yv1.gls.par\", datadir + \"/B1855+09_NANOGrav_9yv1.tim\")", "def __init__(self, session, *args, **kwargs):\n self.sasproduct = 'stat'\n # create logging\n self.logger = logging.getLogger(__name__)\n self.logger.setLevel(logging.WARN)\n self.sas = session\n self.logger.debug(\"Initialization of SAS Macro: \" + self.sas.saslog())", "def initialize_analyticsreporting():\n # Parse command-line arguments.\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=[tools.argparser])\n flags = parser.parse_args([])\n\n # Set up a Flow object to be used if we need to authenticate.\n flow = client.flow_from_clientsecrets(\n CLIENT_SECRETS_PATH, scope=SCOPES,\n message=tools.message_if_missing(CLIENT_SECRETS_PATH))\n\n # Prepare credentials, and authorize HTTP object with them.\n # If the credentials don't exist or are invalid run through the native client\n # flow. The Storage object will ensure that if successful the good\n # credentials will get written back to a file.\n storage = file.Storage('analyticsreporting.dat')\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, flags)\n http = credentials.authorize(http=httplib2.Http())\n\n # Build the service object.\n analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI)\n\n return analytics", "def __init__(self, eventSource, dataSource, attributes = {}):\r\n \r\n self.attributes = attributes\r\n self._subscription = event.subscribe(eventSource, self.fire, self)\r\n self._dataSource = dataSource\r\n self._alias = ['_details', 'indicator base']", "def __init__(self, student_key, event_date, cause_name, direction):\n cause_names = {\"student_status\":0, \"transfer\":1, \"reassign\":2}\n self.student_key = student_key\n #There should always be a date here but it seems to sometimes\n #not happen. 
As A temporary fix we use the current date because\n #this is likely to be close to the correct date. Log the error\n #if a date is not there.\n if (not event_date):\n event_date = date.today()\n logging.warning(\n \"Needed to add an arbitrary date for the change '%s' on student %s\" \\\n %(cause_name, unicode(db.get(student_key)))) \n self.date_ordinal = event_date.toordinal()\n self.cause = cause_names[cause_name]\n self.direction = 1\n if (direction == \"Out\"):\n self.direction = 0", "def __init__(__self__, *,\n perf_metric_type: str,\n perf_unit: str,\n sample_series_label: str):\n pulumi.set(__self__, \"perf_metric_type\", perf_metric_type)\n pulumi.set(__self__, \"perf_unit\", perf_unit)\n pulumi.set(__self__, \"sample_series_label\", sample_series_label)", "def __init__(self, api, site_id):\n super().__init__(api, site_id)\n\n self.unit = None", "def __init__(self, api, site_id):\n super().__init__(api, site_id)\n\n self.unit = None", "def initialize_analyticsreporting():\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n KEY_FILE_LOCATION, SCOPES)\n\n # Build the service object.\n analytics = build('analyticsreporting', 'v4', credentials=credentials)\n\n return analytics", "def initialize_analyticsreporting():\n # Parse command-line arguments.\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=[tools.argparser])\n flags = parser.parse_args([])\n\n # Set up a Flow object to be used if we need to authenticate.\n flow = client.flow_from_clientsecrets(\n CLIENT_SECRETS_PATH, scope=SCOPES,\n message=tools.message_if_missing(CLIENT_SECRETS_PATH))\n\n # Prepare credentials, and authorize HTTP object with them.\n # If the credentials don't exist or are invalid run through the native client\n # flow. The Storage object will ensure that if successful the good\n # credentials will get written back to a file.\n storage = file.Storage('auth/analyticsreporting.dat')\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, flags)\n http = credentials.authorize(http=httplib2.Http())\n\n # Build the service object.\n analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI)\n\n return analytics", "def __init__(self):\n super().__init__()\n self.metric = 'ICCORR'", "def test_create(self):\n from supvisors.statistics import StatisticsInstance\n instance = StatisticsInstance(17, 10)\n # check attributes\n self.assertEqual(3, instance.period)\n self.assertEqual(10, instance.depth)\n self.assertEqual(-1, instance.counter)\n self.assertIsNone(instance.ref_stats)\n self.assertIs(list, type(instance.cpu))\n self.assertFalse(instance.cpu)\n self.assertIs(list, type(instance.mem))\n self.assertFalse(instance.mem)\n self.assertIs(dict, type(instance.io))\n self.assertFalse(instance.io)\n self.assertIs(dict, type(instance.proc))\n self.assertFalse(instance.proc)", "def __init__(self, classific_method=\"LogisticRegression\"):\n\t\tself.classific_method = classific_method" ]
[ "0.6371787", "0.6314157", "0.6181478", "0.6096545", "0.6089736", "0.60254514", "0.5956355", "0.59550136", "0.594131", "0.58837605", "0.58226454", "0.5809876", "0.57946306", "0.5790085", "0.578611", "0.57836026", "0.57833344", "0.57766145", "0.5764777", "0.5755531", "0.574614", "0.573403", "0.57335436", "0.57245237", "0.5723998", "0.5710367", "0.5704233", "0.57041055", "0.57020575", "0.5689032", "0.5683701", "0.5671407", "0.56691736", "0.56330174", "0.5611047", "0.56076145", "0.55919176", "0.5579615", "0.5578395", "0.5578151", "0.5570492", "0.5568785", "0.556118", "0.5553244", "0.5551908", "0.55481327", "0.55372393", "0.55304646", "0.5527928", "0.55247414", "0.5522308", "0.5518991", "0.5518649", "0.5511416", "0.5511273", "0.5510358", "0.5494674", "0.5494226", "0.5493091", "0.54885304", "0.54881126", "0.54865694", "0.5483088", "0.5474246", "0.54723465", "0.54568297", "0.5456286", "0.5455728", "0.5453063", "0.5446228", "0.54458195", "0.544523", "0.544417", "0.54324496", "0.54261816", "0.5419903", "0.5412035", "0.5411613", "0.54115057", "0.539854", "0.5398217", "0.53963226", "0.5394133", "0.5392316", "0.5389262", "0.5378891", "0.53784853", "0.5377951", "0.53719616", "0.5370492", "0.5366428", "0.53606117", "0.5358519", "0.53566396", "0.53566396", "0.5351071", "0.53508234", "0.5339773", "0.53396016", "0.53394544" ]
0.75536776
0
Unit test for Roybal_Student_Analytics classify_grade method.
def test_classify_grade(self):
        s = Student_Analytics()
        self.assertEqual(s.classify_grade(5.00),"A+")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_avg_grade(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(s.classify_grade(s.avg_grade(3)),\"B\")", "def classification_score(self, x, y):\t\n\t\tpass", "def test_classify(self):\n classifiers, estimates =\\\n ada_boost.train_dataset(self.larger_matrix,\n self.larger_class_labels,\n 9)\n data_to_classify = [1, 0.5]\n classifications = ada_boost.classify(data_to_classify, classifiers)\n expected = np.mat([-1.])\n self.assertEqual(classifications, expected)", "def test_grade_change(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(int(s.grade_change()),0)", "def get_prediction(course_grades, train_data, train_grades):\n\n # In the case the student has no grade, return a predicted grade of 0\n if train_data.size == 0:\n return 0,0\n\n model = BayesianRidge()\n model.fit(train_data, train_grades)\n y_mean, y_sd = model.predict(np.array(course_grades).reshape(1, -1), return_std=True)\n\n return y_mean, y_sd", "def classify(self, data):\n \"*** YOUR CODE HERE ***\"\n return self.sklearn_classifier.predict(data)", "def classify(data, labels, (train_idx, test_idx), classifier=None):\r\n\r\n assert classifier is not None, \"Why would you pass not classifier?\"\r\n\r\n # Data scaling based on training set\r\n scaler = SupervisedStdScaler() #SupervisedRobustScaler() # # \r\n scaler.fit(data[train_idx,:], labels[train_idx], label=-1)\r\n #scaler.fit(data[train_idx,:], labels[train_idx])\r\n data_train = scaler.transform(data[train_idx,:])\r\n data_test = scaler.transform(data[test_idx,:])\r\n try:\r\n classifier.fit(data_train, labels[train_idx])\r\n \r\n \r\n confMat = confusion_matrix(labels[test_idx],\r\n classifier.predict(data_test))\r\n if confMat.shape == (1,1):\r\n if all(labels[test_idx] == -1):\r\n confMat = np.array([[confMat[0], 0], [0, 0]], dtype=confMat.dtype)\r\n else:\r\n confMat = np.array([[0, 0], [0, confMat[0]]], dtype=confMat.dtype)\r\n confMatRate = confMat / np.tile(np.sum(confMat, axis=1).astype('float'), (2,1)).transpose()\r\n totalErr = (confMat[0, 1] + confMat[1, 0]) / float(confMat.sum())\r\n #if type(classifier) not in [type(None), DummyClassifier]:\r\n if hasattr(classifier,'param_grid'): \r\n #isinstance(classifier, GridSearchCV) or \\\r\n # isinstance(classifier, RandomizedSearchCV):\r\n fitted_model = classifier.best_estimator_\r\n else:\r\n fitted_model = copy.copy(classifier) \r\n return confMatRate, totalErr, fitted_model\r\n except np.linalg.linalg.LinAlgError as e:\r\n # sahil added statement to raise the error instead of returning nun values\r\n print e.message\r\n raise e\r\n # return np.array([[np.nan, np.nan], [np.nan, np.nan]]), np.nan, None\r", "def classify(self, data):\n abstract", "def test_a_grade(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.submit_question_answer('p2', {'2_1': 'Correct'})\r\n self.submit_question_answer('p3', {'2_1': 'Correct'})\r\n self.check_grade_percent(1.0)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'A')", "def classify(self, example):\n raise NotImplementedError()", "def classify_test(classifier, test_data):\n for d in test_data:\n test(d[\"name\"], d[\"attribute\"], classifier)", "def classification_evaluation(self, test_set, predicted_values, certainty):\r\n\r\n percent_accuracy = self.percent_accuracy(test_set, predicted_values)\r\n one_zero = self.one_zero_loss(test_set, predicted_values)\r\n log_loss = self.log_loss(test_set, predicted_values, certainty)\r\n print(f\"Percent correct:\\t{percent_accuracy * 100:.2f}%\")\r\n 
print(f\"1/0 Loss:\\t\\t\\t{one_zero:.2f}\")\r\n print(\"Log Loss: \", log_loss)", "def classify(trainX, trainY, testX, testY):\n trainC = getClasses(trainY)\n P = estimatePosterior(trainX, trainC, testX)\n E = fit(testX, P)\n (e_rate, se, interval) = error.confidenceInterval(testY, E)\n return (P, E, e_rate, se, interval)", "def classify(trait_arg, alpha):\r\n x = df['essay'][1:]\r\n x = x.str.lower()\r\n y = df[trait_arg][1:]\r\n\r\n print(\"Predicting \", trait_arg, \" with alpha = \", alpha)\r\n print(\"Test set, Train Set ratio: 1:3\")\r\n\r\n # Test train split in 25 : 75 ratio\r\n x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=11)\r\n\r\n # TF-IDF vectorizer\r\n vectorizer = TfidfVectorizer()\r\n xx_train = vectorizer.fit_transform(x_train)\r\n xx_test = vectorizer.transform(x_test)\r\n\r\n # Multinomial Naive Bayes Classifier\r\n classifier = MultinomialNB(alpha=alpha)\r\n classifier.fit(xx_train, y_train)\r\n\r\n predictions = classifier.predict(xx_test)\r\n print(\"Confusion Matrix:\")\r\n print(classification_report(y_test, predictions))\r\n score = accuracy_score(y_test, predictions)\r\n print(\"Accuracy:\", score)", "def test(name, data, classifier):\n classification = classifier.classify(data)\n print('Item ' + name + ' is a ' + classification)", "def binary_classification(self,test_samples,test_labels):\n num_correct = 0\n num_samples = 0\n for sample,perm in zip(test_samples,test_labels):\n if len(sample.vertices()) == 3:\n continue\n minus_edge_weights = sample.get_edge_weights(self._weights)\n S_correct = 0.0\n S_all = 0.0\n for e,w in minus_edge_weights:\n if e == (START_NODE,END_NODE):\n continue\n if e in perm:\n S_correct += -1 * w\n S_all += -1 * w\n S_avg = S_all / (len(sample.vertices()) - 2)\n if S_correct > S_avg:\n num_correct += 1\n elif S_correct == S_avg:\n num_correct += 0.5\n num_samples += 1\n if num_samples % 50 == 0:\n print(num_samples)\n return 1.0 * num_correct / num_samples", "def baseline(x_data, y_data, stra = \"uniform\"):\r\n x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.2)\r\n dummy = DummyClassifier(strategy= stra)\r\n dummy.fit(x_train, y_train)\r\n y_pred = dummy.predict(x_test)\r\n accu = accuracy_score(y_test, y_pred)\r\n return accu", "def classify(self, x, y):\n\t\tif self.classific_method==\"LogisticRegression\":\n\t\t\tclf = LogisticRegression().fit(x,y)\n\t\t\tscore = clf.score(x,y)\n\t\t\tparams = {\"coef\" : clf.coef_, \"intercept\" : clf.intercept_}\n\n\t\telif self.classific_method==\"RidgeClassifier\":\n\t\t\tclf = RidgeClassifier().fit(x,y)\n\t\t\tscore = clf.score(x,y)\n\t\t\tparams = clf.get_params()\n\n\t\telif self.classific_method==\"MLPClassifier\":\n\t\t\tclf = MLPClassifier(solver='lbfgs',alpha=1e-5,hidden_layer_sizes=(5,2),\\\n\t\t\t\t\t\t\t\trandom_state=1,max_iter=1000)\n\t\t\tclf.fit(x, y)\n\t\t\tparams = {\"coefs\" : clf.coefs_}\n\t\t\tscore = clf.score(x,y)\n\n\t\telif self.classific_method==\"RandomForestClassifier\":\n\t\t\t# clf = RandomForestClassifier(n_estimators=100, max_depth=20, random_state=2)\n\t\t\t\n\t\t\t# model = RandomForestClassifier(random_state=2)\n\t\t\t# grid_parameters = {'n_estimators': [i for i in range(300, 601, 50)],\\\n\t\t\t# \t\t\t\t\t'min_samples_split' : [2, 10, 20, 30, 40]}\n\t\t\t# grid = GridSearchCV(estimator=model, param_grid=grid_parameters)\n\t\t\t# grid_result = grid.fit(x, y)\n\n\t\t\t# n_estimator = grid_result.best_params_['n_estimators']\n\t\t\t# min_samples_split = 
grid_result.best_params_['min_samples_split']\n\t\t\t\n\n\t\t\tclf = RandomForestClassifier(random_state=2,n_estimators=400,\\\n\t\t\t\t\t\t\t\t\t\t min_samples_split=30, max_depth=20)\n\t\t\tclf.fit(x,y)\n\t\t\tscore = clf.score(x,y)\n\t\t\tparams = {}#{\"params\" : grid_result.best_params_}\n\n\t\telif self.classific_method==\"NeuralNetwork\":\n\t\t\tseed = 7\n\t\t\tnp.random.seed(seed)\n\t\t\tinput_shape = x.shape[1]\n\n\n\t\t\tclf = build_keras_model(input_shape,optimizer=\"adam\",init=\"glorot_normal\")\n\n\t\t\tn_epochs = 200\n\t\t\tn_sub_epochs = 10\n\t\t\tsub_epoch_size = len(x) // n_sub_epochs\n\t\t\t# for epoch_number in range(50):\n\t\t\t# \tfor sub_epoch in range(n_sub_epochs):\n\t\t\t# \t\tX = x[sub_epoch * sub_epoch_size: (sub_epoch + 1) * sub_epoch_size]\n\t\t\t# \t\tY = y[sub_epoch * sub_epoch_size: (sub_epoch + 1) * sub_epoch_size]\n\t\t\t# \t\thist = clf.fit(X,Y,epochs=1);\n\t\t\thist=clf.fit(x, y, epochs=n_epochs, batch_size=sub_epoch_size, verbose=0)\n\t\t\tacc = hist.history['accuracy']\n\t\t\tloss = hist.history['loss']\n\t\t\tscore = acc[-1]\n\t\t\tparams = {\"acc\" : acc, \"loss\" : loss}\n\n\t\treturn clf, score, params", "def test_score_with_fitted_estimator(self):\n model = GaussianNB().fit(self.binary.X.train, self.binary.y.train)\n\n # NOTE that the wrapper will pass a call down to `classes_`\n oz = ClassificationScoreVisualizer(model)\n assert_not_fitted(oz, [\"class_counts_\", \"score_\"])\n\n msg = \"could not determine class_counts_\"\n with pytest.warns(YellowbrickWarning, match=msg):\n oz.score(self.binary.X.test, self.binary.y.test)\n assert_fitted(oz, [\"classes_\", \"class_counts_\", \"score_\"])", "def classify():\n yes_dataset = df[df[\"_class\"] == 1] # 470588\n no_dataset = df[df[\"_class\"] == 0] # 1971\n\n parameter_analysis = list()\n for criterion in np.arange(0.05, 0.91, 0.05):\n print(\"doing experiment at criterion = %s ...\" % criterion)\n rate_list = list()\n for i in range(10):\n # shuffle yes_dataset and no_dataset, so we can randomly choose 90% yes_dataset\n # + 90% no_dataset as train dataset, 10% yes_dataset + 10% no_dataset as test dataset\n yes_index = yes_dataset.index.tolist()\n random.shuffle(yes_index)\n no_index = no_dataset.index.tolist()\n random.shuffle(no_index)\n \n # concatenate 90%yes + 90%no, 10%yes + 10%no\n train = pd.concat([\n yes_dataset.loc[yes_index[:1774], :],\n no_dataset.loc[no_index[:423530], :]\n ])\n test = pd.concat([\n yes_dataset.loc[yes_index[1774:], :],\n no_dataset.loc[no_index[423530:], :]\n ]) \n \n # split data and label\n train_data, train_label = (train[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n train[\"_class\"])\n test_data, test_label = (test[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n test[\"_class\"])\n \n # apply classifier\n clf = GaussianNB()\n clf.fit(train_data, train_label)\n probability = clf.predict_proba(test_data).T[1]\n \n result = pd.DataFrame()\n result[\"_class\"] = test_label\n result[\"_predict\"] = probability >= criterion\n \n result_yes = result[result[\"_class\"] == 1]\n yes_yes_rate = sum(result_yes[\"_class\"] == result_yes[\"_predict\"])/len(result_yes[\"_predict\"])\n \n result_no = result[result[\"_class\"] == 0]\n no_no_rate = sum(result_no[\"_class\"] == result_no[\"_predict\"])/len(result_no[\"_predict\"])\n \n rate_list.append((yes_yes_rate, no_no_rate))\n\n rate_list = pd.DataFrame(rate_list)\n yes_yes_rate, no_no_rate 
= rate_list.mean()[0], rate_list.mean()[1]\n parameter_analysis.append((criterion, yes_yes_rate, no_no_rate))\n \n # save data to excel spreadsheet\n parameter_analysis = pd.DataFrame(parameter_analysis, columns=[\"criterion\", \"yes_yes_rate\", \"no_no_rate\"])\n writer = pd.ExcelWriter(\"parameter_analysis.xlsx\")\n parameter_analysis.to_excel(writer, \"parameter_analysis\", index=False)\n writer.save()", "def classification(trainData, trainLabels, testData, method):\n\n nClass = 2\n classLabels = [0,1]\n\n trainLabelsUnqArr = np.unique(trainLabels)\n\n if method == 'NaiveBayes':\n classifier = GaussianNB()\n model = classifier.fit(trainData, trainLabels)\n result = model.predict(testData)\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n elif method == 'knnVoting':\n\n classifier = KNeighborsClassifier(5)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'RandomForests':\n\n classifier = RandomForestClassifier(max_depth=10, random_state=0)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n ############################################\n importances = model.feature_importances_\n std = np.std([tree.feature_importances_ for tree in model.estimators_],\n axis=0)\n indices = np.argsort(importances)[::-1]\n # Print the feature ranking\n print(\"Feature ranking:\")\n for f in range(trainData.shape[1]):\n print(\"%d. feature %d (%f)\" % (f + 1, indices[f], importances[indices[f]]))\n # Plot the feature importances of the forest\n plt.figure()\n plt.title(\"Feature importances\")\n plt.bar(range(trainData.shape[1]), importances[indices],\n color=\"r\", yerr=std[indices], align=\"center\")\n plt.xticks(range(trainData.shape[1]), indices)\n plt.xlim([-1, trainData.shape[1]])\n plt.show()\n\n elif method == 'SVM':\n\n classifier = svm.SVC(C=3, gamma=0.003, probability=True)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'AdaBoost':\n\n classifier = AdaBoostClassifier()\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n ############################################\n importances = model.feature_importances_\n std = np.std([tree.feature_importances_ for tree in model.estimators_],\n axis=0)\n indices = np.argsort(importances)[::-1]\n # Print the feature ranking\n print(\"Feature ranking:\")\n for f in range(trainData.shape[1]):\n print(\"%d. 
feature %d (%f)\" % (f + 1, indices[f], importances[indices[f]]))\n # Plot the feature importances of the forest\n plt.figure()\n plt.title(\"Feature importances\")\n plt.bar(range(trainData.shape[1]), importances[indices],\n color=\"r\", yerr=std[indices], align=\"center\")\n plt.xticks(range(trainData.shape[1]), indices)\n plt.xlim([-1, trainData.shape[1]])\n plt.show()\n\n elif method == 'NeuralNetwork':\n classifier = MLPClassifier(alpha=1)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'LogisticRegression':\n classifier = LogisticRegression()\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'LinearSVM':\n classifier = LinearSVC(random_state=0)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n ############################################\n importances = model.coef_\n # std = np.std([tree.feature_importances_ for tree in model.estimators_],\n plt.plot(importances.shape[1])\n plt.ylabel('some numbers')\n plt.show()\n elif method == 'kNN':\n\n # logger.info(model.coef_)\n # proba = model.predict_proba(testData)\n # proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n # probaDf = pd.DataFrame(data=proba, columns=classLabels)\n neigh = KNeighborsClassifier(n_neighbors=3)\n neigh.fit(trainData, trainLabels)\n\n result=neigh.predict(testData)\n probaDf=neigh.predict_proba(testData)\n\n # logger.info(method)\n\n return result, probaDf", "def test_score():\n\n tpot_obj = TPOTClassifier()\n\n try:\n tpot_obj.score(testing_features, testing_classes)\n assert False # Should be unreachable\n except ValueError:\n pass", "def _evaluate_classifer(self, classifier: object, X_test: np.ndarray, y_test: np.ndarray, scaler: StandardScaler, optimal_threshold: float, beta: float, calculate_confusion_matrix:bool = False) -> tuple:\n\n # If the data was scaled in the pipeline the scaler will be not none othersie (none) don't scale the data\n if scaler is not None:\n X_test = scaler.transform(X_test)\n\n # get probabilities for positive class\n y_pred = classifier.predict_proba(X_test)[:,1]\n\n # predict based on optimal_threshold\n threshold_predictions = [1 if y > optimal_threshold else 0 for y in y_pred]\n\n # calculate scores\n fb_score = fbeta_score(y_test, threshold_predictions, beta=beta)\n balanced_accurcacy = balanced_accuracy_score(y_test, threshold_predictions)\n\n if calculate_confusion_matrix:\n conf_mat = confusion_matrix(y_test, threshold_predictions)\n return fb_score, balanced_accurcacy, conf_mat\n\n return fb_score, balanced_accurcacy", "def check_classifier():\n content = []\n labels = []\n file = 'COMP3074-CW1-Dataset.csv'\n content, labels = get_tag(file, \"question_book\", content, labels)\n file = 'name.csv'\n content, labels = get_tag(file, \"question_book\", content, labels)\n file = 'Small_talk.csv'\n content, labels = get_tag(file, \"small_talk\", content, labels, )\n x_train, x_test, y_train, y_test = train_test_split(content, # Sample feature set to be divided\n labels, # The sample result to be divided (label)\n stratify=labels, # Keep the category proportions\n # the same in training and testing\n test_size=0.25, # 
Refers to the proportion of\n # samples reserved for testing\n random_state=22) # Random seed\n count_vect = CountVectorizer(stop_words=stopwords.words('english'))\n x_train_counts = count_vect.fit_transform(x_train)\n tfidf_transformer = TfidfTransformer(use_idf=True, # Tf_idf\n sublinear_tf=True).fit(x_train_counts)\n x_train_tf = tfidf_transformer.transform(x_train_counts) # Standardize the inherent attributes of the training set,\n # reduce dimensionality and normalize\n classify = LogisticRegression(random_state=0).fit(x_train_tf, y_train) # Logistic regression\n return classify, tfidf_transformer, count_vect", "def test_class_counts(self):\n oz = ClassificationScoreVisualizer(GaussianNB())\n oz.fit(self.multiclass.X.train, self.multiclass.y.train)\n\n unique, counts = np.unique(self.multiclass.y.train, return_counts=True)\n npt.assert_array_equal(oz.classes_, unique)\n npt.assert_array_equal(oz.class_counts_, counts)", "def evaluate(self, test_set, predicted_values, certainty):\r\n\r\n if self.classification_type == \"classification\":\r\n self.classification_evaluation(test_set, predicted_values, certainty)\r\n elif self.classification_type == \"regression\":\r\n self.regression_evaluation(test_set, predicted_values)", "def classify(self, data):\n \"*** YOUR CODE HERE ***\"\n return self.sklearn_svm.predict(data)", "def testAssignClassifications(self):\n classifications = [c.UID for c in self.directory.getClassifications()]\n self.person.setClassifications(classifications)\n for c in self.person.getClassifications():\n self.failUnless(c.id in ['faculty', 'staff', 'grad-students'])\n self.failUnlessEqual(c.Type(), 'Classification')", "def testDriver():\n exam1=90\n exam2=85\n assignmentScores = [50, 60, 70, 80, ]\n computeGrades(exam1, exam2, assignmentScores)", "def test_Gaussian_NB_estimators():", "def testClassifier(x_train, y_train, x_test, y_test, clf):\n #metrics = []\n start = dt.now()\n clf.fit(x_train, y_train)\n end = dt.now()\n print 'training time: ', (end - start)\n \n # add training time to metrics\n #metrics.append(end-start)\n \n start = dt.now()\n yhat = clf.predict(x_test)\n end = dt.now()\n print 'testing time: ', (end - start)\n \n # add testing time to metrics\n #metrics.append(end-start)\n \n print 'classification report: '\n# print classification_report(y_test, yhat)\n pp(classification_report(y_test, yhat))\n \n print 'f1 score'\n print f1_score(y_test, yhat, average='macro')\n \n print 'accuracy score'\n accuracy = accuracy_score(y_test, yhat)\n print accuracy\n #metrics.append(accuracy)\n #precision = precision_score(y_test, yhat, average=None)\n #recall = recall_score(y_test, yhat, average=None)\n \n # add precision and recall values to metrics\n #for p, r in zip(precision, recall):\n # metrics.append(p)\n # metrics.append(r)\n \n \n #add macro-averaged F1 score to metrics\n #metrics.append(f1_score(y_test, yhat, average='macro'))\n \n print 'confusion matrix:'\n print confusion_matrix(y_test, yhat)\n \n # plot the confusion matrix\n plt.imshow(confusion_matrix(y_test, yhat), interpolation='nearest')\n plt.show()\n \n return accuracy", "def evaluate_classifications(self):\n test_labels = open('./digitdata/testlabels', 'r')\n self.init_confusion_matrix()\n i = 0\n class_stats = {0:[0,0], 1:[0,0], 2:[0,0], 3:[0,0], 4:[0,0], 5:[0,0], 6:[0,0], 7:[0,0], 8:[0,0], 9:[0,0]}\n total_correct = 0\n num_labels = 1000\n for label in test_labels:\n int_label = int(label)\n if int_label == self.solutions[i]:\n class_stats[int_label][0] += 1\n 
self.confusion_matrix[int_label][self.solutions[i]] += 1\n else:\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n class_stats[int_label][1] += 1\n i += 1\n for k in class_stats:\n print \"Class \" + str(k) + \": \" + str(float(class_stats[k][0])/class_stats[k][1])\n total_correct += float(class_stats[k][0])\n print \"Overall Accuracy: \" + str(total_correct/num_labels) \n for l in range(0,10):\n for w in range(0,10):\n self.confusion_matrix[l][w] = float(self.confusion_matrix[l][w]) / class_stats[l][1]\n \n s = [[str(e) for e in row] for row in self.confusion_matrix]\n lens = [len(max(col, key=len)) for col in zip(*s)]\n fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens)\n table = [fmt.format(*row) for row in s]\n print '\\n'.join(table)\n #self.print_confusion_matrix() ", "def test_b_grade_above(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.submit_question_answer('p2', {'2_1': 'Correct'})\r\n self.check_grade_percent(0.67)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'B')", "def evaluate_preds_classification(y_true, y_preds):\n accuracy = accuracy_score(y_true, y_preds)\n precision = precision_score(y_true, y_preds)\n recall = recall_score(y_true, y_preds)\n f1 = f1_score(y_true, y_preds)\n metric_dict = {\"accuracy\": round(accuracy, 2),\n \"precision\": round(precision, 2),\n \"recall\": round(recall, 2),\n \"f1\": round(f1, 2)}\n print(f\"Accuracy: {accuracy * 100:.2f}%\")\n print(f\"Precision: {precision}\")\n print(f\"Recall: {recall}\")\n print(f\"F1 Score: {f1} \\n\")\n return metric_dict", "def test_scores_gatn(atn_model_fn, clf_model_fn, student_model_fn, dataset_name, target_class_id,\n batchsize=128, atn_name=None, clf_name=None, student_name=None, device=None,\n shuffle=True):\n if device is None:\n if tf.test.is_gpu_available():\n device = '/gpu:0'\n else:\n device = '/cpu:0'\n\n # Load the dataset\n (_, _), (X_test, y_test) = generic_utils.load_dataset(dataset_name)\n\n # Split test set to get adversarial train and test split.\n (X_train, y_train), (X_test, y_test) = generic_utils.split_dataset(X_test, y_test)\n\n num_classes = y_train.shape[-1]\n image_shape = X_train.shape[1:]\n\n # cleaning data\n # idx = (np.argmax(y_test, axis=-1) != target_class_id)\n # X_test = X_test[idx]\n # y_test = y_test[idx]\n\n batchsize = min(batchsize, X_test.shape[0])\n\n # num_train_batches = X_train.shape[0] // batchsize + int(X_train.shape[0] % batchsize != 0)\n num_test_batches = X_test.shape[0] // batchsize + int(X_test.shape[0] % batchsize != 0)\n\n # build the datasets\n _, test_dataset = generic_utils.prepare_dataset(X_train, y_train,\n X_test, y_test,\n batch_size=batchsize,\n shuffle=shuffle,\n device=device)\n\n # construct the model on the correct device\n with tf.device(device):\n if clf_name is not None:\n clf_model = clf_model_fn(num_classes, name=clf_name) # type: tf.keras.Model\n else:\n clf_model = clf_model_fn(num_classes) # type: tf.keras.Model\n\n if student_name is not None:\n student_model = student_model_fn(num_classes, name=student_name) # type: tf.keras.Model\n else:\n student_model = student_model_fn(num_classes) # type: tf.keras.Model\n\n if atn_name is not None:\n atn_model = atn_model_fn(image_shape, name=atn_name) # type: tf.keras.Model\n else:\n atn_model = atn_model_fn(image_shape) # type: tf.keras.Model\n\n optimizer = tf.train.AdamOptimizer()\n\n atn_checkpoint = tf.train.Checkpoint(model=atn_model, optimizer=optimizer,\n global_step=tf.train.get_or_create_global_step())\n\n 
student_checkpoint = tf.train.Checkpoint(model=student_model)\n\n clf_model_name = clf_model.name if clf_name is None else clf_name\n basepath = 'weights/%s/%s/' % (dataset_name, clf_model_name)\n\n if not os.path.exists(basepath):\n os.makedirs(basepath, exist_ok=True)\n\n checkpoint_path = basepath + clf_model_name + '.pkl'\n\n # Restore the weights of the classifier\n if os.path.exists(checkpoint_path):\n clf_model = clf_model.restore(checkpoint_path)\n print(\"Classifier model restored !\")\n\n atn_model_name = atn_model.name if atn_name is None else atn_name\n gatn_basepath = 'gatn_weights/%s/%s/' % (dataset_name, atn_model_name + \"_%d\" % (target_class_id))\n\n # Restore student model\n student_model_name = student_model.name if student_name is None else student_name\n basepath = 'gatn_weights/%s/%s/' % (dataset_name, student_model_name)\n\n if not os.path.exists(basepath):\n os.makedirs(basepath, exist_ok=True)\n\n student_checkpoint_path = basepath + student_model_name\n\n student_checkpoint.restore(student_checkpoint_path)\n\n if not os.path.exists(gatn_basepath):\n os.makedirs(gatn_basepath, exist_ok=True)\n\n atn_checkpoint_path = gatn_basepath + atn_model_name + \"_%d\" % (target_class_id)\n\n atn_checkpoint.restore(atn_checkpoint_path)\n\n # Restore the weights of the atn\n print()\n\n # train loop\n test_acc_realistic = tfe.metrics.Mean()\n test_acc_optimistic = tfe.metrics.Mean()\n test_target_rate = tfe.metrics.Mean()\n test_mse = tfe.metrics.Mean()\n\n batch_id = 0\n adversary_ids = []\n\n with tqdm(test_dataset, desc='Evaluating',\n total=num_test_batches, unit=' samples') as iterator:\n\n for test_iter, (x, y) in enumerate(iterator):\n\n if test_iter >= num_test_batches:\n break\n\n _, x_test_grad = compute_target_gradient(x, student_model, target_class_id)\n x_test_adversarial = atn_model(x, x_test_grad, training=False)\n\n y_test_pred = clf_model(x, training=False)\n y_pred_adversarial = clf_model(x_test_adversarial, training=False)\n\n # compute and update the test target_accuracy\n acc_val_white, target_rate = generic_utils.target_accuracy(y, y_pred_adversarial, target_class_id)\n acc_val_black, _ = generic_utils.target_accuracy(y_test_pred, y_pred_adversarial, target_class_id)\n\n x_mse = tf.losses.mean_squared_error(x, x_test_adversarial, reduction=tf.losses.Reduction.NONE)\n\n test_acc_realistic(acc_val_white)\n test_acc_optimistic(acc_val_black)\n test_target_rate(target_rate)\n test_mse(x_mse)\n\n # find the adversary ids\n y_labels = tf.argmax(y, axis=-1).numpy().astype(int)\n y_pred_labels = generic_utils.checked_argmax(y_test_pred, to_numpy=True).astype(int)\n y_adv_labels = generic_utils.checked_argmax(y_pred_adversarial, to_numpy=True).astype(int) # tf.argmax(y_pred_adversarial, axis=-1)\n\n pred_eq_ground = np.equal(y_labels, y_pred_labels) # correct prediction\n pred_neq_adv_labels = np.not_equal(y_pred_labels, y_adv_labels) # correct prediction was harmed by adversary\n\n found_adversary = np.logical_and(pred_eq_ground, pred_neq_adv_labels)\n\n not_same = np.argwhere(found_adversary)[:, 0]\n not_same = batch_id * batchsize + not_same\n batch_id += 1\n\n adversary_ids.extend(not_same.tolist())\n\n return (test_mse.result().numpy(),\n test_acc_realistic.result().numpy(), test_acc_optimistic.result().numpy(),\n test_target_rate.result().numpy(), adversary_ids)", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 1\r\n elif (float(i[1])) <= 0.1142382568740966:\r\n return 1\r\n else:\r\n return 1", "def test_classify_cuisine(self):\n pass", 
"def NBAccuracy(features_train, labels_train, features_test, labels_test):\n ### import the sklearn module for GaussianNB\n from sklearn.naive_bayes import GaussianNB\n from sklearn.metrics import accuracy_score\n\n ### create classifier\n clf = GaussianNB()\n\n ### fit the classifier on the training features and labels\n clf.fit(features_train, labels_train)\n\n ### use the trained classifier to predict labels for the test features\n # method 1\n accuracy = clf.score(features_test, labels_test)\n \n # method 2\n pred = clf.predict(features_test)\n accuracy = accuracy_score(pred, labels_test)\n \n return accuracy", "def test_text_classifier_test(self):\n pass", "def classify(X, Y, skf, clf, round_threshold=0.5, average=\"macro\"):\n X = X.values\n if isinstance(Y, pd.Series):\n labels = [\"{}_0\".format(Y.name), \"{}_1\".format(Y.name)]\n Y = np.ravel(Y)\n else:\n Y, labels = Y.values, list(Y.columns)\n\n fold_results = []\n for train, test in skf.split(X, Y):\n current_clf = clone(clf)\n X_train, X_test, Y_train, Y_test = X[train], X[test], Y[train], Y[test]\n\n current_clf.fit(X_train, Y_train)\n Y_prob = current_clf.predict_proba(X_test)\n Y_pred = current_clf.predict(X_test)\n\n (p, r, f1, auc, jac, hl, p_c,\n r_c, f1_c, s_c) = calculate_metrics(Y_test, Y_pred, Y_prob, average)\n\n # calculate overall scores for current fold\n fold_scores = {\n \"precision\": p,\n \"recall\": r,\n \"f1\": f1,\n \"auc\": auc,\n \"jaccard\": jac,\n \"hamming_loss\": hl\n }\n\n for i in range(len(labels)):\n fold_scores[\"precision_{0}\".format(labels[i])] = p_c[i]\n fold_scores[\"recall_{0}\".format(labels[i])] = r_c[i]\n fold_scores[\"f1_{0}\".format(labels[i])] = f1_c[i]\n fold_scores[\"support_{0}\".format(labels[i])] = s_c[i]\n\n fold_results.append({\n \"scores\": fold_scores,\n \"y_pred\": Y_pred,\n \"y_prob\": Y_prob,\n \"y_test\": Y_test\n })\n\n scores = {}\n for score in fold_results[0][\"scores\"].keys():\n values = [s[\"scores\"][score] for s in fold_results]\n scores[score] = (np.sum(values) if score.startswith(\"support_\")\n else np.mean(values))\n\n return scores, fold_results", "def __calculate_gender_diversity_score(project: dict, student: dict) -> int:\n # project_name = project[\"fields\"][PROJECT_NAME_FIELD]\n # student_name = student[\"fields\"][SURVEY_STUDENT_NAME_FIELD][0]\n\n # print(\"Calculating gender pairing score for: Project({}) - Student({})\".format(project_name, student_name))\n\n # Get the gender specified by the student\n student_gender = student[\"fields\"].get(SURVEY_GENDER_FIELD, None)\n if not student_gender:\n # The student didn't provide a gender, so we can't calculate a score\n return 0\n\n # Get the list of current assignments for the project team\n team_assignments = __get_team_assignments(project)\n\n # This list will hold the list of genders on the team\n team_gender_values = []\n for assignment in team_assignments:\n assigned_student_gender = assignment.student[\"fields\"].get(SURVEY_GENDER_FIELD, None)\n\n if assigned_student_gender:\n team_gender_values.append(assigned_student_gender)\n\n # ================================================================================================================\n # Get the count genders for the already assigned students\n gender_counter = __get_gender_counter()\n gender_counter.update(team_gender_values)\n\n # Get the count of the particular gender that matches the student\n matching_gender_count = gender_counter.get(student_gender)\n\n if matching_gender_count == 0:\n # This is good, as it will make the team more 
diverse\n return SURVEY_GENDER_BASE_WEIGHT\n elif matching_gender_count == 1:\n # This is better, as it will pair students with like genders\n return SURVEY_GENDER_BASE_WEIGHT * 2\n else:\n # There are already at least 2 student with this gender identity, so we won't\n # prefer this\n return 0", "def classifiction_metric(preds, labels, label_list):\n\n acc = metrics.accuracy_score(labels, preds)\n\n labels_list = [i for i in range(len(label_list))]\n\n report = metrics.classification_report(\n labels, preds, labels=labels_list, target_names=label_list, digits=5, output_dict=True)\n\n return acc, report", "def NBAccuracy(features_train, labels_train, features_test, labels_test):\n ### import the sklearn module for GaussianNB\n from sklearn.naive_bayes import GaussianNB\n\n ### create classifier\n clf = GaussianNB()#TODO\n clf.fit(features_train,labels_train)\n ### fit the classifier on the training features and labels\n #TODO\n\n ### use the trained classifier to predict labels for the test features\n pred = clf.predict(features_test)#TODO\n\n\n ### calculate and return the accuracy on the test data\n ### this is slightly different than the example, \n ### where we just print the accuracy\n ### you might need to import an sklearn module\n from sklearn.metrics import accuracy_score\n accuracy = accuracy_score(pred,labels_test)#TODO\n return accuracy", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 0\r\n elif (float(i[1])) <= 0.1142382568740966:\r\n return 1\r\n else:\r\n return 0", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 1\r\n elif (float(i[1])) <= 0.01755814193254369:\r\n return 1\r\n else:\r\n return 0", "def test_multiple_averages(self):\n user = self.make_user()\n enrollment = EnrollmentFactory(grade_level__school_year__school=user.school)\n GradeFactory(\n score=50,\n student=enrollment.student,\n graded_work__course_task__course__grade_levels=[enrollment.grade_level],\n )\n GradeFactory(\n score=100,\n student=enrollment.student,\n graded_work__course_task__course__grade_levels=[enrollment.grade_level],\n )\n GradeFactory(\n graded_work__course_task__course__grade_levels=[enrollment.grade_level]\n )\n\n with self.login(user):\n self.get_check_200(\"reports:progress\", pk=enrollment.id)\n\n assert self.get_context(\"courses\")[0][\"course_average\"] == 50\n assert self.get_context(\"courses\")[1][\"course_average\"] == 100", "def test_get_cat_score(self):\n classes = ['blue skin', 'pointy ears']\n negated_classes = []\n categories = ['ear feature', 'skin feature']\n\n categorical_score = self.annot_scorer._get_categorical_score(\n classes, negated_classes, categories,\n self.negation_weight, self.mock_ic_values\n )\n\n assert categorical_score == 0.7002519289078384", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 1\r\n elif (float(i[1])) <= 0.02728102940334218:\r\n return 1\r\n else:\r\n return 1", "def score(self, test_data):\n\n\t\tpass", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 1\r\n elif (float(i[1])) <= 0.891599215656381:\r\n return 1\r\n else:\r\n return 0", "def test_b_grade_exact(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.check_grade_percent(0.33)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'B')", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 1\r\n elif (float(i[1])) <= 0.6215704159296479:\r\n return 0\r\n else:\r\n return 1", "def sk_test_suit(X, y):\r\n\r\n X_train, X_test, y_train, y_test = 
train_test_split(X, y, test_size=0.20, random_state=42)\r\n\r\n classifierDict = {\"Random Forest\": RandomForestClassifier(),\r\n \"Logistic Regression\": LogisticRegression(),\r\n \"Linear Discriminant Analysis\": LinearDiscriminantAnalysis(),\r\n \"Gaussian Naive Bayes\": GaussianNB(),\r\n \"Neural Network\": MLPClassifier()}\r\n\r\n\r\n try:\r\n for k, v in classifierDict.items():\r\n clf = v.fit(X_train, y_train)\r\n training_score = cross_val_score(clf, X_train, y_train)\r\n testing_score = cross_val_score(clf, X_test, y_test)\r\n print(k)\r\n print('Sk-learn {0} training accuracy: {1}'.format(k, training_score.mean()))\r\n print('Sk-learn {0} testing accuracy: {1}'.format(k, testing_score.mean()))\r\n except:\r\n pass\r\n # winsound.PlaySound('sound.wav', winsound.SND_FILENAME)\r", "def _grade(student, request, course, keep_raw_scores):\r\n grading_context = course.grading_context\r\n raw_scores = []\r\n\r\n # Dict of item_ids -> (earned, possible) point tuples. This *only* grabs\r\n # scores that were registered with the submissions API, which for the moment\r\n # means only openassessment (edx-ora2)\r\n submissions_scores = sub_api.get_scores(\r\n course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id)\r\n )\r\n\r\n totaled_scores = {}\r\n # This next complicated loop is just to collect the totaled_scores, which is\r\n # passed to the grader\r\n for section_format, sections in grading_context['graded_sections'].iteritems():\r\n format_scores = []\r\n for section in sections:\r\n section_descriptor = section['section_descriptor']\r\n section_name = section_descriptor.display_name_with_default\r\n\r\n # some problems have state that is updated independently of interaction\r\n # with the LMS, so they need to always be scored. (E.g. foldit.,\r\n # combinedopenended)\r\n should_grade_section = any(\r\n descriptor.always_recalculate_grades for descriptor in section['xmoduledescriptors']\r\n )\r\n\r\n # If there are no problems that always have to be regraded, check to\r\n # see if any of our locations are in the scores from the submissions\r\n # API. If scores exist, we have to calculate grades for this section.\r\n if not should_grade_section:\r\n should_grade_section = any(\r\n descriptor.location.to_deprecated_string() in submissions_scores\r\n for descriptor in section['xmoduledescriptors']\r\n )\r\n\r\n if not should_grade_section:\r\n with manual_transaction():\r\n should_grade_section = StudentModule.objects.filter(\r\n student=student,\r\n module_state_key__in=[\r\n descriptor.location for descriptor in section['xmoduledescriptors']\r\n ]\r\n ).exists()\r\n\r\n # If we haven't seen a single problem in the section, we don't have\r\n # to grade it at all! We can assume 0%\r\n if should_grade_section:\r\n scores = []\r\n\r\n def create_module(descriptor):\r\n '''creates an XModule instance given a descriptor'''\r\n # TODO: We need the request to pass into here. 
If we could forego that, our arguments\r\n # would be simpler\r\n with manual_transaction():\r\n field_data_cache = FieldDataCache([descriptor], course.id, student)\r\n return get_module_for_descriptor(student, request, descriptor, field_data_cache, course.id)\r\n\r\n for module_descriptor in yield_dynamic_descriptor_descendents(section_descriptor, create_module):\r\n\r\n (correct, total) = get_score(\r\n course.id, student, module_descriptor, create_module, scores_cache=submissions_scores\r\n )\r\n if correct is None and total is None:\r\n continue\r\n\r\n if settings.GENERATE_PROFILE_SCORES: \t# for debugging!\r\n if total > 1:\r\n correct = random.randrange(max(total - 2, 1), total + 1)\r\n else:\r\n correct = total\r\n\r\n graded = module_descriptor.graded\r\n if not total > 0:\r\n #We simply cannot grade a problem that is 12/0, because we might need it as a percentage\r\n graded = False\r\n\r\n scores.append(Score(correct, total, graded, module_descriptor.display_name_with_default))\r\n\r\n _, graded_total = graders.aggregate_scores(scores, section_name)\r\n if keep_raw_scores:\r\n raw_scores += scores\r\n else:\r\n graded_total = Score(0.0, 1.0, True, section_name)\r\n\r\n #Add the graded total to totaled_scores\r\n if graded_total.possible > 0:\r\n format_scores.append(graded_total)\r\n else:\r\n log.exception(\"Unable to grade a section with a total possible score of zero. \" +\r\n str(section_descriptor.location))\r\n\r\n totaled_scores[section_format] = format_scores\r\n\r\n grade_summary = course.grader.grade(totaled_scores, generate_random_scores=settings.GENERATE_PROFILE_SCORES)\r\n\r\n # We round the grade here, to make sure that the grade is an whole percentage and\r\n # doesn't get displayed differently than it gets grades\r\n grade_summary['percent'] = round(grade_summary['percent'] * 100 + 0.05) / 100\r\n\r\n letter_grade = grade_for_percentage(course.grade_cutoffs, grade_summary['percent'])\r\n grade_summary['grade'] = letter_grade\r\n grade_summary['totaled_scores'] = totaled_scores \t# make this available, eg for instructor download & debugging\r\n if keep_raw_scores:\r\n grade_summary['raw_scores'] = raw_scores # way to get all RAW scores out to instructor\r\n # so grader can be double-checked\r\n return grade_summary", "def _eval_classifier(self):\n\n y_pred_baseline = self.df_baseline[self.score_column]\n y_pred_sample = self.df_sample[self.score_column]\n\n y_label_baseline = self.df_baseline[self.label_column]\n y_label_sample = self.df_sample[self.label_column]\n\n precision_baseline = precision_score(y_label_baseline, y_pred_baseline)\n recall_baseline = recall_score(y_label_baseline, y_pred_baseline)\n acc_baseline = accuracy_score(y_label_baseline, y_pred_baseline)\n f1_baseline = f1_score(y_label_baseline, y_pred_baseline)\n try:\n auc_baseline = roc_auc_score(y_label_baseline, y_pred_baseline)\n except ValueError:\n auc_baseline = \"NA\"\n\n precision_sample = precision_score(y_label_sample, y_pred_sample)\n recall_sample = recall_score(y_label_sample, y_pred_sample)\n acc_sample = accuracy_score(y_label_sample, y_pred_sample)\n f1_sample = f1_score(y_label_sample, y_pred_sample)\n try:\n auc_sample = roc_auc_score(y_label_sample, y_pred_sample)\n except ValueError:\n auc_sample = \"NA\"\n\n metrics_df = pd.DataFrame(\n {\n \"Accuracy\": [acc_baseline, acc_sample],\n \"Precision\": [precision_baseline, precision_sample],\n \"Recall\": [recall_baseline, recall_sample],\n \"F1\": [f1_baseline, f1_sample],\n \"AUC\": [auc_baseline, auc_sample],\n },\n 
index=[\"baseline\", \"sample\"],\n )\n\n self.performance_comparison = metrics_df", "def baseline(*args):\n XTrain, XTest, yTrain, yTest = args\n clf = DecisionTreeClassifier(random_state=42)\n clf.fit(XTrain, yTrain)\n return clf.score(XTest, yTest), clf.feature_importances_", "def test_model (self, text_test, labels_test):\n print(classification_report(labels_test, self.classify(text_test)))", "def determine_classes_based_on_gain_in_r2_score(dataset, alpha, downsample=True):\n gains = dataset['gain_in_r2_score']\n classes = ['good_gain' if i > alpha else 'loss' for i in gains]\n dataset[TARGET] = classes\n if downsample:\n return downsample_data(dataset)\n return dataset", "def score(self, X, y):\n predictions = self.predict(X)\n total_values = len(y)\n accuracy = 0\n if 'classification' == self.label_type:\n correct_values = np.where(predictions == y)\n accuracy = correct_values[0].size / total_values\n elif 'regression' == self.label_type:\n sse = (y - predictions) ** 2\n sse_summed = np.sum(sse)\n accuracy = sse_summed / total_values\n\n return accuracy", "def classify(self, X, y):\n\n clf = svm.SVC(kernel='linear', C=1)\n cv = StratifiedKFold(n_splits=5, random_state=0, shuffle=True)\n\n scores = cross_val_score(clf, X, y, cv=cv, scoring='balanced_accuracy')\n\n return scores", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 1\r\n elif (float(i[1])) <= 0.610257172808176:\r\n return 1\r\n else:\r\n return 0", "def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 0\r\n elif (float(i[1])) <= 0.02728102940334218:\r\n return 1\r\n else:\r\n return 0", "def test_get_simple_score(self):\n classes = ['blue skin', 'pointy ears']\n negated_classes = []\n\n simple_score = self.annot_scorer._get_simple_score(\n classes, negated_classes, self.ic_store.statistics.mean_mean_ic,\n self.ic_store.statistics.mean_max_ic, self.ic_store.statistics.mean_sum_ic,\n self.negation_weight, self.mock_ic_values\n )\n assert simple_score == 0.7276770236073753", "def classify(train=None, test=None, data=None, res_dir=\"res/\", disp=True, outfilename=None):\n utils.print_success(\"Comparison of differents classifiers\")\n if data is not None:\n train_features = data[\"train_features\"]\n train_groundtruths = data[\"train_groundtruths\"]\n test_features = data[\"test_features\"]\n test_groundtruths = data[\"test_groundtruths\"]\n else:\n train = utils.abs_path_file(train)\n test = utils.abs_path_file(test)\n train_features, train_groundtruths = read_file(train)\n test_features, test_groundtruths = read_file(test)\n if not utils.create_dir(res_dir):\n res_dir = utils.abs_path_dir(res_dir)\n classifiers = {\n \"RandomForest\": RandomForestClassifier()\n # \"RandomForest\": RandomForestClassifier(n_estimators=5),\n # \"KNeighbors\":KNeighborsClassifier(3),\n # \"GaussianProcess\":GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),\n # \"DecisionTree\":DecisionTreeClassifier(max_depth=5),\n # \"MLP\":MLPClassifier(),\n # \"AdaBoost\":AdaBoostClassifier(),\n # \"GaussianNB\":GaussianNB(),\n # \"QDA\":QuadraticDiscriminantAnalysis(),\n # \"SVM\":SVC(kernel=\"linear\", C=0.025),\n # \"GradientBoosting\":GradientBoostingClassifier(),\n # \"ExtraTrees\":ExtraTreesClassifier(),\n # \"LogisticRegression\":LogisticRegression(),\n # 
\"LinearDiscriminantAnalysis\":LinearDiscriminantAnalysis()\n }\n for key in classifiers:\n utils.print_success(key)\n clf = classifiers[key]\n utils.print_info(\"\\tFit\")\n clf.fit(train_features, train_groundtruths)\n utils.print_info(\"\\tPredict\")\n predictions = clf.predict(test_features)\n\n if outfilename is not None:\n with open(outfilename, \"w\") as filep:\n for gt, pred in zip(test_groundtruths, predictions):\n filep.write(gt + \",\" + pred + \"\\n\")\n\n # Global\n data = [key]\n data.append(str(precision_score(test_groundtruths, predictions, average='weighted')))\n data.append(str(recall_score(test_groundtruths, predictions, average='weighted')))\n data.append(str(f1_score(test_groundtruths, predictions, average='weighted')))\n data = \",\".join(data)\n if disp:\n print(data)\n else:\n with open(res_dir + \"global.csv\", \"a\") as filep:\n filep.write(data + \",\\n\")\n # Local\n for index, tag in enumerate(list(set(train_groundtruths))):\n precision = precision_score(test_groundtruths, predictions, average=None)\n recall = recall_score(test_groundtruths, predictions, average=None)\n f1 = f1_score(test_groundtruths, predictions, average=None)\n line = key + \",\" + str(precision[index]) + \",\" + str(recall[index]) + \",\" + str(f1[index])\n if disp:\n print(line)\n else:\n with open(res_dir + \"tag_\" + tag + \".csv\", \"a\") as filep:\n filep.write(line + \",\\n\")\n return predictions", "def evaluate(\n self,\n test_data=None,\n print_report=True,\n save_path=\"ktrain_classification_report.csv\",\n class_names=[],\n ):\n return self.validate(\n val_data=test_data,\n print_report=print_report,\n save_path=save_path,\n class_names=class_names,\n )", "def stratifier(self, data, labels, classifiers, cv, output_dir):\n\t\tresults_proba = collections.defaultdict(dict)\n\t\tdict_y_test = collections.defaultdict()\n\t\tsss = StratifiedShuffleSplit(n_splits=cv, test_size=0.2, random_state=3)\n\t\tsss.get_n_splits(data, labels)\n\t\ti = 1\n\t\tself.logger.info('Training processing ...')\n\t\tloop = sss.split(data, labels)\n\t\tt = tqdm(loop)\n\t\tl = collections.defaultdict(dict)\n\t\tfor train_index, test_index in t:\n\t\t\tt.set_description('Cross-validation n°')\n\t\t\tx_train, x_test = data.values[train_index], data.values[test_index]\n\t\t\ty_train, y_test = labels[train_index], labels[test_index]\n\t\t\tdict_y_test[i] = y_test\n\t\t\tresults_proba, tmp_l = \\\n\t\t\t\tself.classification(\n\t\t\t\t\ti, classifiers, results_proba, x_train, x_test, y_train, y_test)\n\t\t\t[l[d].update(tmp_l[d]) for d in tmp_l]\n\t\t\ti += 1\n\t\t[l[clf].update({'Mean': np.mean(np.asarray(list(l[clf].values())))})\n\t\t for clf in l]\n\t\tlog_cv = pd.DataFrame(l)\n\t\tlog_cv.index.names = ['Cross-validation']\n\t\tlog_cv.to_csv(output_dir + '/Cross-validation_accuracy.csv',\n\t\t index=True, sep='\\t')\n\t\tprint('Cross-validation results : \\n')\n\t\tprint(log_cv)\n\n\t\treturn results_proba, dict_y_test, classifiers", "def test_save_grade(self):\r\n response = self.peer_grading.save_grade(self.save_dict)\r\n self.assertEqual(response['success'], True)", "def _score(self, estimator, train, test):\n b = estimator.fit(self.A[train], self.b[train]).predict(self.A[test])\n return accuracy_score(self.b[test], b)", "def test_compute_grade_for_fa(self):\n run1_data = self.user_edx_data.get_run_data(self.run_fa.edx_course_key)\n run2_data = self.user_edx_data.get_run_data(self.run_fa_with_cert.edx_course_key)\n\n grade1_from_cur_grade = api._compute_grade_for_fa(run1_data)\n grade2_from_cert = 
api._compute_grade_for_fa(run2_data)\n\n assert isinstance(grade1_from_cur_grade, api.UserFinalGrade)\n assert isinstance(grade2_from_cert, api.UserFinalGrade)\n\n assert grade1_from_cur_grade.passed == self.current_grades.get(\n self.run_fa.edx_course_key).data.get('passed')\n assert grade1_from_cur_grade.grade == self.current_grades.get(\n self.run_fa.edx_course_key).data.get('percent')\n assert grade1_from_cur_grade.payed_on_edx == (self.enrollments.get(\n self.run_fa.edx_course_key).data.get('mode') in ['verified', 'honor'])\n\n assert grade2_from_cert.passed is self.current_grades.get(\n self.run_fa_with_cert.edx_course_key).data.get('passed')\n assert grade2_from_cert.grade == self.current_grades.get(\n self.run_fa_with_cert.edx_course_key).data.get('percent')\n # this is True as long as the certificate is verified\n assert grade2_from_cert.payed_on_edx is True", "def classify(self):\n\n if self.classifier is None:\n raise ValueError('self.classifier is None')\n if self.df is None:\n raise ValueError('self.df is None')\n if self.features is None:\n raise ValueError('self.features is None')\n\n train_set = self.df[self.df[self.label_col] != CLASSIFIER_NAN]\n test_set = self.df[self.df[self.label_col] == CLASSIFIER_NAN]\n\n test_set_timestamps = list(test_set.index.strftime('%Y-%m-%d %H:%M:%S.%f'))\n\n self.classifier.fit(\n train_set[self.features],\n train_set[self.label_col]\n )\n\n preds = self.classifier.predict(test_set[self.features])\n probs = self.classifier.predict_proba(test_set[self.features])\n\n res = []\n\n for i in range(0, len(preds)):\n probability = max(probs[i])\n res.append([test_set_timestamps[i], preds[i], probability])\n\n return res", "def mgcEval(self):\n import numpy as np\n def report_to_df(report):\n\n \"\"\"\n function to convert classification report to dataframe (for visualisation plot)\n \"\"\"\n\n report = re.sub(r\" +\", \" \", report).replace(\"avg / total\", \"avg/total\").replace(\"\\n \", \"\\n\")\n # update this due to sklearn classification report output change\n report = re.sub(r\" +\", \" \", report).replace(\"micro avg\", \"micro_avg\").replace(\"macro avg\", \"macro_avg\").replace(\"weighted avg\", \"weighted_avg\").replace(\"\\n \", \"\\n\")\n report_df = pd.read_csv(StringIO(\"Classes\" + report), sep=' ', index_col=0) \n return(report_df)\n \n #txt report to df\n class_rpttop1 = classification_report(self.y_true, self.y_pred)\n df_report = report_to_df(class_rpttop1)\n\n df_report = df_report.iloc[:self.nb_classes, :].copy()\n df_report.index = df_report.index.astype(int)\n \n\n # classifier prediction metrics\n def classMetrics(averagex):\n precision, recall, fscore, support = score(self.y_true, self.y_pred, average=averagex)\n \n return(\n print(''), \n print('-------------{0:}--------------------'.format(averagex)), \n print('precision: {0:.4f}'.format(precision)),\n print('recall: {0:.4f}'.format(recall)),\n print('fscore: {0:.4f}'.format(fscore)),\n print(''),\n print('kappa score: {0:.4f}'.format(cohen_kappa_score(self.y_true, self.y_pred))),\n print('accuracy score: {0:.4f}'.format(accuracy_score(self.y_true, self.y_pred))))\n \n def predSamp():\n\n correct = np.nonzero(self.y_pred==self.y_true)[0]\n incorrect = np.nonzero(self.y_pred!=self.y_true)[0]\n\n # quick check of the number of correct prediction from validation set\n print(\"\")\n print(\"correct/total = {0: .4f}\".format(len(correct)/(len(correct)+len(incorrect))))\n print(\"total correct sample = {0: .0f}\".format(len(correct)))\n 
print('------------------------------------------------------------------')\n \n def classReport():\n print('----------------------------- Classfication Report -------------------------------')\n print(classification_report(pd.Series(self.y_true).map(self.dict_label), pd.Series(self.y_pred).map(self.dict_label)))\n \n self.class_rpt = pd.concat([pd.DataFrame(pd.Series(df_report.index.tolist()).map(self.dict_label), columns = ['label']), df_report], axis = 1)\n \n self.classMetricsMac = classMetrics(\"macro\")\n self.classMetricsMic = classMetrics(\"micro\")\n self.predSample = predSamp()\n self.class_rptTop1 = classReport()\n \n return self", "def test_calculate_assign_group_rdclass(request):\n print(\"\\n--Starting:\", request.node.name)\n\n net = ModelRoadwayNetwork.read(\n link_file=STPAUL_LINK_FILE,\n node_file=STPAUL_NODE_FILE,\n shape_file=STPAUL_SHAPE_FILE,\n fast=True,\n )\n\n net.calculate_assign_group()\n net.calculate_roadway_class()\n assert \"assign_group\" in net.links_df.columns\n assert \"roadway_class\" in net.links_df.columns\n print(\"Assign Group Frequency\")\n print(net.links_df[net.links_df.drive_access == 1].assign_group.value_counts())\n print(\"Roadway Class Frequency\")\n print(net.links_df[net.links_df.drive_access == 1].roadway_class.value_counts())\n ## todo write an assert that actually tests something", "def classification(original_training_data):\n\n ''' Storing the dataframe as numpy array '''\n original_training_data_values = original_training_data.values\n\n ''' Storing the values of target attribute for finding out the counts of each recipetype'''\n target_column = original_training_data_values[:, -1]\n\n ''' Recipe_type stores the unique values of target attribute in the form of a list [Muffin Cupcake] \n cupcake_muffin_count stores the count of muffin and cupcakes in the form of a list [451 451]'''\n recipe_type, cupcake_muffin_count = np.unique(target_column, return_counts=True)\n\n ''' cupcake_muffin_count.argmax() returns the index of the highest value. In this case, it will return the index of \n muffin or cupcake count. 
'''\n majority_class = recipe_type[cupcake_muffin_count.argmax()]\n\n return majority_class", "def test_weighted_exam(self):\r\n self.weighted_setup()\r\n self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'})\r\n self.check_grade_percent(0.75)", "def _classify(self, sample):\n # This function is used so that we can reduce each row with respect \n # to the sample.\n def calc_dist(vector):\n return distance_utils.euclidean(vector, sample)\n\n distances = self.training_set.reduce_rows(calc_dist)\n \n votes = self._tally_votes(self.training_set.get_labels(), distances)\n \n return collection_utils.get_key_with_highest_value(votes)", "def _grade_with_errors(student, request, course, keep_raw_scores=False):\r\n if student.username in ['student3', 'student4']:\r\n raise Exception(\"I don't like {}\".format(student.username))\r\n\r\n return grade(student, request, course, keep_raw_scores=keep_raw_scores)", "def test(self):\n\t\treturn classification_report(self.test_labels, self.predict(self.test_data), target_names=self.le.classes_)", "def __existence_classification__(self,task_id,shape,aggregations):\n\n # aggregations = {}\n\n # raw_classifications and clustering_results have different hierarchy orderings- raw_classifications\n # is better for processing data and clustering_results is better for showing the end result\n # technically we only need to look at the data from clustering_results right now but its\n # hierarchy is really inefficient so use raw_classifications to help\n\n # each shape is done independently\n\n # set - so if multiple tools create the same shape - we only do that shape once\n # for shape in set(marking_tasks[task_id]):\n\n\n # pretentious name but basically whether each person who has seen a subject thinks it is a true\n # positive or not\n existence_classification = {\"param\":\"subject_id\"}\n\n global_cluster_index = 0\n # clusters_per_subject = []\n\n # look at the individual points in the cluster\n for subject_id in aggregations.keys():\n if subject_id == \"param\":\n continue\n\n # gold standard pts may not match up perfectly with the given clusters -\n # for example, we could have a gold penguin at 10,10 but the users' cluster\n # is centered at 10.1,9.8 - same penguin though\n # so as we go through the clusters, we need to see which ones match up more closely\n # with the gold standard\n # if subject_id in gold_standard_clustering[0]:\n # # closest cluster and distance\n # gold_to_cluster = {pt:(None,float(\"inf\")) for pt in gold_standard_clustering[0][subject_id]}\n # else:\n # gold_to_cluster = None\n\n\n # clusters_per_subject.append([])\n\n # # in either case probably an empty image\n # if subject_id not in clustering_results:\n # continue\n # if task_id not in clustering_results[subject_id]:\n # continue\n\n if (shape+ \" clusters\") not in aggregations[subject_id][task_id]:\n # if none of the relevant markings were made on this subject, skip it\n continue\n\n all_users = aggregations[subject_id][task_id][shape+ \" clusters\"][\"all_users\"]\n\n for local_cluster_index in aggregations[subject_id][task_id][shape+ \" clusters\"]:\n if local_cluster_index == \"all_users\":\n continue\n\n # extract the users who marked this cluster\n cluster = aggregations[subject_id][task_id][shape+ \" clusters\"][local_cluster_index]\n\n # todo - put this back when we support gold standard clustering\n # # is this user cluster close to any gold standard pt?\n # if subject_id in gold_standard_clustering[0]:\n # x,y = cluster[\"center\"]\n # for 
(gold_x,gold_y) in gold_to_cluster:\n # dist = math.sqrt((x-gold_x)**2+(y-gold_y)**2)\n # if dist < gold_to_cluster[(gold_x,gold_y)][1]:\n # gold_to_cluster[(gold_x,gold_y)] = local_cluster_index,dist\n #\n # # now repeat for negative gold standards\n # if subject_id in gold_standard_clustering[1]:\n # x,y = cluster[\"center\"]\n # min_dist = float(\"inf\")\n # closest= None\n # for x2,y2 in gold_standard_clustering[1][subject_id]:\n # dist = math.sqrt((x-x2)**2+(y-y2)**2)\n # if dist < min_dist:\n # min_dist = min(dist,min_dist)\n # closest = (x2,y2)\n # if min_dist == 0.:\n # assert (x,y) == closest\n # mapped_gold_standard[(subject_id,local_cluster_index)] = 0\n\n users = cluster[\"users\"]\n\n ballots = []\n\n # todo - the 15 hard coded value - might want to change that at some point\n for u in all_users:\n if u in users:\n ballots.append((u,1))\n else:\n ballots.append((u,0))\n\n existence_classification[(subject_id,local_cluster_index)] = ballots\n # clusters_per_subject[-1].append(global_cluster_index)\n # global_cluster_index += 1\n\n # # note we don't care about why a cluster corresponds to a gold standard pt - that is\n # # it could be really close to given gold standards - the point is that it is close\n # # to at least one of them\n # if gold_to_cluster is not None:\n # for (local_cluster_index,dist) in gold_to_cluster.values():\n # # arbitrary threshold but seems reasonable\n # if dist < 1:\n # mapped_gold_standard[(subject_id,local_cluster_index)] = 1\n\n existence_results = self.__task_aggregation__(existence_classification,task_id,{})#,mapped_gold_standard)\n assert isinstance(existence_results,dict)\n\n for subject_id,cluster_index in existence_results:\n new_results = existence_results[(subject_id,cluster_index)][task_id]\n # new_agg = {subject_id: {task_id: {shape + \" clusters\": {cluster_index: {\"existence\": new_results}}}}}\n # aggregations = self.__merge_results__(aggregations,new_agg)\n aggregations[subject_id][task_id][shape + \" clusters\"][cluster_index][\"existence\"] = new_results\n # if subject_id not in aggregations:\n # aggregations[subject_id] = {}\n # if task_id not in aggregations[subject_id]:\n # aggregations[subject_id][task_id] = {}\n # if (shape + \" clusters\") not in aggregations[subject_id][task_id]:\n # aggregations[subject_id][task_id][shape+ \" clusters\"] = {}\n # # this part is probably redundant\n # if cluster_index not in aggregations[subject_id][task_id][shape+ \" clusters\"]:\n # aggregations[subject_id][task_id][shape+ \" clusters\"][cluster_index] = {}\n #\n # aggregations[subject_id][task_id][shape+ \" clusters\"][cluster_index][\"existence\"] = existence_results[(subject_id,cluster_index)]\n\n return aggregations", "def test_is_student_calibrated(self):\r\n response = self.peer_grading.is_student_calibrated(self.calibrated_dict)\r\n self.assertTrue(response['success'])", "def classification_report(y_test:list, y_predict:list) -> str:\n return classification_report(y_test, y_predict)", "def test_labels(self):\n classes = np.array([\"a\", \"b\", \"c\", \"d\", \"e\"])\n y = classes[np.random.randint(0, 5, 100)]\n\n oz = ClassificationScoreVisualizer(GaussianNB, classes=classes)\n npt.assert_array_equal(oz._labels(), classes)\n\n encoder = dict(zip(range(len(classes)), classes))\n oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)\n npt.assert_array_equal(oz._labels(), classes)\n\n encoder = LabelEncoder().fit(y)\n oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)\n npt.assert_array_equal(oz._labels(), 
classes)", "def classify(dataset,classifier,feat_mask=None):\r\n \r\n train = dataset.get_data('train',True)\r\n X_train = train['x']\r\n if feat_mask is not None:\r\n X_train = X_train[:,feat_mask]\r\n y_train = train['y']\r\n \r\n classifier.fit(X_train,y_train)\r\n \r\n test = dataset.get_data('test',True)\r\n X_test = test['x']\r\n if feat_mask is not None:\r\n X_test = X_test[:,feat_mask]\r\n y_test = test['y']\r\n \r\n pred = classifier.predict(X_test)\r\n \r\n acc = np.count_nonzero(pred==y_test) / len(y_test)\r\n return acc,y_test,pred", "def test_text_classifier_curate(self):\n pass", "def evaluate_model(model, X_test, y_test, category_names):\n # Predict for test set\n y_pred = model.predict(X_test)\n \n print(\"**** Scores for each category *****\\n\")\n for i in range(36):\n print(\"Scores for '{}':\".format(category_names[i]))\n print(classification_report(y_test.values[:,i], y_pred[:,i]))", "def test(self, X, y):\n\t\tself.test_X = X\n\t\tself.test_y = y\n\n\t\tclassifier = self.classifier.fit(self.X, self.y)\n\t\ty_pred = classifier.predict(X) \t\t\t# class prediction\n\t\ty_prob = classifier.predict_proba(X)\t# probability of each class\n\t\tself.test_metrics = ModelMetrics(classifier, y, y_pred, y_prob, 'holdout')", "def classify(self, audio_sample, should_print=True):\n features_left, features_right = self.extract_features(audio_sample)\n classification_counts = [0 for x in range(len(self.speakers))]\n\n for i in range(len(features_left)):\n feature = np.reshape(features_left[i, :], (1, -1))\n\n left_pred = int(self.left_model.predict(feature)[0])\n classification_counts[left_pred] += 1\n\n if self.both_channels:\n right_pred = int(self.right_model.predict(feature)[0])\n classification_counts[right_pred] += 1\n\n probabilities = np.array(classification_counts) / sum(classification_counts)\n pred = np.argmax(probabilities)\n\n if should_print:\n print(probabilities)\n\n if probabilities[pred] > self.certainty:\n print(\"Identified %s\" % self.speakers[pred])\n return self.speakers[pred]\n else:\n print(\"Unidentified Speaker\")\n return -1", "def test(student_module):\n tester = _testDriver()\n tester.test_all(student_module)\n return tester.score, tester.feedback", "def test(self):\n for data_tier in self.data_tiers:\n tot = len(self.preprocessed_data[data_tier]['features'])\n p = int(math.floor(tot*0.2))\n test_features = np.array(self.preprocessed_data[data_tier]['features'][p:])\n trend_test_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][p:])\n avg_test_classifications = np.array(self.preprocessed_data[data_tier]['avg_classifications'][p:])\n accuracy_trend = self.clf_trend[data_tier].score(test_features, trend_test_classifications)\n accuracy_avg = self.clf_avg[data_tier].score(test_features, avg_test_classifications)\n self.logger.info('The accuracy of %s trend classifier for data tier %s is %.3f', self.name, data_tier, accuracy_trend)\n self.logger.info('The accuracy of %s avg regressor for data tier %s is %.3f', self.name, data_tier, accuracy_avg)", "def getClassifier(self):\n return self.classify", "def classify_data(self, test_set, include_features_in_result=False):\n if len(test_set) == 1:\n return self.__classify(test_set, self.__tree)\n else:\n\n indices = test_set.index.values.tolist()\n correct_classified_rows = 0\n\n classification_result = []\n\n for index in indices:\n\n training_row = pd.DataFrame(test_set.loc[index])\n training_row = training_row.T\n\n result_row = [list(x) for x in training_row.values][0]\n 
expected_value = str(training_row[self.__resulting_feature].iloc[0])\n classified_value = self.classify_data(training_row)\n result_row.append(classified_value)\n result_row = tuple(result_row)\n\n classification_result.append(result_row)\n\n if expected_value == classified_value:\n correct_classified_rows += 1\n\n self.accuracy_of_previous_test = (correct_classified_rows / len(test_set) * 100)\n\n column_names = list(test_set)\n column_names.append(\"classified\")\n classification_result = pd.DataFrame(classification_result, columns=column_names)\n\n if include_features_in_result:\n return classification_result\n else:\n return classification_result.iloc[:, -2:]", "def get_prediction(self, data, class_label):\n\t\taccuracy = 0\n\t\thit=0\n\t\tcount=0\n\t\tfor index, row in test.iterrows():\n\t\t\tcount += 1\n\t\t\ttmp = self.get_classLabel(row.tolist(),row[class_label])\n\t\t\t#print (tmp)\n\t\t\tif tmp:\n\t\t\t\thit+=1\n\t\t#print (\"hit \"+ str(hit) )\n\t\taccuracy = hit/count\n\t\t\n\t\treturn accuracy", "def student_grades(student, course):\n cg = CourseGradeFactory().create(student, course)\n return cg.summary", "def evaluate(self, training_scores, original_test_scores, imitation_test_scores):\n\n #finding a threshold: third to smallest training score\n sorted_scores = np.sort(training_scores)\n threshold = sorted_scores[2]\n\n #computing the number of errors\n errors = len(np.where(original_test_scores < threshold)[0])\n errors += len(np.where(imitation_test_scores > threshold)[0])\n\n #computing the local accuracy\n accuracy = 1 - errors/(len(original_test_scores)+len(imitation_test_scores))\n return accuracy, threshold", "def test_grade(self, grade):\n self.client.login(username=self.student.username, password=self.password)\n with patch('lms.djangoapps.grades.course_grade_factory.CourseGradeFactory.read') as mock_grade:\n grade_fields = {\n 'letter_grade': grade['letter_grade'],\n 'percent': grade['percent'],\n 'passed': grade['letter_grade'] is not None,\n\n }\n mock_grade.return_value = MagicMock(**grade_fields)\n resp = self.client.get(self.get_url(self.student.username))\n\n assert resp.status_code == status.HTTP_200_OK\n expected_data = {\n 'username': self.student.username,\n 'email': '',\n 'course_id': str(self.course_key),\n }\n\n expected_data.update(grade)\n assert resp.data == [expected_data]", "def test_nb(x, y, tune):\n # Perform classification without tuning\n nb = GaussianNB()\n pipeline = create_pipeline(nb)\n return accuracy(pipeline, x, y)", "def test_recommender(self):\n\n self._build_sample_graph()\n\n # set skill sa score to 1.0 and skill sb score to 0.5\n measure_sa = competency.SuccessRateCompetencyMeasure.load(\n self.user_id, self.sa.id)\n measure_sa.add_score(1.0)\n measure_sa.save()\n measure_sb = competency.SuccessRateCompetencyMeasure.load(\n self.user_id, self.sb.id)\n measure_sb.add_score(0.0)\n measure_sb.add_score(1.0)\n measure_sb.save()\n\n # verify that the proficient skill list equals [sa]\n # verify that the recommended skill list equals [sb, sc]\n skill_map = SkillMap.load(self.course, self.user_id)\n recommender = SkillRecommender.instance(skill_map)\n recommended, learned = recommender.recommend()\n self.assertEqual(1, len(learned))\n self.assertEqual(2, len(recommended))\n self.assertEqual(self.sb.id, recommended[0].id)\n self.assertEqual(self.sc.id, recommended[1].id)\n assert learned[0].competency_measure.last_modified\n\n # add second successful attempt for skill b and:\n # verify that the proficient skill list equals [sa, sb]\n # 
verify that the recommended skill list equals [sc, sd]\n measure_sb = competency.SuccessRateCompetencyMeasure.load(\n self.user_id, self.sb.id)\n measure_sb.add_score(1.0)\n assert measure_sb.proficient\n measure_sb.save()\n skill_map = SkillMap.load(self.course, self.user_id)\n recommender = SkillRecommender.instance(skill_map)\n recommended, proficient = recommender.recommend()\n self.assertEqual(2, len(proficient))\n self.assertEqual(2, len(recommended))\n self.assertEqual(self.sc.id, recommended[0].id)\n self.assertEqual(self.sd.id, recommended[1].id)", "def evaluate_model(model, X_test, Y_test, category_names):\n# Print out Precision , recall F1_score and support for each column using classification_report function\n y_pred_test = model.predict(X_test)\n print(classification_report(Y_test, y_pred_test, target_names=category_names))", "def test_e_instancia_enrolled_in_trail_default_class_rs_checks_student_enrolled_in_trail_default_class(self):\n\n data = EnrolledInTrailDefaultClassRQ(\n student_id=7790,\n trail_id=9999\n )\n\n res = self.api.checks_student_enrolled_in_trail_default_class(data)\n\n if isinstance(res, ConnectionExceptionRS):\n raise unittest.SkipTest(res.msg)\n\n self.assertIsInstance(res, EnrollmentCourseRS)", "def compute_class_sens_spec(pred, label, class_num):\n\n # extract sub-array for specified class\n class_pred = pred[class_num]\n class_label = label[class_num]\n\n ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###\n \n # compute:\n \n # true positives\n tp = np.sum((class_pred == 1) & (class_label == 1))\n\n # true negatives\n tn = np.sum((class_pred == 0) & (class_label == 1))\n \n #false positives\n fp = np.sum((class_pred == 1) & (class_label == 0))\n \n # false negatives\n fn = np.sum((class_pred == 0) & (class_label == 0))\n\n # compute sensitivity and specificity\n sensitivity = tp / (tp + fn)\n specificity = tn / (tn + fp)\n\n ### END CODE HERE ###\n\n return sensitivity, specificity" ]
[ "0.8039397", "0.65274847", "0.640173", "0.6289307", "0.6219212", "0.5914755", "0.5887433", "0.5776925", "0.5719986", "0.56444573", "0.56377053", "0.56352776", "0.56190753", "0.56097853", "0.5556611", "0.55541223", "0.55449235", "0.5517405", "0.55008376", "0.54830605", "0.5482247", "0.5472169", "0.54640055", "0.5461173", "0.5457503", "0.5450712", "0.54477394", "0.5434986", "0.5430377", "0.54296345", "0.5417018", "0.541277", "0.5408827", "0.539733", "0.53917855", "0.5382198", "0.53762573", "0.537564", "0.5375525", "0.5369464", "0.53650796", "0.5362092", "0.5348077", "0.53477025", "0.53437656", "0.5340593", "0.53344554", "0.5324237", "0.53217936", "0.5321599", "0.53204125", "0.53168714", "0.53158987", "0.5308224", "0.53042454", "0.52986705", "0.5297159", "0.52914095", "0.5288521", "0.52863187", "0.5285332", "0.52848446", "0.5284044", "0.5271129", "0.5268212", "0.5243384", "0.524282", "0.52387434", "0.52297544", "0.5226862", "0.52259153", "0.520555", "0.5204211", "0.5204098", "0.51974547", "0.51949215", "0.5181263", "0.5177774", "0.51720476", "0.51643", "0.51632655", "0.5162502", "0.5161444", "0.51592916", "0.51588875", "0.5158034", "0.5157114", "0.5150556", "0.51477486", "0.5145373", "0.5140785", "0.513941", "0.5138154", "0.5136559", "0.5133883", "0.51329195", "0.51275766", "0.5125191", "0.51237303", "0.5119992" ]
0.8334534
0
Unit test for Roybal_Student_Analytics element_count method.
def test_element_count(self):
    s = Student_Analytics()
    self.assertEqual(s.element_count(2,"F"),6)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_elements_count():\n # GIVEN\n bboxes = [8.67066,49.41423,8.68177,49.4204]\n time = \"2010-01-01/2011-01-01/P1Y\"\n keys = [\"building\"]\n values = [\"\"]\n\n timestamps = [\"2010-01-01T00:00:00Z\", \"2011-01-01T00:00:00Z\"]\n counts = [53.0, 256.0]\n expected = pd.DataFrame({\"timestamp\": timestamps, \"value\": counts})\n\n # WHEN\n client = ohsome.OhsomeClient()\n response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values)\n result = response.as_dataframe()\n del client\n\n # THEN\n assert expected.equals(result)", "def testArticleCount(self):\n\n self.articleCount(17)", "def count(self):\n\n raise NotImplementedError", "def getNumElements(self):\n raise Exception(\"Didn't expect this to get called.\")", "def test_counts(self):\n c = array([5,0,1,1,5,5])\n obs = counts(c)\n exp = array([1,2,0,0,0,3])\n self.assertEqual(obs, exp)\n d = array([2,2,1,0])\n obs = counts(d, obs)\n exp = array([2,3,2,0,0,3])\n self.assertEqual(obs, exp)", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def test_elements_count_exception():\n # GIVEN\n bboxes = \"8.67066,49.41423,8.68177,49.4204\"\n time = \"2010-01-01/2011-01-01/P1Y\"\n keys = [\"building\"]\n values = [\"\"]\n\n # WHEN\n client = ohsome.OhsomeClient()\n response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values)\n response.as_geodataframe()", "def assertCountSeleniumElements(self, selector, count, root_element=None):\n from selenium.webdriver.common.by import By\n\n root_element = root_element or self.selenium\n self.assertEqual(\n len(root_element.find_elements(By.CSS_SELECTOR, selector)), count\n )", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def test_count(self):\n self._test_count_func(count)", "def test_getTotalIndividualCount(self):\r\n # Verified with iNEXT.\r\n self.assertEqual(self.est1.getTotalIndividualCount(), 15)\r\n\r\n # Verified against results in Colwell 2012 paper.\r\n self.assertEqual(self.est2.getTotalIndividualCount(), 976)\r\n self.assertEqual(self.est3.getTotalIndividualCount(), 237)", "def test_count_elements(self):\n from pykml.util import count_elements\n\n test_datafile = path.join(\n path.dirname(__file__),\n 'testfiles',\n 'google_kml_developers_guide/complete_tour_example.kml'\n )\n with open(test_datafile) as f:\n doc = parse(f, schema=Schema('kml22gx.xsd'))\n summary = count_elements(doc)\n\n self.assertTrue('http://www.opengis.net/kml/2.2' in summary)\n self.assertEqual(4,\n summary['http://www.opengis.net/kml/2.2']['Placemark']\n )\n self.assertTrue('http://www.google.com/kml/ext/2.2' in summary)\n self.assertEqual(5,\n summary['http://www.google.com/kml/ext/2.2']['FlyTo']\n )\n self.assertEqual(2,\n summary['http://www.google.com/kml/ext/2.2']['Wait']\n )", "def element_count(self):\r\n result = conf.lib.clang_getNumElements(self)\r\n if result < 0:\r\n raise Exception('Type does not have elements.')\r\n\r\n return result", "def test_b_count_id(self):\n storage = FileStorage()\n count = storage.count(Amenity)\n self.assertEqual(1, count)\n count = storage.count(State)\n self.assertEqual(1, count)\n count = storage.count(City)\n self.assertEqual(1, count)\n count = storage.count(User)\n self.assertEqual(1, count)\n count = storage.count(Place)\n self.assertEqual(1, count)\n count = storage.count(Review)\n self.assertEqual(1, count)", "def element_count(self):\n return self._internal.get_element_count()", "def count(self):\n return len(self._elements)", 
"def count():", "def count(self, elem):\n return self.iter.count(elem)", "def test_data_source_soaps_count_get(self):\n pass", "def elements_count(self):\n return self.__elements_count", "def sample_count(self):", "def test_init(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(len(s.data),89)", "def test_abcdee():\n assert part_01.count_for('abcdee', 2) == 1\n assert part_01.count_for('abcdee', 3) == 0", "def count(self, value):\n # YOUR CODE HERE\n raise NotImplementedError()", "def count(self):\n # TODO not implemented yet\n return 0", "def _count(self):\n if self._count_valid:\n return self._total_results\n\n result = self._cb.get_object(self._doc_class.urlobject.format(self._cb.credentials.org_key))\n results = result.get(\"results\", [])\n\n self._total_results = len(results)\n self._count_valid = True\n\n return self._total_results", "def count(self, QXmlStreamAttribute=None): # real signature unknown; restored from __doc__ with multiple overloads\n return 0", "def test_getSampleCount(self):\r\n self.assertEqual(self.estimator1.getSampleCount(), 1)", "def test_number_of_testcase_elements(self):\n testcases = self.root.findall('testcase')\n self.assertEqual(len(testcases), 4)", "def test_count_publications(self):\n pass", "def test_own_count(self):\n self._test_count_func(it_count)", "def test_getObservationCount(self):\r\n # Verified with iNEXT.\r\n self.assertEqual(self.est1.getObservationCount(), 5)\r\n\r\n # Verified against results in Colwell 2012 paper.\r\n self.assertEqual(self.est2.getObservationCount(), 140)\r\n self.assertEqual(self.est3.getObservationCount(), 112)", "def test_all_count(self):\n self.assertEqual(2, self.alice_storage.all_count)\n self.assertEqual(3, self.bob_storage.all_count)\n self.assertEqual(0, self.carol_storage.all_count)\n self.assertEqual(0, self.anonymous_storage.all_count)", "def test_calculate_count(request):\n print(\"\\n--Starting:\", request.node.name)\n\n net = ModelRoadwayNetwork.read(\n link_file=STPAUL_LINK_FILE,\n node_file=STPAUL_NODE_FILE,\n shape_file=STPAUL_SHAPE_FILE,\n fast=True,\n )\n\n net.add_counts()\n assert \"AADT\" in net.links_df.columns\n print(net.links_df[net.links_df.drive_access == 1].AADT.value_counts())\n ## todo write an assert that actually tests something", "def element_count(self):\n return len(self.elements) + len(self.virtual_elements)", "def getNumElements(self):\n return 0", "def Count(self):\r\n\t\treturn self._get_attribute('count')", "def Count(self):\r\n\t\treturn self._get_attribute('count')", "def GetNumberOfElements(self, assoc):\n result = 0\n for dataset in self:\n result += dataset.GetNumberOfElements(assoc)\n return int(result)", "def test_sent_count(self):\n self.assertEqual(1, self.alice_storage.sent_count)\n self.assertEqual(1, self.bob_storage.sent_count)\n self.assertEqual(2, self.carol_storage.sent_count)\n self.assertEqual(0, self.anonymous_storage.sent_count)", "async def test_nr_of_metrics(self):\n response = await self.collect(get_request_json_side_effect=[self.data_model, self.reports])\n self.assert_measurement(\n response,\n value=str(len(self.entities)),\n total=self.expected_software_metrics,\n entities=self.entities,\n )", "def test_item_count(self):\n self.assertEqual(len(self.items), 2)", "def test_search_result_count(self):\n user = User.objects.create(username=\"hoge\")\n\n ref_entity = Entity.objects.create(name=\"ref_entity\", created_user=user)\n ref_entry = Entry.objects.create(name=\"ref\", schema=ref_entity, created_user=user)\n\n entity = 
Entity.objects.create(name=\"entity\", created_user=user)\n for name in [\"foo\", \"bar\"]:\n attr = EntityAttr.objects.create(\n name=name,\n type=AttrTypeValue[\"object\"],\n created_user=user,\n parent_entity=entity,\n )\n attr.referral.add(ref_entity)\n entity.attrs.add(attr)\n\n for i in range(0, 20):\n entry = Entry.objects.create(name=\"e%3d\" % i, schema=entity, created_user=user)\n entry.complement_attrs(user)\n\n if i < 10:\n entry.attrs.get(schema__name=\"foo\").add_value(user, ref_entry)\n else:\n entry.attrs.get(schema__name=\"bar\").add_value(user, ref_entry)\n\n entry.register_es()\n\n resp = Entry.search_entries(user, [entity.id], [{\"name\": \"foo\", \"keyword\": \"ref\"}], limit=5)\n self.assertEqual(resp[\"ret_count\"], 10)\n self.assertEqual(len(resp[\"ret_values\"]), 5)", "def test_getSampleCount(self):\r\n self.assertEqual(self.res1.getSampleCount(), 0)\r\n\r\n self.res1.addSample('S1', 42)\r\n self.assertEqual(self.res1.getSampleCount(), 1)\r\n\r\n self.res1.addSample('S2', 43)\r\n self.assertEqual(self.res1.getSampleCount(), 2)", "def test_bababc():\n assert part_01.count_for('bababc', 2) == 1\n assert part_01.count_for('bababc', 3) == 1", "def test_counter(self):\n self.assertEqual(self._n_registered, 1)", "def count(self, value): # real signature unknown; restored from __doc__\n return 0", "def test_properties_count_group_by_group_by_get(self):\n pass", "def testSectionCount(self):\n\n self.sectionCount(3640)", "def getNumElements(self):\n return 1", "def getNumElements(self):\n return 1", "def test_workflows_count_get(self):\n pass", "def b_count_test(self):\n \t \n\tsel = self.selenium\n test = \"Test B - Count Articles, Titles, Headings, Etc.\"\n print test\n \n headers = sel.get_css_count(\"css=\" + CSS[1])\n images = sel.get_css_count(\"css=\" + CSS[2])\n authors = sel.get_css_count(\"css=\" + CSS[3])\n\tdots = sel.get_css_count(\"css=\" + CSS[7]) + sel.get_css_count(\"css=\" + CSS[6])\t\n \n if ((images < 8) or (dots < 8) or (authors < 8) or (headers < 8)):\n print \"Missing articles!\"\n L.log(BROWSERS[x], test, \"FAIL, MISSING CONTENT\", \"Images: \" + str(images) + \" Dots: \" + str(dots) + \" Authors: \" + str(authors) + \" Headers: \" + str(headers)) \n \n\telse:\n\t L.log(BROWSERS[x], test, \"PASS, OK\", \"None\")\n\t \n\t######################################################################## ", "def test_default_num_products(self):\n products = acme_report.generate_products()\n self.assertEqual(len(products), 30)", "def elemCount(memoryManager, paramsList):\n handleEmpty(paramsList, \"count elements of\")\n head = paramsList[0]\n \n if not validateList(head):\n raise Exception('Tried to get element count of non-list')\n \n def countHelper(head):\n if head == None:\n return 0\n\n if type(head) == float:\n return 1\n elif (validateList(head)):\n acum = 0\n for e in head:\n acum += countHelper(e)\n return acum\n return 0\n\n size = countHelper(head)\n return [float(size)]", "def count(self, query):", "def count() -> int:\n pass", "def noOfElem(classObj, index):\r\n return len(classObj.dataSet[:, index])", "def count_items(self):\n count = 0\n for o in self.order_lst:\n count += o.count()\n \n return count", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def count(self):\n return self._lift(\"count\")", "def testGetEmpCount(self):\n\tself.assertEqual(Employee.getEmpCount(),1) # test getEmpCount() whether return correct answer", "def 
_get_count(results):\n return len(results)", "def test_numero_elementos_BD(self):\n respuesta = self.client.get(self.response)\n num_elementos = Musica.objects.count()\n self.assertEqual(num_elementos, len(respuesta.data))", "def count(self):\n return len(self)", "def test_new_count(self):\n self.assertEqual(2, self.alice_storage.new_count)\n self.assertEqual(3, self.bob_storage.new_count)\n self.assertEqual(0, self.carol_storage.new_count)\n self.assertEqual(0, self.anonymous_storage.new_count)", "def _count_elements(mapping, iterable): # real signature unknown; restored from __doc__\n pass", "def test_all_count(self):\n self.assertEqual(2, self.alice_inbox.all_count)\n self.assertEqual(3, self.bob_inbox.all_count)\n self.assertEqual(0, self.carol_inbox.all_count)", "def Count(self):\n return self._get_attribute('count')", "def getNumElements(self):\n return 1 + sum(m.getNumElements() for m in self.members)", "def test_abcccd():\n assert part_01.count_for('abcccd', 2) == 0\n assert part_01.count_for('abcccd', 3) == 1", "def test_expand_counts(self):\n c = array([2,0,1,2])\n self.assertEqual(expand_counts(c), array([0,0,2,3,3]))", "def test_properties_count_group_by_group_by_and_sub_group_by_get(self):\n pass", "def count(self):\n return self.get_count()", "def document_count(self):\n raise NotImplementedError", "def test_getReferenceIndividualCount(self):\r\n with self.assertRaises(ValueError):\r\n self.res1.getReferenceIndividualCount('S1')\r\n\r\n self.res1.addSample('S1', 42)\r\n self.assertEqual(self.res1.getReferenceIndividualCount('S1'), 42)", "def count(self, element):\n count = 0\n for i in range(self._length): # Increment count when equal value is found\n if self._arr[i] == element:\n count += 1\n return count", "def analysis_function_total_elements(self,clustering):\n return clustering.total_number_of_elements", "def dcount(ev):\n profData = getProfilingData(ev)\n if profData is not None:\n a = profData.Descendants().AsArray()\n if len(a) > 0:\n return profData.DescendantCount(a[0])\n return \"\"", "def count(self):\n return self.size()", "def __len__(self) -> float:\n return len(self.elements)", "def count(self):\n return self._reduce_for_stat_function(F.count, only_numeric=False)", "def test_get_occurrence(self):\n pass", "def count(item):\n return len(item)", "def test_data_source_soaps_id_dynamic_datas_count_get(self):\n pass", "def count(self) -> float:\n return pulumi.get(self, \"count\")", "def test_svm_count():\n assert environments.svms() > 0\n count = 0\n for l in list(environments.data):\n e = environments[l]\n count += e.svms\n msg = ('SVM count mismatch. 
Environments says: ' +\n str(environments.svms()) +\n ', actual count: ' + str(count))\n assert count == environments.svms(), msg", "def test_count(self):\r\n assert self.table.objects.count() == 12\r\n\r\n q = self.table.objects(test_id=0)\r\n assert q.count() == 4", "def test_abbcde():\n assert part_01.count_for('abbcde', 2) == 1\n assert part_01.count_for('abbcde', 3) == 0", "def test_properties_count_get(self):\n pass", "def test_count(self):\r\n assert TestModel.objects.count() == 12\r\n\r\n q = TestModel.objects(test_id=0)\r\n assert q.count() == 4", "def test_task_count_total(self):\r\n tasks.count_total()\r\n\r\n stat = StatBookmark.query.first()\r\n self.assertEqual(stat.attrib, stats.TOTAL_CT)\r\n self.assertEqual(stat.data, 4)", "def test_table_counts():\n number_of_test_run = 2 # Run the pipeline twice\n for i in range(number_of_test_run):\n dp = DataPipeline()\n dp.run()\n\n dp = DataPipeline()\n assert dp.get_product_count() == (500000,)\n assert dp.get_duplicate_count(from_table=\"products\") == (0,)\n assert dp.get_aggregate_table_result_count() == (222024, )\n 222024\n dp.close()", "def test_unread_count(self):\n self.assertEqual(1, self.alice_storage.unread_count)\n self.assertEqual(2, self.bob_storage.unread_count)\n self.assertEqual(0, self.carol_storage.unread_count)\n self.assertEqual(0, self.anonymous_storage.unread_count)", "def test_data_counts(self):\n model = PoincareModel(self.data)\n self.assertEqual(len(model.all_relations), 5)\n self.assertEqual(len(model.node_relations[model.kv.vocab['kangaroo.n.01'].index]), 3)\n self.assertEqual(len(model.kv.vocab), 7)\n self.assertTrue('mammal.n.01' not in model.node_relations)", "def count(self) -> int:\n return self.__count", "def check_counts(self, lang, exp_loc, exp_sloc,\n exp_test_loc, exp_test_sloc):\n\n self.assertEqual(self.k__ is not None, True)\n loc_, sloc_, test_loc, test_sloc = self.k__.get_counts(lang)\n self.assertEqual(loc_, exp_loc)\n self.assertEqual(sloc_, exp_sloc)\n self.assertEqual(test_loc, exp_test_loc)\n self.assertEqual(test_sloc, exp_test_sloc)" ]
[ "0.68616205", "0.6832444", "0.67097497", "0.66983885", "0.667081", "0.66494507", "0.66494507", "0.66494507", "0.66494507", "0.6644853", "0.66385466", "0.6608439", "0.6572097", "0.64722455", "0.64489216", "0.6442627", "0.6383579", "0.63805336", "0.6367451", "0.63651866", "0.6357469", "0.63395673", "0.6323673", "0.6313401", "0.6285396", "0.6242326", "0.61823815", "0.61540586", "0.6146017", "0.6141991", "0.61211085", "0.61205405", "0.6117336", "0.61128384", "0.61117446", "0.6107629", "0.6096739", "0.6092792", "0.6085489", "0.6064569", "0.6064569", "0.60550344", "0.59743714", "0.59708077", "0.59614533", "0.5950731", "0.5916765", "0.5903004", "0.59000903", "0.588655", "0.5878023", "0.5852799", "0.58446866", "0.5840797", "0.5820137", "0.58178824", "0.58106774", "0.58036935", "0.5803554", "0.58012176", "0.5799576", "0.5795289", "0.57929325", "0.577716", "0.57711273", "0.5769771", "0.57649696", "0.57613516", "0.5758774", "0.57555294", "0.57542086", "0.5748391", "0.5747211", "0.57447565", "0.5737837", "0.5737188", "0.5735688", "0.57354224", "0.5725018", "0.57197005", "0.57119554", "0.5711915", "0.57064986", "0.5698896", "0.56979245", "0.56950986", "0.56941205", "0.56922054", "0.5679355", "0.5673578", "0.56711847", "0.5666456", "0.5661971", "0.5659265", "0.5656196", "0.56558347", "0.5654124", "0.5652638", "0.5645786", "0.56417847" ]
0.8672462
0
Unit test for Roybal_Student_Analytics avg_grade method.
def test_avg_grade(self):
    s = Student_Analytics()
    self.assertEqual(s.classify_grade(s.avg_grade(3)),"B")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_classify_grade(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(s.classify_grade(5.00),\"A+\")", "def test_multiple_averages(self):\n user = self.make_user()\n enrollment = EnrollmentFactory(grade_level__school_year__school=user.school)\n GradeFactory(\n score=50,\n student=enrollment.student,\n graded_work__course_task__course__grade_levels=[enrollment.grade_level],\n )\n GradeFactory(\n score=100,\n student=enrollment.student,\n graded_work__course_task__course__grade_levels=[enrollment.grade_level],\n )\n GradeFactory(\n graded_work__course_task__course__grade_levels=[enrollment.grade_level]\n )\n\n with self.login(user):\n self.get_check_200(\"reports:progress\", pk=enrollment.id)\n\n assert self.get_context(\"courses\")[0][\"course_average\"] == 50\n assert self.get_context(\"courses\")[1][\"course_average\"] == 100", "def test_grade_change(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(int(s.grade_change()),0)", "def test_mean(self):\n pass", "def test_mean(self):\n pass", "def average_grade(self):\n grade_sum = 0\n grades_length = 0\n for c in self.courses_grades:\n if c[1] != \"-\":\n grade_sum += int(c[1])\n grades_length += 1\n average = grade_sum / grades_length\n return average", "def get_average_grade_of_students(students):\n total_grade = 0\n for row in students:\n total_grade += int(row[5])\n return total_grade/len(students)", "def average(grade1, grade2, grade3):\n return (grade1 + grade2 + grade3) / 3", "def test_average_weight_loss(self):\n user_created = self.create_user()\n average_return = self.new_calculation.average_weight_loss(user_created)\n\n self.assertEqual(average_return, 5.0)\n self.assertEqual(type(average_return), float)", "def test_mean_div(self):\n gfile = grades.writers.GradesFile(self.fname)\n gfile.table.compute_mean()\n gfile.table_format = 'org'\n self.check_output(self.output_str3, gfile, div_on=('Group', 'Test 1'))", "def test_is_average(self):\n avg_orders = Decimal(self._uncertain_demand.average_orders)\n self.assertEqual(avg_orders, 50)", "def grades_average(grades_input):\n sum_of_grades = grades_sum(grades_input)\n average = sum_of_grades / float(len(grades_input))\n\n return average", "def testDriver():\n exam1=90\n exam2=85\n assignmentScores = [50, 60, 70, 80, ]\n computeGrades(exam1, exam2, assignmentScores)", "def get_average(self):\n self.avg = math.floor((self.maths + self.phy + self.che) / 3, )\n self.assign_grade()\n return self.avg\n # End of method get_average", "def get_average(value): # fine\r\n average_assignment = 0\r\n average_exam = 0\r\n student_count = 0\r\n if value == 'Assignment':\r\n for student in StudentRoster:\r\n student_count += 1\r\n average_assignment += int(student.assignment)\r\n if student_count == 0:\r\n print(0)\r\n else:\r\n calc = average_assignment/student_count\r\n print('{:.2f}'.format(calc))\r\n elif value == 'Exam':\r\n for student in StudentRoster:\r\n student_count += 1\r\n average_exam += int(student.exam)\r\n if student_count == 0:\r\n print(0)\r\n else:\r\n calc = average_exam/student_count\r\n print('{:.2f}'.format(calc))", "def test_a_grade(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.submit_question_answer('p2', {'2_1': 'Correct'})\r\n self.submit_question_answer('p3', {'2_1': 'Correct'})\r\n self.check_grade_percent(1.0)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'A')", "def print_avg():", "def average_grades(grades):\r\n\r\n\tfor key, value in grades.items(): # iterate through the dictionary for 
key and value\r\n\t\tgrades[key] = sum(value)/len(value) # average of the value\r\n\r\n\treturn (grades) #return grades\r", "def get_grade_stats(scores):\r\n # Calculate the arithmetic mean\r\n mean = sum(student_scores) / len(scores)\r\n\r\n # Calculate the standard deviation\r\n tmp = 0\r\n for i in range(len(scores)):\r\n tmp += (scores[i] - mean) ** 2\r\n std_dev = (tmp / len(scores)) ** 0.5\r\n\r\n # Package and return average, standard deviation in a tuple\r\n return mean, std_dev", "def test_average_rating(self):\n self.new_project.save()\n\n review1 = Review.objects.create(project = self.new_project, user = self.new_user, design = 8, usability = 5, content = 9, comment = 'This is a nice website.')\n\n review2 = Review.objects.create(project = self.new_project, user = self.new_user, design = 6, usability = 5, content = 3, comment = 'This is a nice website.')\n\n self.assertEqual(self.new_project.average_rating, 6.0)", "def getAvg(self):\r\n\t\treturn self.data['avg']", "def student_grades(student, course):\n cg = CourseGradeFactory().create(student, course)\n return cg.summary", "def test_average(self):\n self.Person(name=\"person\", age=0).save()\n assert int(self.Person.objects.average(\"age\")) == 0\n\n ages = [23, 54, 12, 94, 27]\n for i, age in enumerate(ages):\n self.Person(name=\"test%s\" % i, age=age).save()\n\n avg = float(sum(ages)) / (len(ages) + 1) # take into account the 0\n assert round(abs(int(self.Person.objects.average(\"age\")) - avg), 7) == 0\n\n self.Person(name=\"ageless person\").save()\n assert int(self.Person.objects.average(\"age\")) == avg\n\n # dot notation\n self.Person(name=\"person meta\", person_meta=self.PersonMeta(weight=0)).save()\n assert (\n round(abs(int(self.Person.objects.average(\"person_meta.weight\")) - 0), 7)\n == 0\n )\n\n for i, weight in enumerate(ages):\n self.Person(\n name=f\"test meta{i}\", person_meta=self.PersonMeta(weight=weight)\n ).save()\n\n assert (\n round(abs(int(self.Person.objects.average(\"person_meta.weight\")) - avg), 7)\n == 0\n )\n\n self.Person(name=\"test meta none\").save()\n assert int(self.Person.objects.average(\"person_meta.weight\")) == avg\n\n # test summing over a filtered queryset\n over_50 = [a for a in ages if a >= 50]\n avg = float(sum(over_50)) / len(over_50)\n assert self.Person.objects.filter(age__gte=50).average(\"age\") == avg", "def grade(self):\n if round(self.numAvg,0) >= 70:\n return round(self.numAvg,0)\n elif self.PassSummer:\n return 70\n elif round(self.numAvg,0) >= 55 and not self.PassSummer:\n return round(self.numAvg,0)\n else:\n return 55", "def avg(a,b):\r\n return (a+b)/2", "def get_average_mark(self, test):\n return", "def compare_averages(ave_stats):\n pass", "def getAvg(self):\r\n\t\tdata = self.pair.data\r\n\t\tif data['avg'] == None:\r\n\t\t\treturn None\r\n\t\treturn 1. 
/ self.pair.data['avg']", "def test_load_avg_1():\n result = _run_metric('load_avg_1')\n assert result.exit_code == 0", "def test_avg_mean(forecasters):\n y = make_forecasting_problem()\n forecaster = EnsembleForecaster(forecasters)\n forecaster.fit(y, fh=[1, 2, 3])\n mean_pred = forecaster.predict()\n\n forecaster_1 = EnsembleForecaster(forecasters, aggfunc=\"mean\", weights=[1, 1])\n forecaster_1.fit(y, fh=[1, 2, 3])\n avg_pred = forecaster_1.predict()\n\n pd.testing.assert_series_equal(mean_pred, avg_pred)", "def test_average_all_same(self):\n\n temp_data = [(32.00, time.localtime()), (32.00, time.localtime()),\n (32.00, time.localtime()), (32.00, time.localtime())]\n\n tt = TemperatureTracker()\n result = tt.average_from(temp_data)\n self.assertEqual(result, 32.0)\n\n # test the regular average function the user will call\n tt = TemperatureTracker(temp_data)\n result = tt.average()\n self.assertEqual(result, 32.0)", "def get_averages(self):\t\n\t\t\n\t\taverages = {}\n\t\tfor subject in self.grades.iterkeys():\n\t\t\taverages[subject] = float(sum(self.grades[subject])) / len(self.grades[subject])\n\t\treturn averages", "def average_grades(grades):\n\n for k in grades.keys():\n new_list = grades[k]\n str_len = len(new_list)\n total = float(sum(new_list) / str_len)\n grades[k] = total\n return grades", "def average(x, y):\n #helper function for get_accuracy\n average = (x+y)/2 \n return average", "def grade_report(course):\n report = []\n for st in course.get_students():\n try:\n average = sum(course.get_grades(st)) / len(course.get_grades(st))\n report.append(str(st) + '\\'s mean grade is: ' + str(average) + '.')\n except ZeroDivisionError:\n report.append(str(st) + ' has no grades.')\n return '\\n'.join(report)", "def gradeReport(course):\n report = []\n for student in course.allStudents():\n total = 0.0\n numberOfGrades = 0\n for grade in course.getGrades(student):\n total += grade\n numberOfGrades += 1\n \n try:\n average = total / numberOfGrades\n report.append(str(student) + \"'s mean grade is \" + str(average))\n except ZeroDivisionError:\n report.append(str(student) + \" has no grades\")\n \n return '\\n'.join(report)", "def test_get_average_of_sentiment_scores():\n\n dict_of_avg_scores = get_average_of_sentiment_scores(\n 'politics_30_months_comments_cleaned_standardized_vader_flair.csv')\n print('average sentiment scores all comments')\n for key, value in dict_of_avg_scores.items():\n print(key, value)\n print()", "def __ui_statistics_sort_avg(self, discipline_name):\n try:\n sorted_list = self.__grade_controller.get_averages_at_discipline_sorted_descending(discipline_name)\n if len(sorted_list) == 0:\n print(\"There is no student graded at the given discipline!\")\n return\n\n for student in sorted_list:\n print(str(student) + \"\\n\")\n\n except GradeException as ge:\n print(ge)\n return", "def test_e(self):\n user_dict = {'A': 3, 'B': 4, 'C': 5, 'D': 6, 'E': 7}\n user_key = 'e'\n self.assertEqual(7, switch_average(user_dict, user_key.upper()))", "def test_get_score(self):\r\n score_dict = self.combinedoe.get_score()\r\n self.assertEqual(score_dict['score'], 15.0)\r\n self.assertEqual(score_dict['total'], 5.0)", "def test_grade(self, grade):\n self.client.login(username=self.student.username, password=self.password)\n with patch('lms.djangoapps.grades.course_grade_factory.CourseGradeFactory.read') as mock_grade:\n grade_fields = {\n 'letter_grade': grade['letter_grade'],\n 'percent': grade['percent'],\n 'passed': grade['letter_grade'] is not None,\n\n }\n 
mock_grade.return_value = MagicMock(**grade_fields)\n resp = self.client.get(self.get_url(self.student.username))\n\n assert resp.status_code == status.HTTP_200_OK\n expected_data = {\n 'username': self.student.username,\n 'email': '',\n 'course_id': str(self.course_key),\n }\n\n expected_data.update(grade)\n assert resp.data == [expected_data]", "def avg_gross():\n avg = movies['Total Gross'].mean()\n if avg is np.nan:\n raise\n return ('avg_gross', avg)", "def test_get_score(self):\r\n score_dict = self.combinedoe.get_score()\r\n self.assertEqual(score_dict['score'], 0)\r\n self.assertEqual(score_dict['total'], 1)", "def calc_grade(self, average):\n if 95 <= average:\n return 'S'\n elif 90 <= average:\n return 'A'\n elif 80 <= average:\n return 'B'\n elif 70 <= average:\n return 'C'\n elif 60 <= average:\n return 'D'\n else:\n return 'F'", "def slg_average(df,start_year,end_year,bat_met,player_name):\n base_fields = ['AB','HR','X3B','X2B','SLG']\n emp_list = check_base_fields(df,base_fields)\n\n if not emp_list:\n df['X1B'] = round(df['SLG']*df['AB'] - (4*df['HR'] + 3*df['X3B'] + 2*df['X2B']),0)\n return round((df['X1B'].sum(axis = 0) + df['X2B'].sum(axis = 0) * 2 + df['X3B'].sum(axis = 0) * 3 + df['HR'].sum(axis = 0) * 4) / df['AB'].sum(axis = 0),3)\n\n else:\n df = original_dataframe(start_year,end_year,bat_met+emp_list,player_name)\n df['X1B'] = round(df['SLG']*df['AB'] - (4*df['HR'] + 3*df['X3B'] + 2*df['X2B']),0)\n SLG = round((df['X1B'].sum(axis = 0) + df['X2B'].sum(axis = 0) * 2 + df['X3B'].sum(axis = 0) * 3 + df['HR'].sum(axis = 0) * 4) / df['AB'].sum(axis = 0),3)\n del df['X1B']\n return SLG", "def get_mean(self):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")", "def test_none_grade(self):\r\n self.basic_setup()\r\n self.check_grade_percent(0)\r\n self.assertEqual(self.get_grade_summary()['grade'], None)", "def test__compute_average_raises_error(self):\n # Setup\n base_property = BaseSingleTableProperty()\n\n # Run and Assert\n expected_error_message = re.escape(\n \"The property details must be a DataFrame with a 'Score' column.\"\n )\n with pytest.raises(ValueError, match=expected_error_message):\n base_property._compute_average()\n\n base_property._details = pd.DataFrame({'Column': ['a', 'b', 'c']})\n with pytest.raises(ValueError, match=expected_error_message):\n base_property._compute_average()", "def test_d(self):\n user_dict = {'A': 3, 'B': 4, 'C': 5, 'D': 6, 'E': 7}\n user_key = 'd'\n self.assertEqual(6, switch_average(user_dict, user_key.upper()))", "def _grade(student, request, course, keep_raw_scores):\r\n grading_context = course.grading_context\r\n raw_scores = []\r\n\r\n # Dict of item_ids -> (earned, possible) point tuples. 
This *only* grabs\r\n # scores that were registered with the submissions API, which for the moment\r\n # means only openassessment (edx-ora2)\r\n submissions_scores = sub_api.get_scores(\r\n course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id)\r\n )\r\n\r\n totaled_scores = {}\r\n # This next complicated loop is just to collect the totaled_scores, which is\r\n # passed to the grader\r\n for section_format, sections in grading_context['graded_sections'].iteritems():\r\n format_scores = []\r\n for section in sections:\r\n section_descriptor = section['section_descriptor']\r\n section_name = section_descriptor.display_name_with_default\r\n\r\n # some problems have state that is updated independently of interaction\r\n # with the LMS, so they need to always be scored. (E.g. foldit.,\r\n # combinedopenended)\r\n should_grade_section = any(\r\n descriptor.always_recalculate_grades for descriptor in section['xmoduledescriptors']\r\n )\r\n\r\n # If there are no problems that always have to be regraded, check to\r\n # see if any of our locations are in the scores from the submissions\r\n # API. If scores exist, we have to calculate grades for this section.\r\n if not should_grade_section:\r\n should_grade_section = any(\r\n descriptor.location.to_deprecated_string() in submissions_scores\r\n for descriptor in section['xmoduledescriptors']\r\n )\r\n\r\n if not should_grade_section:\r\n with manual_transaction():\r\n should_grade_section = StudentModule.objects.filter(\r\n student=student,\r\n module_state_key__in=[\r\n descriptor.location for descriptor in section['xmoduledescriptors']\r\n ]\r\n ).exists()\r\n\r\n # If we haven't seen a single problem in the section, we don't have\r\n # to grade it at all! We can assume 0%\r\n if should_grade_section:\r\n scores = []\r\n\r\n def create_module(descriptor):\r\n '''creates an XModule instance given a descriptor'''\r\n # TODO: We need the request to pass into here. If we could forego that, our arguments\r\n # would be simpler\r\n with manual_transaction():\r\n field_data_cache = FieldDataCache([descriptor], course.id, student)\r\n return get_module_for_descriptor(student, request, descriptor, field_data_cache, course.id)\r\n\r\n for module_descriptor in yield_dynamic_descriptor_descendents(section_descriptor, create_module):\r\n\r\n (correct, total) = get_score(\r\n course.id, student, module_descriptor, create_module, scores_cache=submissions_scores\r\n )\r\n if correct is None and total is None:\r\n continue\r\n\r\n if settings.GENERATE_PROFILE_SCORES: \t# for debugging!\r\n if total > 1:\r\n correct = random.randrange(max(total - 2, 1), total + 1)\r\n else:\r\n correct = total\r\n\r\n graded = module_descriptor.graded\r\n if not total > 0:\r\n #We simply cannot grade a problem that is 12/0, because we might need it as a percentage\r\n graded = False\r\n\r\n scores.append(Score(correct, total, graded, module_descriptor.display_name_with_default))\r\n\r\n _, graded_total = graders.aggregate_scores(scores, section_name)\r\n if keep_raw_scores:\r\n raw_scores += scores\r\n else:\r\n graded_total = Score(0.0, 1.0, True, section_name)\r\n\r\n #Add the graded total to totaled_scores\r\n if graded_total.possible > 0:\r\n format_scores.append(graded_total)\r\n else:\r\n log.exception(\"Unable to grade a section with a total possible score of zero. 
\" +\r\n str(section_descriptor.location))\r\n\r\n totaled_scores[section_format] = format_scores\r\n\r\n grade_summary = course.grader.grade(totaled_scores, generate_random_scores=settings.GENERATE_PROFILE_SCORES)\r\n\r\n # We round the grade here, to make sure that the grade is an whole percentage and\r\n # doesn't get displayed differently than it gets grades\r\n grade_summary['percent'] = round(grade_summary['percent'] * 100 + 0.05) / 100\r\n\r\n letter_grade = grade_for_percentage(course.grade_cutoffs, grade_summary['percent'])\r\n grade_summary['grade'] = letter_grade\r\n grade_summary['totaled_scores'] = totaled_scores \t# make this available, eg for instructor download & debugging\r\n if keep_raw_scores:\r\n grade_summary['raw_scores'] = raw_scores # way to get all RAW scores out to instructor\r\n # so grader can be double-checked\r\n return grade_summary", "def get_prediction(course_grades, train_data, train_grades):\n\n # In the case the student has no grade, return a predicted grade of 0\n if train_data.size == 0:\n return 0,0\n\n model = BayesianRidge()\n model.fit(train_data, train_grades)\n y_mean, y_sd = model.predict(np.array(course_grades).reshape(1, -1), return_std=True)\n\n return y_mean, y_sd", "def test_load_avg_5():\n result = _run_metric('load_avg_5')\n assert result.exit_code == 0", "def test_a(self):\n user_dict = {'A': 3, 'B': 4, 'C': 5, 'D': 6, 'E': 7}\n user_key = 'a'\n self.assertEqual(3, switch_average(user_dict, user_key.upper()))", "def test_load_avg_15():\n result = _run_metric('load_avg_15')\n assert result.exit_code == 0", "def getCurrentAverage(examList, projectList, labList, adjPoints=0):\n \n totalPoints = 1000 if not adjPoints else adjPoints\n grades = examList + projectList + labList # concat into one list to calc the average\n return sum(grades) / totalPoints", "def average_grade(lst):\r\n res = []\r\n for stdnt in lst:\r\n name, avg = stdnt[0], mean(conv_to_num(stdnt[1:]))\r\n res.append([name, avg])\r\n\r\n\r\n return(res)", "def _grade_with_errors(student, request, course, keep_raw_scores=False):\r\n if student.username in ['student3', 'student4']:\r\n raise Exception(\"I don't like {}\".format(student.username))\r\n\r\n return grade(student, request, course, keep_raw_scores=keep_raw_scores)", "def test_avg_l(self):\n u_spec = leabra.UnitSpec(g_bar_e=0.3, g_bar_l=0.3, g_bar_i=1.0)\n u = leabra.Unit(spec=u_spec)\n\n for _ in range(20):\n u.add_excitatory(1.0)\n u.calculate_net_in()\n u.cycle('minus')\n\n self.assertEqual(u.avg_l, 0.40)\n u.spec.update_avg_l(u)\n self.assertTrue(np.allclose(0.52, u.avg_l, rtol=0.1, atol=0.1))\n #TODO: verify that 0.52 is the value of emergent\n\n for _ in range(100):\n u.spec.update_avg_l(u)\n self.assertTrue(np.allclose(1.64, u.avg_l, rtol=0.1, atol=0.1))\n #TODO: verify that 1.64 is the value of emergent", "def test_period_average():\n\n time_point = datetime(2012, 12, 31)\n period = 25\n spy = DEFAULT_ASSET_FACTORY.make_asset(\"SPY\")\n\n weatherman = weathermen.period_average(CALENDAR)\n forecast = weatherman(DEFAULT_ASSET_FACTORY, time_point, period)\n\n assert is_close(forecast.cagr(spy), .152)", "def average_rating(self):\n reviews = self.gamereview_set.all()\n\n try:\n return mean([ review.rating for review in reviews ])\n\n except StatisticsError:\n return None", "def avg_hw_one(students_dict):\n scores = [\n hw['Homework 1']\n for hw in students_dict.values()\n ]\n hw_average = sum(scores) / len(scores)\n return hw_average", "def test_basic_daily_mean(self):\n self.testInst.bounds = self.bounds1\n ans = 
avg.mean_by_day(self.testInst, 'dummy4')\n assert np.all(ans == 86399 / 2.0)\n\n return", "def average(self,start_window, end_window):\n query = f\"select avg(age) from `{self.table_id}` where timestamp between {start_window} and {end_window}\"\n query_job = self.client.query(query)\n return query_job.result", "def test(student_module):\n tester = _testDriver()\n tester.test_all(student_module)\n return tester.score, tester.feedback", "def test_list_int(self):\n\n result = get_avg([0,0,0,0])\n self.assertEqual(result, ZeroDivisionError)", "def test_gen_avg_brain():\n\n paths = FilePaths(datafile(\"average\"), avg_roi=\"surf/roi/\",\n output=\"blender_scene/\", avg_prior_stim=\"\")\n\n generate_average_brain(paths=paths, blender=True, force_rerun=True)\n\n assert os.path.exists(paths.output + '/iEEG_surface.blend')\n assert os.path.exists(paths.output + '/iEEG_surface.bin')\n assert os.path.exists(paths.output + '/iEEG_surface.json')\n assert os.path.exists(paths.root + \"/fsaverage_joel_allcords.csv\")\n\n if os.path.exists(paths.output):\n shutil.rmtree(paths.output, ignore_errors=True)\n if os.path.exists(paths.root + \"fsaverage_joel_allcords.csv\"):\n os.remove(paths.root + \"fsaverage_joel_allcords.csv\")", "def take_test(exam, student):\n\n student.score = exam.administer()\n return student.score", "def averageTime(self):\n \n pass", "def test_get_score(self):\n # Setup\n\n real_data = Mock()\n synthetic_data = Mock()\n metadata = Mock()\n progress_bar = Mock()\n\n mock_compute_average = Mock()\n mock__generate_details = Mock(return_value=None)\n\n base_property = BaseSingleTableProperty()\n base_property._compute_average = mock_compute_average\n base_property._generate_details = mock__generate_details\n\n # Run\n base_property.get_score(real_data, synthetic_data, metadata, progress_bar)\n\n # Assert\n mock__generate_details.assert_called_once_with(\n real_data, synthetic_data, metadata, progress_bar\n )\n mock_compute_average.assert_called_once()", "def test_b(self):\n user_dict = {'A': 3, 'B': 4, 'C': 5, 'D': 6, 'E': 7}\n user_key = 'B'\n self.assertEqual(4, switch_average(user_dict, user_key.upper()))", "def test_list_int(self):\n result = get_avg([])\n self.assertEqual(result, 0)", "def mean(self):\n raise RuntimeError(\"Needs to be implemented in base class\")", "def average_population_grade(population):\r\n total = 0\r\n for individual in population :\r\n total += get_individual_fitness(individual)\r\n return total/POPULATION_COUNT", "def test_average_all_different(self):\n\n temp_data = [(1.00, time.localtime()), (2.00, time.localtime()),\n (3.00, time.localtime()), (4.00, time.localtime())]\n\n tt = TemperatureTracker()\n result = tt.average_from(temp_data)\n self.assertEqual(result, 2.5)", "def test_aggregated_error_rate(self):\n estimates = np.mat([0.8, 0.4, 0.8, 0.4])\n m = np.shape(self.data_matrix)[0]\n returned = ada_boost.aggregated_error_rate(estimates, self.labels, m)\n self.assertEqual(returned, 2.0)", "def get_mean_score(rating_scores):\n return sum(rating_scores) / len(rating_scores)", "def get_average_rating(self):\n count = 0\n total = 0\n num_books = len(self.books)\n if num_books > 0:\n for rating in self.books.values():\n if rating:\n count += 1\n total += rating\n average = total / count\n if count > 0:\n return average\n else:\n print(\"Books with ratings not found for user {user}\".format(user=self.name))", "def average(self):\n return self.properties.get('average')", "def avg_e_score(self, entity):\n return float(entity['es']) / float(entity['count'])", 
"def avg(ev):\n profData = getProfilingData(ev)\n if profData is not None:\n return profData.Tavg()\n return \"\"", "def test_average_over_db_field(self):\n\n class UserVisit(Document):\n num_visits = IntField(db_field=\"visits\")\n\n UserVisit.drop_collection()\n\n UserVisit.objects.create(num_visits=20)\n UserVisit.objects.create(num_visits=10)\n\n assert UserVisit.objects.average(\"num_visits\") == 15", "def get_grade(self) -> int :\n return self.grade", "def average_scores(self, scores, education, count):\n\n for key in scores.keys():\n for k in scores[key].keys():\n scores[key][k] = round(scores[key][k] / count[key][k], 1)\n education[key][k] = round(education[key][k] / count[key][k], 1)\n\n return scores, education", "def test_grouped(self):\n gfile = grades.writers.GradesFile(self.fname)\n gfile.table.compute_grouped_mean('Group')\n gfile.table_format = 'org'\n self.check_output(self.output_str2, gfile)", "def runTests():\r\n\r\n print(\"running a few tests\")\r\n\r\n average = compute .gpsAverage (4, 5)\r\n print(\"average = \", average)\r\n \r\n print (\"hello!\")", "def test_avg_loss(model, dataset):\n _opt = optim.Adadelta(model.parameters(), lr=1)\n opt = BaseDamper(model, dataset, _opt)\n for epoch in range(1, 16 + 1):\n model, opt, meta, _ = experiment.train(model, opt)\n loss = [\n {\"loss\": opt._get_loss(frac=frac), \"frac\": frac, \"repeat\": repeat}\n for frac in np.linspace(0.5, 0.99, num=5)\n for repeat in range(5)\n ]\n total_loss = opt._get_loss(frac=1)\n df = pd.DataFrame(loss)\n summary = df.pivot(index=\"frac\", columns=\"repeat\", values=\"loss\")\n\n abs_error = np.abs(df.loss - total_loss)\n rel_error = abs_error / total_loss\n assert rel_error.max() <= 0.125\n assert np.percentile(rel_error, 50) <= 0.12\n assert 1.5 <= total_loss <= 2.2\n assert abs_error.max() <= 0.17", "def test_avgeraging(self):\n\n num_ensemble = 10\n enn = networks.MLPEnsembleEnn(\n output_sizes=[1],\n num_ensemble=num_ensemble,\n )\n\n dummy_metrics = {'a': 0, 'b': 1}\n # A dummy loss fn that returns the normalized index as loss and two constant\n # metrics. 
Index is random but normalized such that its mean is 1.\n single_loss_fn = DummySingleIndexLossFn(num_ensemble, dummy_metrics)\n\n num_index_samples = 100\n loss_fn = average_single_index_loss(single_loss_fn, num_index_samples)\n dummy_batch = base.Batch(np.ones([1, 1]), np.ones([1, 1]))\n loss, metrics = loss_fn(\n enn=enn, params=dict(), batch=dummy_batch, key=jax.random.PRNGKey(0))\n\n # Since the single loss has mean 1 the averaged loss also has mean 1 a\n # variance proportional to 1/np.sqrt(num_index_samples).\n self.assertAlmostEqual(\n loss,\n 1.0,\n delta=5 / np.sqrt(num_index_samples),\n msg=f'Expected loss to be ~1.0 but it is {loss}')\n self.assertDictEqual(\n metrics, dummy_metrics,\n f'expected metrics to be {dummy_metrics} but it is {metrics}')", "def take_test(exam, student):\n\n student.score = exam.administer()", "def _assert_num_graded(self, student_id, location, num_graded, num_required):\r\n\r\n # Unlike the actual ORA service,\r\n # we keep track of counts on a per-student basis.\r\n # This means that every user starts with N essays to grade,\r\n # and as they grade essays, that number decreases.\r\n # We do NOT simulate students adding more essays to the queue,\r\n # and essays that the current student submits are NOT graded\r\n # by other students.\r\n num_pending = StudentState.INITIAL_ESSAYS_AVAILABLE - num_graded\r\n\r\n # Notifications\r\n response = requests.get(\r\n self._peer_url('get_notifications'),\r\n params={'student_id': student_id, 'course_id': 'test course'}\r\n )\r\n self._assert_response(response, {\r\n 'version': 1, 'success': True,\r\n 'count_required': num_required,\r\n 'student_sub_count': self.server.DUMMY_DATA['student_sub_count'],\r\n 'count_graded': num_graded,\r\n 'count_available': num_pending\r\n })\r\n\r\n # Location data\r\n if location is not None:\r\n response = requests.get(\r\n self._peer_url('get_data_for_location'),\r\n params={'location': location, 'student_id': student_id}\r\n )\r\n self._assert_response(response, {\r\n 'version': 1, 'success': True,\r\n 'count_required': num_required,\r\n 'student_sub_count': self.server.DUMMY_DATA['student_sub_count'],\r\n 'count_graded': num_graded,\r\n 'count_available': num_pending\r\n })", "def get_avg_score(game_id):\r\n\r\n scores = []\r\n game = Game.query.get(game_id)\r\n for rating in game.ratings:\r\n scores.append(rating.score)\r\n \r\n avg_score = sum(scores)/len(scores)\r\n \r\n \r\n return avg_score", "def test_grading_exception(self):\r\n all_gradesets, all_errors = self._gradesets_and_errors_for(self.course.id, self.students)\r\n student1, student2, student3, student4, student5 = self.students\r\n self.assertEqual(\r\n all_errors,\r\n {\r\n student3: \"I don't like student3\",\r\n student4: \"I don't like student4\"\r\n }\r\n )\r\n\r\n # But we should still have five gradesets\r\n self.assertEqual(len(all_gradesets), 5)\r\n\r\n # Even though two will simply be empty\r\n self.assertFalse(all_gradesets[student3])\r\n self.assertFalse(all_gradesets[student4])\r\n\r\n # The rest will have grade information in them\r\n self.assertTrue(all_gradesets[student1])\r\n self.assertTrue(all_gradesets[student2])\r\n self.assertTrue(all_gradesets[student5])", "def interval_average():\r\n import statistics as st\r\n from tach_detect import tach_detect\r\n r = request.get_json()\r\n try:\r\n email = r[\"user_email\"]\r\n except KeyError:\r\n return jsonify(\"no email input\"), 400\r\n raise LookupError(\"no email input\")\r\n check_email = Check_For_User(email)\r\n if check_email.user_exists is 
False:\r\n return jsonify(str(email) + \" was not found. Please re-enter\"), 400\r\n raise LookupError(str(user_email) + \" was not found. Please re-enter\")\r\n try:\r\n input_date_time = r[\"date_time\"]\r\n except KeyError:\r\n return jsonify(\"no date entered\"), 400\r\n raise LookupError(\"no date entered\")\r\n try:\r\n validate_date_time(input_date_time)\r\n except (ValueError, TypeError) as error:\r\n return jsonify(\"date entered is invalid. Please re-type.\"), 400\r\n date_time = datetime.datetime(input_date_time[0], input_date_time[1],\r\n input_date_time[2], input_date_time[3],\r\n input_date_time[4], input_date_time[5],\r\n input_date_time[6])\r\n time_list = get_all_times(email)\r\n heart_rate_list = get_all_rates(email)\r\n interval_list = find_first_date(date_time, time_list, heart_rate_list)\r\n try:\r\n interval_average_post = st.mean(interval_list)\r\n user = models.User.objects.raw({\"_id\": email}).first()\r\n curr_age = user.age\r\n tach_test = tach_detect(curr_age, interval_average_post)\r\n return_dict = {\r\n \"user_email\": email,\r\n \"heart_rate_average_since\": str(date_time),\r\n \"heart_rate_average\": interval_average_post,\r\n \"is_heart rate_tachycardic\": str(tach_test)\r\n }\r\n except st.StatisticsError:\r\n interval_average_post = heart_rate_list[len(heart_rate_list)-1]\r\n user = models.User.objects.raw({\"_id\": email}).first()\r\n curr_age = user.age\r\n tach_test = tach_detect(curr_age, interval_average_post)\r\n return_dict = {\r\n \"user_email\": email,\r\n \"heart_rate_average_since\": str(date_time),\r\n \"heart_rate_average\": interval_average_post,\r\n \"is_heart rate_tachycardic\": str(tach_test)\r\n }\r\n return jsonify(return_dict), 200", "def average(data, event):\n if len(data) == 0:\n return 0\n\n score = 0\n # scores = []\n count = 0\n for i in data:\n count += 1\n if event == 'Swim' or event == 'Run':\n num = time_seconds(i[event])\n #print(\"first if\")\n #Sprint(num)\n else:\n num = int(i[event])\n #print(\"second if\")\n #print(num)\n #scores[count] =\n #print(\"end of loop count\" + str(count))\n score += num\n #print (\"score\" + str(score))\n\n # total = 0\n # for x in range(0,len(scores)):\n # total += scores[x]\n score = float(score)\n\n return score / count", "def averaged_risk(self):\n return self._averaged_risk", "def averaged_risk(self):\n return self._averaged_risk", "def test_compute_grade_for_fa(self):\n run1_data = self.user_edx_data.get_run_data(self.run_fa.edx_course_key)\n run2_data = self.user_edx_data.get_run_data(self.run_fa_with_cert.edx_course_key)\n\n grade1_from_cur_grade = api._compute_grade_for_fa(run1_data)\n grade2_from_cert = api._compute_grade_for_fa(run2_data)\n\n assert isinstance(grade1_from_cur_grade, api.UserFinalGrade)\n assert isinstance(grade2_from_cert, api.UserFinalGrade)\n\n assert grade1_from_cur_grade.passed == self.current_grades.get(\n self.run_fa.edx_course_key).data.get('passed')\n assert grade1_from_cur_grade.grade == self.current_grades.get(\n self.run_fa.edx_course_key).data.get('percent')\n assert grade1_from_cur_grade.payed_on_edx == (self.enrollments.get(\n self.run_fa.edx_course_key).data.get('mode') in ['verified', 'honor'])\n\n assert grade2_from_cert.passed is self.current_grades.get(\n self.run_fa_with_cert.edx_course_key).data.get('passed')\n assert grade2_from_cert.grade == self.current_grades.get(\n self.run_fa_with_cert.edx_course_key).data.get('percent')\n # this is True as long as the certificate is verified\n assert grade2_from_cert.payed_on_edx is True", "def 
test_get_avg_link_sentiment_scores():\n print('average sentiment values when grouped by link_id')\n avg_scores = get_avg_link_sentiment_scores('politics_30_months_comments_cleaned_standardized_vader_flair.csv')\n for key, value in avg_scores.items():\n print(key, value)\n print()", "def get_student_grade_summary_data(request, course, get_grades=True, get_raw_scores=False, use_offline=False):\r\n course_key = course.id\r\n enrolled_students = User.objects.filter(\r\n courseenrollment__course_id=course_key,\r\n courseenrollment__is_active=1,\r\n ).prefetch_related(\"groups\").order_by('username')\r\n\r\n header = [_('ID'), _('Username'), _('Full Name'), _('edX email'), _('External email')]\r\n\r\n datatable = {'header': header, 'students': enrolled_students}\r\n data = []\r\n\r\n gtab = GradeTable()\r\n\r\n for student in enrolled_students:\r\n datarow = [student.id, student.username, student.profile.name, student.email]\r\n try:\r\n datarow.append(student.externalauthmap.external_email)\r\n except: # ExternalAuthMap.DoesNotExist\r\n datarow.append('')\r\n\r\n if get_grades:\r\n gradeset = student_grades(student, request, course, keep_raw_scores=get_raw_scores, use_offline=use_offline)\r\n log.debug('student={0}, gradeset={1}'.format(student, gradeset))\r\n with gtab.add_row(student.id) as add_grade:\r\n if get_raw_scores:\r\n # TODO (ichuang) encode Score as dict instead of as list, so score[0] -> score['earned']\r\n for score in gradeset['raw_scores']:\r\n add_grade(score.section, getattr(score, 'earned', score[0]))\r\n else:\r\n for grade_item in gradeset['section_breakdown']:\r\n add_grade(grade_item['label'], grade_item['percent'])\r\n student.grades = gtab.get_grade(student.id)\r\n\r\n data.append(datarow)\r\n\r\n # if getting grades, need to do a second pass, and add grades to each datarow;\r\n # on the first pass we don't know all the graded components\r\n if get_grades:\r\n for datarow in data:\r\n # get grades for student\r\n sgrades = gtab.get_grade(datarow[0])\r\n datarow += sgrades\r\n\r\n # get graded components and add to table header\r\n assignments = gtab.get_graded_components()\r\n header += assignments\r\n datatable['assignments'] = assignments\r\n\r\n datatable['data'] = data\r\n return datatable", "def showAverageStats(self) :\n Scenario.messageAverageStats()\n self.showAverageGainWon()\n self.showAverageBetUsed()\n self.showAverageNbAttemptsByLevels()", "def test_list_int(self):\n result = get_avg([\"Hello\", \"World\"])\n self.assertEqual(result, TypeError)" ]
[ "0.7095265", "0.6956697", "0.68770933", "0.68014836", "0.68014836", "0.6484994", "0.6410727", "0.6400051", "0.63546544", "0.63439816", "0.627917", "0.62735564", "0.62159175", "0.6123687", "0.6123593", "0.6098858", "0.6067414", "0.60544163", "0.5998449", "0.59840584", "0.597824", "0.5939073", "0.592113", "0.58991736", "0.5898149", "0.58560455", "0.58508176", "0.57952476", "0.5793598", "0.57901573", "0.57838494", "0.578049", "0.57800895", "0.5752805", "0.5744518", "0.5740307", "0.5728739", "0.5716497", "0.5710483", "0.5707796", "0.5705821", "0.57058114", "0.569333", "0.5688267", "0.56626064", "0.5657488", "0.5649582", "0.5649226", "0.5644209", "0.56331044", "0.5627265", "0.5624811", "0.5617069", "0.56141895", "0.5609631", "0.56093156", "0.55998826", "0.55989605", "0.55958253", "0.5593281", "0.55871457", "0.55848384", "0.55824757", "0.55824506", "0.5579505", "0.5565441", "0.5560359", "0.55572355", "0.55379164", "0.55365145", "0.55330396", "0.5523417", "0.5516989", "0.5508754", "0.5486073", "0.54848427", "0.548397", "0.5480875", "0.5473235", "0.5463641", "0.5459369", "0.5456289", "0.54552275", "0.54550326", "0.5447663", "0.5446873", "0.54445547", "0.5428596", "0.54245025", "0.5422092", "0.5419898", "0.54131025", "0.5408628", "0.54047847", "0.54047847", "0.5401848", "0.5393199", "0.53830236", "0.53817964", "0.5381221" ]
0.8476935
0
Unit test for Roybal_Student_Analytics grade_change method.
def test_grade_change(self):
    s = Student_Analytics()
    self.assertEqual(int(s.grade_change()),0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_avg_grade(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(s.classify_grade(s.avg_grade(3)),\"B\")", "def test_grade(self, grade):\n self.client.login(username=self.student.username, password=self.password)\n with patch('lms.djangoapps.grades.course_grade_factory.CourseGradeFactory.read') as mock_grade:\n grade_fields = {\n 'letter_grade': grade['letter_grade'],\n 'percent': grade['percent'],\n 'passed': grade['letter_grade'] is not None,\n\n }\n mock_grade.return_value = MagicMock(**grade_fields)\n resp = self.client.get(self.get_url(self.student.username))\n\n assert resp.status_code == status.HTTP_200_OK\n expected_data = {\n 'username': self.student.username,\n 'email': '',\n 'course_id': str(self.course_key),\n }\n\n expected_data.update(grade)\n assert resp.data == [expected_data]", "def test_classify_grade(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(s.classify_grade(5.00),\"A+\")", "def test_save_grade(self):\r\n response = self.peer_grading.save_grade(self.save_dict)\r\n self.assertEqual(response['success'], True)", "def test_a_grade(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.submit_question_answer('p2', {'2_1': 'Correct'})\r\n self.submit_question_answer('p3', {'2_1': 'Correct'})\r\n self.check_grade_percent(1.0)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'A')", "def test_grade(self):\r\n # Sample variables x and y in the range [-10, 10]\r\n sample_dict = {'x': (-10, 10), 'y': (-10, 10)}\r\n\r\n # The expected solution is numerically equivalent to x+2y\r\n problem = self.build_problem(sample_dict=sample_dict,\r\n num_samples=10,\r\n tolerance=0.01,\r\n answer=\"x+2*y\")\r\n\r\n # Expect an equivalent formula to be marked correct\r\n # 2x - x + y + y = x + 2y\r\n input_formula = \"2*x - x + y + y\"\r\n self.assert_grade(problem, input_formula, \"correct\")\r\n\r\n # Expect an incorrect formula to be marked incorrect\r\n # x + y != x + 2y\r\n input_formula = \"x + y\"\r\n self.assert_grade(problem, input_formula, \"incorrect\")", "def test_compute_grade_for_fa(self):\n run1_data = self.user_edx_data.get_run_data(self.run_fa.edx_course_key)\n run2_data = self.user_edx_data.get_run_data(self.run_fa_with_cert.edx_course_key)\n\n grade1_from_cur_grade = api._compute_grade_for_fa(run1_data)\n grade2_from_cert = api._compute_grade_for_fa(run2_data)\n\n assert isinstance(grade1_from_cur_grade, api.UserFinalGrade)\n assert isinstance(grade2_from_cert, api.UserFinalGrade)\n\n assert grade1_from_cur_grade.passed == self.current_grades.get(\n self.run_fa.edx_course_key).data.get('passed')\n assert grade1_from_cur_grade.grade == self.current_grades.get(\n self.run_fa.edx_course_key).data.get('percent')\n assert grade1_from_cur_grade.payed_on_edx == (self.enrollments.get(\n self.run_fa.edx_course_key).data.get('mode') in ['verified', 'honor'])\n\n assert grade2_from_cert.passed is self.current_grades.get(\n self.run_fa_with_cert.edx_course_key).data.get('passed')\n assert grade2_from_cert.grade == self.current_grades.get(\n self.run_fa_with_cert.edx_course_key).data.get('percent')\n # this is True as long as the certificate is verified\n assert grade2_from_cert.payed_on_edx is True", "def test_combined_grade_created_updated(self):\n combined_grade_qset = CombinedFinalGrade.objects.filter(user=self.user, course=self.course_run.course)\n # no passing final grade\n api.update_or_create_combined_final_grade(self.user, self.course_run.course)\n assert combined_grade_qset.exists() is False\n\n 
FinalGradeFactory.create(user=self.user, course_run__course=self.course_run.course, grade=0.6, passed=True)\n # no passing exam grade\n api.update_or_create_combined_final_grade(self.user, self.course_run.course)\n assert combined_grade_qset.exists() is False\n ProctoredExamGradeFactory.create(\n user=self.user,\n course=self.course_run.course,\n percentage_grade=0.8,\n passed=True,\n exam_run=self.exam_run\n )\n\n # now should create combined grade\n api.update_or_create_combined_final_grade(self.user, self.course_run.course)\n assert combined_grade_qset.exists() is True\n\n # now update it with a new grade\n FinalGradeFactory.create(user=self.user, course_run__course=self.course_run.course, grade=0.8, passed=True)\n api.update_or_create_combined_final_grade(self.user, self.course_run.course)\n assert combined_grade_qset.first().grade == 80.0", "def _save_grade(self):\r\n student = self._student('POST', key='grader_id')\r\n if student is None:\r\n self._error_response()\r\n\r\n else:\r\n # Update the number of essays the student has graded\r\n student.grade_peer_essay()\r\n return self._success_response({})", "def testDriver():\n exam1=90\n exam2=85\n assignmentScores = [50, 60, 70, 80, ]\n computeGrades(exam1, exam2, assignmentScores)", "def grade_calculate_grade(self):\n try:\n if int(self.root.ids.grade_input_grade.text) >= 85:\n grade = 'High Distinction'\n elif int(self.root.ids.grade_input_grade.text) >= 75:\n grade = 'Distinction'\n elif int(self.root.ids.grade_input_grade.text) >= 65:\n grade = 'Credit'\n elif int(self.root.ids.grade_input_grade.text) >= 50:\n grade = 'Pass'\n else:\n grade = 'Fail'\n self.root.ids.grade_output_label.text = 'Grade: ' + grade\n except ValueError:\n\n self.root.ids.grade_output_label.text = 'Invalid Grade'", "def update_grade(self, course, grade):\n if course not in self.courses:\n raise NameError('This student is not enrolled in that course')\n else:\n self.courses[course] = grade\n\n return self", "def _grade(student, request, course, keep_raw_scores):\r\n grading_context = course.grading_context\r\n raw_scores = []\r\n\r\n # Dict of item_ids -> (earned, possible) point tuples. This *only* grabs\r\n # scores that were registered with the submissions API, which for the moment\r\n # means only openassessment (edx-ora2)\r\n submissions_scores = sub_api.get_scores(\r\n course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id)\r\n )\r\n\r\n totaled_scores = {}\r\n # This next complicated loop is just to collect the totaled_scores, which is\r\n # passed to the grader\r\n for section_format, sections in grading_context['graded_sections'].iteritems():\r\n format_scores = []\r\n for section in sections:\r\n section_descriptor = section['section_descriptor']\r\n section_name = section_descriptor.display_name_with_default\r\n\r\n # some problems have state that is updated independently of interaction\r\n # with the LMS, so they need to always be scored. (E.g. foldit.,\r\n # combinedopenended)\r\n should_grade_section = any(\r\n descriptor.always_recalculate_grades for descriptor in section['xmoduledescriptors']\r\n )\r\n\r\n # If there are no problems that always have to be regraded, check to\r\n # see if any of our locations are in the scores from the submissions\r\n # API. 
If scores exist, we have to calculate grades for this section.\r\n if not should_grade_section:\r\n should_grade_section = any(\r\n descriptor.location.to_deprecated_string() in submissions_scores\r\n for descriptor in section['xmoduledescriptors']\r\n )\r\n\r\n if not should_grade_section:\r\n with manual_transaction():\r\n should_grade_section = StudentModule.objects.filter(\r\n student=student,\r\n module_state_key__in=[\r\n descriptor.location for descriptor in section['xmoduledescriptors']\r\n ]\r\n ).exists()\r\n\r\n # If we haven't seen a single problem in the section, we don't have\r\n # to grade it at all! We can assume 0%\r\n if should_grade_section:\r\n scores = []\r\n\r\n def create_module(descriptor):\r\n '''creates an XModule instance given a descriptor'''\r\n # TODO: We need the request to pass into here. If we could forego that, our arguments\r\n # would be simpler\r\n with manual_transaction():\r\n field_data_cache = FieldDataCache([descriptor], course.id, student)\r\n return get_module_for_descriptor(student, request, descriptor, field_data_cache, course.id)\r\n\r\n for module_descriptor in yield_dynamic_descriptor_descendents(section_descriptor, create_module):\r\n\r\n (correct, total) = get_score(\r\n course.id, student, module_descriptor, create_module, scores_cache=submissions_scores\r\n )\r\n if correct is None and total is None:\r\n continue\r\n\r\n if settings.GENERATE_PROFILE_SCORES: \t# for debugging!\r\n if total > 1:\r\n correct = random.randrange(max(total - 2, 1), total + 1)\r\n else:\r\n correct = total\r\n\r\n graded = module_descriptor.graded\r\n if not total > 0:\r\n #We simply cannot grade a problem that is 12/0, because we might need it as a percentage\r\n graded = False\r\n\r\n scores.append(Score(correct, total, graded, module_descriptor.display_name_with_default))\r\n\r\n _, graded_total = graders.aggregate_scores(scores, section_name)\r\n if keep_raw_scores:\r\n raw_scores += scores\r\n else:\r\n graded_total = Score(0.0, 1.0, True, section_name)\r\n\r\n #Add the graded total to totaled_scores\r\n if graded_total.possible > 0:\r\n format_scores.append(graded_total)\r\n else:\r\n log.exception(\"Unable to grade a section with a total possible score of zero. 
\" +\r\n str(section_descriptor.location))\r\n\r\n totaled_scores[section_format] = format_scores\r\n\r\n grade_summary = course.grader.grade(totaled_scores, generate_random_scores=settings.GENERATE_PROFILE_SCORES)\r\n\r\n # We round the grade here, to make sure that the grade is an whole percentage and\r\n # doesn't get displayed differently than it gets grades\r\n grade_summary['percent'] = round(grade_summary['percent'] * 100 + 0.05) / 100\r\n\r\n letter_grade = grade_for_percentage(course.grade_cutoffs, grade_summary['percent'])\r\n grade_summary['grade'] = letter_grade\r\n grade_summary['totaled_scores'] = totaled_scores \t# make this available, eg for instructor download & debugging\r\n if keep_raw_scores:\r\n grade_summary['raw_scores'] = raw_scores # way to get all RAW scores out to instructor\r\n # so grader can be double-checked\r\n return grade_summary", "def get_grade(self) -> int :\n return self.grade", "def set_grade(\n self,\n assignment_id,\n student_id,\n grade_value,\n gradebook_id='',\n **kwargs\n ):\n # pylint: disable=too-many-arguments\n\n # numericGradeValue stringified because 'x' is a possible\n # value for excused grades.\n grade_info = {\n 'studentId': student_id,\n 'assignmentId': assignment_id,\n 'mode': 2,\n 'comment': 'from MITx {0}'.format(time.ctime(time.time())),\n 'numericGradeValue': str(grade_value),\n 'isGradeApproved': False\n }\n grade_info.update(kwargs)\n log.info(\n \"student %s set_grade=%s for assignment %s\",\n student_id,\n grade_value,\n assignment_id)\n return self.post(\n 'grades/{gradebookId}'.format(\n gradebookId=gradebook_id or self.gradebook_id\n ),\n data=grade_info,\n )", "def test_compute_grade_for_non_fa(self):\n run3_data = self.user_edx_data.get_run_data(self.run_no_fa.edx_course_key)\n run4_data = self.user_edx_data.get_run_data(self.run_no_fa_with_cert.edx_course_key)\n\n grade3_from_cur_grade = api._compute_grade_for_non_fa(run3_data)\n grade4_from_cert = api._compute_grade_for_non_fa(run4_data)\n\n assert isinstance(grade3_from_cur_grade, api.UserFinalGrade)\n assert isinstance(grade4_from_cert, api.UserFinalGrade)\n\n assert grade3_from_cur_grade.passed is False\n assert grade3_from_cur_grade.grade == self.current_grades.get(\n self.run_no_fa.edx_course_key).data.get('percent')\n # this is true if the enrollment is verified\n assert grade3_from_cur_grade.payed_on_edx is True\n assert grade4_from_cert.passed is True\n assert grade4_from_cert.grade == self.current_grades.get(\n self.run_no_fa_with_cert.edx_course_key).data.get('percent')\n # this is True as long as the certificate is verified\n assert grade4_from_cert.payed_on_edx is True", "def save_grade(request, course_id):\r\n\r\n course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n _check_access(request.user, course_key)\r\n\r\n if request.method != 'POST':\r\n raise Http404\r\n p = request.POST\r\n required = set(['score', 'feedback', 'submission_id', 'location', 'submission_flagged'])\r\n skipped = 'skipped' in p\r\n #If the instructor has skipped grading the submission, then there will not be any rubric scores.\r\n #Only add in the rubric scores if the instructor has not skipped.\r\n if not skipped:\r\n required.add('rubric_scores[]')\r\n actual = set(p.keys())\r\n missing = required - actual\r\n if len(missing) > 0:\r\n return _err_response('Missing required keys {0}'.format(\r\n ', '.join(missing)))\r\n\r\n success, message = check_feedback_length(p)\r\n if not success:\r\n return _err_response(message)\r\n\r\n grader_id = 
unique_id_for_user(request.user)\r\n\r\n location = course_key.make_usage_key_from_deprecated_string(p['location'])\r\n\r\n try:\r\n result = staff_grading_service().save_grade(course_key,\r\n grader_id,\r\n p['submission_id'],\r\n p['score'],\r\n p['feedback'],\r\n skipped,\r\n p.getlist('rubric_scores[]'),\r\n p['submission_flagged'])\r\n except GradingServiceError:\r\n #This is a dev_facing_error\r\n log.exception(\r\n \"Error saving grade in the staff grading interface in open ended grading. Request: {0} Course ID: {1}\".format(\r\n request, course_id))\r\n #This is a staff_facing_error\r\n return _err_response(STAFF_ERROR_MESSAGE)\r\n except ValueError:\r\n #This is a dev_facing_error\r\n log.exception(\r\n \"save_grade returned broken json in the staff grading interface in open ended grading: {0}\".format(\r\n result_json))\r\n #This is a staff_facing_error\r\n return _err_response(STAFF_ERROR_MESSAGE)\r\n\r\n if not result.get('success', False):\r\n #This is a dev_facing_error\r\n log.warning(\r\n 'Got success=False from staff grading service in open ended grading. Response: {0}'.format(result_json))\r\n return _err_response(STAFF_ERROR_MESSAGE)\r\n\r\n # Ok, save_grade seemed to work. Get the next submission to grade.\r\n return HttpResponse(json.dumps(_get_next(course_id, grader_id, location)),\r\n mimetype=\"application/json\")", "def test_compute_grade_odd_grade(self, odd_value):\n # test for grade computed on current grades\n test_values = (\n (self.run_fa, api._compute_grade_for_fa, ),\n (self.run_no_fa, api._compute_grade_for_non_fa, ),\n )\n for course_run, grade_func in test_values:\n course_key = course_run.edx_course_key\n current_grade = self.current_grades[course_key]\n current_grade.data['percent'] = odd_value\n current_grade.save()\n user_edx_data = CachedEdxUserData(self.user)\n run_data = user_edx_data.get_run_data(course_key)\n grade = grade_func(run_data)\n assert grade.grade == 0.0", "def test_grading_exception(self):\r\n all_gradesets, all_errors = self._gradesets_and_errors_for(self.course.id, self.students)\r\n student1, student2, student3, student4, student5 = self.students\r\n self.assertEqual(\r\n all_errors,\r\n {\r\n student3: \"I don't like student3\",\r\n student4: \"I don't like student4\"\r\n }\r\n )\r\n\r\n # But we should still have five gradesets\r\n self.assertEqual(len(all_gradesets), 5)\r\n\r\n # Even though two will simply be empty\r\n self.assertFalse(all_gradesets[student3])\r\n self.assertFalse(all_gradesets[student4])\r\n\r\n # The rest will have grade information in them\r\n self.assertTrue(all_gradesets[student1])\r\n self.assertTrue(all_gradesets[student2])\r\n self.assertTrue(all_gradesets[student5])", "def test_multiple_averages(self):\n user = self.make_user()\n enrollment = EnrollmentFactory(grade_level__school_year__school=user.school)\n GradeFactory(\n score=50,\n student=enrollment.student,\n graded_work__course_task__course__grade_levels=[enrollment.grade_level],\n )\n GradeFactory(\n score=100,\n student=enrollment.student,\n graded_work__course_task__course__grade_levels=[enrollment.grade_level],\n )\n GradeFactory(\n graded_work__course_task__course__grade_levels=[enrollment.grade_level]\n )\n\n with self.login(user):\n self.get_check_200(\"reports:progress\", pk=enrollment.id)\n\n assert self.get_context(\"courses\")[0][\"course_average\"] == 50\n assert self.get_context(\"courses\")[1][\"course_average\"] == 100", "def update_lms_grades(request=None, sequence=None):\n outcome_request = 
OutcomeRequest().from_post_request(request) if request else OutcomeRequest()\n\n outcome_service = sequence.outcome_service\n if outcome_service is None:\n log.info(f\"Sequence: {sequence} doesn't contain an outcome service, grade is not sent.\")\n return\n consumer = outcome_service.lms_lti_connection\n\n outcome_request.consumer_key = consumer.consumer_key\n outcome_request.consumer_secret = consumer.consumer_secret\n outcome_request.lis_outcome_service_url = outcome_service.lis_outcome_service_url\n outcome_request.lis_result_sourcedid = sequence.lis_result_sourcedid\n\n log.debug(\"Update LMS grades. Used sequence = {} is completed = {}, grading_policy = {}\".format(\n sequence, sequence.completed, sequence.collection_order.grading_policy\n ))\n\n score = sequence.collection_order.grading_policy.calculate_grade(sequence)\n outcome_request.post_replace_result(score)\n lms_response = outcome_request.outcome_response\n user_id = sequence.lti_user\n if lms_response.is_success():\n log.info(\"Successfully sent updated grade to LMS. Student:{}, grade:{}, comment: success\".format(\n user_id, score\n ))\n elif lms_response.is_processing():\n log.info(\"Grade update is being processed by LMS. Student:{}, grade:{}, comment: processing\".format(\n user_id, score\n ))\n elif lms_response.has_warning():\n log.warning(\"Grade update response has warnings. Student:{}, grade:{}, comment: warning\".format(\n user_id, score\n ))\n else:\n log.error(\"Grade update request failed. Student:{}, grade:{}, comment:{}\".format(\n user_id, score, lms_response.code_major\n ))", "def save_grade(self, data):\r\n\r\n required = ['location', 'submission_id', 'submission_key', 'score', 'feedback', 'submission_flagged', 'answer_unknown']\r\n if data.get(\"submission_flagged\", False) in [\"false\", False, \"False\", \"FALSE\"]:\r\n required.append(\"rubric_scores[]\")\r\n success, message = self._check_required(data, set(required))\r\n if not success:\r\n return self._err_response(message)\r\n\r\n success, message = self._check_feedback_length(data)\r\n if not success:\r\n return self._err_response(message)\r\n\r\n data_dict = {k:data.get(k) for k in required}\r\n if 'rubric_scores[]' in required:\r\n data_dict['rubric_scores'] = data.getall('rubric_scores[]')\r\n data_dict['grader_id'] = self.system.anonymous_student_id\r\n\r\n try:\r\n response = self.peer_gs.save_grade(**data_dict)\r\n success, location_data = self.query_data_for_location(data_dict['location'])\r\n #Don't check for success above because the response = statement will raise the same Exception as the one\r\n #that will cause success to be false.\r\n response.update({'required_done' : False})\r\n if 'count_graded' in location_data and 'count_required' in location_data and int(location_data['count_graded'])>=int(location_data['count_required']):\r\n response['required_done'] = True\r\n return response\r\n except GradingServiceError:\r\n # This is a dev_facing_error\r\n log.exception(\"\"\"Error saving grade to open ended grading service. 
server url: {0}\"\"\"\r\n .format(self.peer_gs.url)\r\n )\r\n # This is a student_facing_error\r\n return {\r\n 'success': False,\r\n 'error': EXTERNAL_GRADER_NO_CONTACT_ERROR\r\n }", "def __ui_grade_student(self):\n student_id = input(\"Give student ID: \")\n discipline_name = input(\"Give discipline discipline_name: \")\n\n try:\n grade_value = input(\"Give grade: \")\n if not self.__student_controller.student_has_discipline(student_id, discipline_name):\n print(\"The student isn't enrolled at the given discipline!\")\n return\n self.__grade_controller.add_grade(\n student_id,\n self.__discipline_controller.get_id_by_name(discipline_name),\n grade_value\n )\n print(\"Grade successful! \\n\")\n\n except GradeException as ge:\n print(ge)\n return\n except StudentException as se:\n print(se)\n return\n except RepositoryException as re:\n print(re)\n return\n except ValueError as ve:\n print(ve)\n return", "def add_grade(self, student, grade):\n try:\n self.grades[student.id].append(grade)\n except KeyError:\n raise ValueError('Student not in Grade Book.')", "def test_update_single_grading_period(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def grade(student, request, course, keep_raw_scores=False):\r\n with manual_transaction():\r\n return _grade(student, request, course, keep_raw_scores)", "def assign_grade(github, title, grade):\n QUERY = \"\"\"\n INSERT INTO Grades VALUES (?, ?, ?)\n \"\"\"\n\n db_cursor.execute(QUERY, (github, title, grade))\n db_connection.commit()\n\n print \"Successfully graded %s with a %s on %s\" % (github, grade, title)", "def test_dropping_grades_normally(self):\r\n self.dropping_setup()\r\n self.dropping_homework_stage1()\r\n\r\n self.assertEqual(self.score_for_hw('homework1'), [1.0, 0.0])\r\n self.assertEqual(self.score_for_hw('homework2'), [1.0, 1.0])\r\n self.assertEqual(self.earned_hw_scores(), [1.0, 2.0, 0]) # Order matters\r\n self.check_grade_percent(0.75)", "def test_none_grade(self):\r\n self.basic_setup()\r\n self.check_grade_percent(0)\r\n self.assertEqual(self.get_grade_summary()['grade'], None)", "def set_grade(github, project_title, grade_value):\n \n QUERY = \"\"\"INSERT INTO Grades VALUES (?, ?, ?)\"\"\"\n \n db_cursor.execute(QUERY, (github, project_title, grade_value))\n db_connection.commit()\n\n \n # print \"%s %s's grade: %s\" % (first_name, last_name, grade)\n print \"Successfully graded %s on Project %s: %s\" % (github, project_title, grade_value)", "def data_downgrades():\n pass", "def data_downgrades():\n pass", "def assign_grade(github, title, grade):\n QUERY = \"\"\"INSERT INTO Grades VALUES(?,?,?)\"\"\"\n db_cursor.execute(QUERY, (github, title, grade))\n db_connection.commit()\n print \"Success! 
%s received a grade of %s on the %s project!\" % (github, grade, title)", "def test_compute_grade_for_fa_certs(self):\n course_key = self.run_fa_with_cert.edx_course_key\n\n run_data = CachedEdxUserData(self.user).get_run_data(course_key)\n grade = api._compute_grade_for_fa(run_data)\n assert grade.passed is self.current_grades.get(course_key).data.get('passed')\n assert grade.grade == self.current_grades.get(course_key).data.get('percent')\n assert grade.payed_on_edx is True", "def edit_grade(self, username: str, token: str, course_abbreviation: str, student_id: str, updated_grade: float) -> bool:\n\n # Validate user first\n if not self.validate(username=username, token=token, check_privilege='instructor'):\n raise RuntimeError(\"User not verified!\")\n\n # Get the student's UID\n student_uid = self.get_uid(username=student_id)\n\n # Get a DB cursor\n cursor = self._db_connection.cursor()\n\n # Get the course ID from the abbreviation\n cursor.execute('''\n SELECT course_id FROM courses WHERE course_abbreviation LIKE ?;\n ''', (course_abbreviation,))\n db_result = cursor.fetchone()\n\n # If no associated courses are found\n if db_result is None:\n RuntimeError(f\"Could not find course associated with: {course_abbreviation}\")\n\n # Extract the course ID from the returned tuple\n course_id = db_result[0]\n\n # Run update in the DB\n cursor.execute('''\n UPDATE enrollment_records SET grade = ? WHERE uid = ? AND course_id = ?\n ''', (updated_grade, student_uid, course_id))\n self._db_connection.commit()\n\n return True", "def addGrade(self, student, grade):\n try:\n self.grades[student.getIDNumber()].append(grade)\n except KeyError:\n raise ValueError(\"Student not in Gradebook\")", "def test_freeze_user_final_grade(self, mock_refr):\n final_grade = api.freeze_user_final_grade(self.user, self.run_fa)\n assert final_grade is not None\n mock_refr.assert_called_once_with(self.user, self.run_fa.courseware_backend)\n fg_qset = FinalGrade.objects.filter(user=self.user, course_run=self.run_fa)\n assert fg_qset.exists() is True\n fg_status = fg_qset.first()\n assert fg_status.status == FinalGradeStatus.COMPLETE\n assert fg_status.user == self.user\n assert fg_status.course_run == self.run_fa\n assert fg_status.grade == final_grade.grade\n assert fg_status.passed == final_grade.passed", "def take_test(exam, student):\n\n student.score = exam.administer()", "def test_get_score(self):\r\n\r\n score_dict = self.get_score(True, 3, 3)\r\n\r\n # Score should be 1.0.\r\n self.assertEqual(score_dict[\"score\"], 1.0)\r\n\r\n # Testing score after data is stored in student_data_for_location in xmodule.\r\n _score_dict = self.peer_grading.get_score()\r\n\r\n # Score should be 1.0.\r\n self.assertEqual(_score_dict[\"score\"], 1.0)", "def test_b_grade_exact(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.check_grade_percent(0.33)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'B')", "def grade_this_guy(self):\n log.info(\"Function has been called\")\n answer = self._file_storage_path(self.raw_answer['sha1'], self.raw_answer['filename'])\n question = self._question_storage_path(self.raw_question['sha1'], self.raw_question['filename'])\n solution = self._solution_storage_path(self.raw_solution['sha1'], self.raw_solution['filename'])\n\n\n answer = os.path.join(IMAGEDIFF_ROOT, answer)\n question = os.path.join(IMAGEDIFF_ROOT, question)\n solution = os.path.join(IMAGEDIFF_ROOT, solution)\n\n self.score = grade(question, answer, solution)\n if self.score > 
self.points:\n self.score = self.points\n self.points=float(self.max_score())\n self.save()\n if self.score >= 0: \n self.runtime.publish(self, 'grade',{ 'value': self.score, 'max_value':self.max_score(),})\n log.info(\"runtime.publish-ed\")\n self.save()\n return Response(json_body=self.student_state())", "def test_submissions_api_overrides_scores(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.submit_question_answer('p2', {'2_1': 'Correct'})\r\n self.submit_question_answer('p3', {'2_1': 'Incorrect'})\r\n self.check_grade_percent(0.67)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'B')\r\n\r\n # But now we mock out a get_scores call, and watch as it overrides the\r\n # score read from StudentModule and our student gets an A instead.\r\n with patch('submissions.api.get_scores') as mock_get_scores:\r\n mock_get_scores.return_value = {\r\n self.problem_location('p3').to_deprecated_string(): (1, 1)\r\n }\r\n self.check_grade_percent(1.0)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'A')", "def take_test(exam, student):\n\n student.score = exam.administer()\n return student.score", "def grade(self):\n if round(self.numAvg,0) >= 70:\n return round(self.numAvg,0)\n elif self.PassSummer:\n return 70\n elif round(self.numAvg,0) >= 55 and not self.PassSummer:\n return round(self.numAvg,0)\n else:\n return 55", "def _assert_num_graded(self, student_id, location, num_graded, num_required):\r\n\r\n # Unlike the actual ORA service,\r\n # we keep track of counts on a per-student basis.\r\n # This means that every user starts with N essays to grade,\r\n # and as they grade essays, that number decreases.\r\n # We do NOT simulate students adding more essays to the queue,\r\n # and essays that the current student submits are NOT graded\r\n # by other students.\r\n num_pending = StudentState.INITIAL_ESSAYS_AVAILABLE - num_graded\r\n\r\n # Notifications\r\n response = requests.get(\r\n self._peer_url('get_notifications'),\r\n params={'student_id': student_id, 'course_id': 'test course'}\r\n )\r\n self._assert_response(response, {\r\n 'version': 1, 'success': True,\r\n 'count_required': num_required,\r\n 'student_sub_count': self.server.DUMMY_DATA['student_sub_count'],\r\n 'count_graded': num_graded,\r\n 'count_available': num_pending\r\n })\r\n\r\n # Location data\r\n if location is not None:\r\n response = requests.get(\r\n self._peer_url('get_data_for_location'),\r\n params={'location': location, 'student_id': student_id}\r\n )\r\n self._assert_response(response, {\r\n 'version': 1, 'success': True,\r\n 'count_required': num_required,\r\n 'student_sub_count': self.server.DUMMY_DATA['student_sub_count'],\r\n 'count_graded': num_graded,\r\n 'count_available': num_pending\r\n })", "def save(self, **kwargs):\n # Clean up any notifications which might have already been associated with this object\n self.notifications.all().delete()\n super(Grade, self).save(**kwargs)\n\n grades = Grade.objects.filter(student=self.student)\n attendances = AttendanceRecord.objects.filter(enrollment__student=self.student)\n behavior_effors = Behavior.objects.filter(enrollment__student=self.student)\n test_scores = StandardizedTestScore.objects.filter(student=self.student)\n\n calculator = GradeNotificationCalculator(student=self.student,\n grades=grades,\n attendances=attendances,\n behavior_efforts=behavior_effors,\n test_scores=test_scores)\n notifications = calculator.get_notifications(self)\n my_student = self.student\n for notification in notifications:\n # 
If there are already unread grade notifications for this user for this student, get rid of them\n Notification.objects.filter(user=my_student.case_manager,\n unread=True,\n student=my_student,\n title=notification.title,\n category=constants.NotificationCategories.GRADE).delete()\n # Add the new notification\n Notification.objects.create(user=my_student.case_manager,\n partial_link=\"/grades\",\n unread=True,\n category=constants.NotificationCategories.GRADE,\n content_object=self,\n **notification._asdict())", "def test_compute_grade_for_non_fa_enrollment_not_verified(self):\n course_key = self.run_no_fa.edx_course_key\n enrollment = self.enrollments.get(course_key)\n enrollment.data['mode'] = 'audit'\n enrollment.save()\n run3_data = CachedEdxUserData(self.user).get_run_data(course_key)\n grade3_from_cur_grade = api._compute_grade_for_non_fa(run3_data)\n assert grade3_from_cur_grade.passed is False\n assert grade3_from_cur_grade.grade == self.current_grades.get(\n self.run_no_fa.edx_course_key).data.get('percent')\n assert grade3_from_cur_grade.payed_on_edx is False", "def send_grade(consumer_key, edx_url, result_id, grade):\n if consumer_key not in settings.LTI_OAUTH_CREDENTIALS:\n raise SendGradeFailure(\"Invalid consumer_key %s\" % consumer_key)\n body = generate_request_xml(str(uuid.uuid1()), \"replaceResult\", result_id, grade)\n secret = settings.LTI_OAUTH_CREDENTIALS[consumer_key]\n response, content = _post_patched_request(consumer_key, secret, body, edx_url, \"POST\", \"application/xml\")\n if isinstance(content, bytes):\n content = content.decode(\"utf8\")\n if \"<imsx_codeMajor>success</imsx_codeMajor>\" not in content:\n raise SendGradeFailure(\"Send grades to edX returned %s\" % response.status)", "def get_grades(self, test=None):\r\n target_url = \"https://elearning.linnbenton.edu\\\r\n/grade/report/overview/index.php?id=2721\"\r\n\r\n if test is not None:\r\n target_url = test\r\n\r\n self.web_driver.get(target_url)\r\n self.write_log(f\"Navigating to {target_url}\")\r\n time.sleep(5)\r\n self.write_log(\"Scraping begun.\")\r\n return self.scrape_grades()", "def test_lti20_put_set_score_success(self):\r\n self.setup_system_xmodule_mocks_for_lti20_request_test()\r\n mock_request = self.get_signed_lti20_mock_request(self.GOOD_JSON_PUT)\r\n # Now call the handler\r\n response = self.xmodule.lti_2_0_result_rest_handler(mock_request, \"user/abcd\")\r\n # Now assert\r\n self.assertEqual(response.status_code, 200)\r\n self.assertEqual(self.xmodule.module_score, 0.1)\r\n self.assertEqual(self.xmodule.score_comment, u\"ಠ益ಠ\")\r\n (_, evt_type, called_grade_obj), _ = self.system.publish.call_args\r\n self.assertEqual(evt_type, 'grade')\r\n self.assertEqual(called_grade_obj, {'user_id': self.USER_STANDIN.id, 'value': 0.1, 'max_value': 1.0})", "def test_update(self):\r\n grader = {\r\n \"id\": 0,\r\n \"type\": \"manual\",\r\n \"min_count\": 5,\r\n \"drop_count\": 10,\r\n \"short_label\": \"yo momma\",\r\n \"weight\": 17.3,\r\n }\r\n resp = self.client.ajax_post(self.url + '/0', grader)\r\n self.assertEqual(resp.status_code, 200)\r\n obj = json.loads(resp.content)\r\n self.assertEqual(obj, grader)\r\n current_graders = CourseGradingModel.fetch(self.course.id).graders\r\n self.assertEqual(len(self.starting_graders), len(current_graders))", "def test_b_grade_above(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.submit_question_answer('p2', {'2_1': 'Correct'})\r\n self.check_grade_percent(0.67)\r\n 
self.assertEqual(self.get_grade_summary()['grade'], 'B')", "def publish_grade(self):\r\n score = self.lcp.get_score()\r\n self.runtime.publish(\r\n self,\r\n 'grade',\r\n {\r\n 'value': score['score'],\r\n 'max_value': score['total'],\r\n }\r\n )\r\n\r\n return {'grade': score['score'], 'max_grade': score['total']}", "def get_grade(self):\n return self.__grade_value", "def student_grades(student, course):\n cg = CourseGradeFactory().create(student, course)\n return cg.summary", "def get_grade(course_det):\n return course_det[1]", "def test_recalculate_progress(self):\n self._build_sample_graph()\n self._create_lessons() # 3 lessons in unit 1\n self.student = models.Student(user_id='1')\n self._create_linear_progress() # Lesson 1 and 2 completed\n self.lesson1.properties[SKILLS_KEY] = [self.sa.id]\n self.lesson2.properties[SKILLS_KEY] = [self.sb.id]\n self.lesson3.properties[SKILLS_KEY] = [self.sa.id,\n self.sc.id]\n self.course.save()\n\n tracker = SkillCompletionTracker(self.course)\n lprogress_tracker = UnitLessonCompletionTracker(self.course)\n lprogress = lprogress_tracker.get_or_create_progress(self.student)\n expected = {\n self.sa: tracker.IN_PROGRESS,\n self.sb: tracker.COMPLETED,\n self.sc: tracker.NOT_ATTEMPTED\n }\n for skill, expected_progress in expected.iteritems():\n self.assertEqual(expected_progress,\n tracker.recalculate_progress(lprogress_tracker,\n lprogress, skill))", "def computeGrades(e1, e2, a):\n \n a = assignmentScores\n a.sort()\n i=0\n while i<10:\n sum+=sum a[i]\n avg = sum/10\n \n grade = ((e1 + e2) /2) * 0.4 + (avg) * 0.6\n \n return grade\n \n if grade >= 90 and grade <= 100:\n return(\"A\")\n \n elif grade >= 80 and grade < 90:\n return(\"B\")\n \n elif grade >= 70 and grade < 80:\n return(\"C\")\n \n elif grade >= 60 and grade < 70:\n return(\"D\")\n \n elif grade < 60:\n return(\"F\")", "def calc_grade(self, average):\n if 95 <= average:\n return 'S'\n elif 90 <= average:\n return 'A'\n elif 80 <= average:\n return 'B'\n elif 70 <= average:\n return 'C'\n elif 60 <= average:\n return 'D'\n else:\n return 'F'", "def test_update_impact_level(self):\n pass", "def update_employee(employee):\n employee_id = get_employee_input_int(\"Enter the employee id you want to update\")\n newGrade = get_employee_input_int(\"Enter the new grade for \")\n db.update_employee(employee_id, newGrade)\n print(employee.full_name + \"'s grade value has been updated to :-> \", newGrade)", "def update_g_score(self, value):\n self.g_score = value", "def _grade_with_errors(student, request, course, keep_raw_scores=False):\r\n if student.username in ['student3', 'student4']:\r\n raise Exception(\"I don't like {}\".format(student.username))\r\n\r\n return grade(student, request, course, keep_raw_scores=keep_raw_scores)", "def test_freeze_user_final_grade_error2(self, raise_on_exception, mock_refr, mock_get_fg):\n mock_refr.side_effect = AttributeError\n if not raise_on_exception:\n final_grade = api.freeze_user_final_grade(self.user, self.run_fa, raise_on_exception=raise_on_exception)\n assert final_grade is None\n else:\n with self.assertRaises(FreezeGradeFailedException):\n api.freeze_user_final_grade(self.user, self.run_fa, raise_on_exception=raise_on_exception)\n assert mock_get_fg.called is False\n mock_refr.assert_called_once_with(self.user, self.run_fa.courseware_backend)\n assert FinalGrade.objects.filter(user=self.user, course_run=self.run_fa).exists() is False\n\n con = get_redis_connection(\"redis\")\n failed_users_cache_key = 
api.CACHE_KEY_FAILED_USERS_BASE_STR.format(self.run_fa.edx_course_key)\n failed_users_count = con.llen(failed_users_cache_key)\n failed_users_list = list(map(int, con.lrange(failed_users_cache_key, 0, failed_users_count)))\n assert self.user.id in failed_users_list", "def test_grade_not_in_range(self):\r\n self.xmodule.verify_oauth_body_sign = Mock()\r\n request = Request(self.environ)\r\n request.body = self.get_request_body(params={'grade': '10'})\r\n response = self.xmodule.grade_handler(request, '')\r\n real_response = self.get_response_values(response)\r\n expected_response = {\r\n 'action': None,\r\n 'code_major': 'failure',\r\n 'description': 'Request body XML parsing error: score value outside the permitted range of 0-1.',\r\n 'messageIdentifier': 'unknown',\r\n }\r\n self.assertEqual(response.status_code, 200)\r\n self.assertDictEqual(expected_response, real_response)", "def test_grade_not_in_range(self):\n self.xmodule.verify_oauth_body_sign = Mock()\n request = Request(self.environ)\n request.body = self.get_request_body(params={'grade': '10'})\n response = self.xmodule.grade_handler(request, '')\n real_response = self.get_response_values(response)\n expected_response = {\n 'action': None,\n 'code_major': 'failure',\n 'description': 'Request body XML parsing error: score value outside the permitted range of 0-1.',\n 'messageIdentifier': 'unknown',\n }\n assert response.status_code == 200\n self.assertDictEqual(expected_response, real_response)", "def test_bad_grade_decimal(self):\n self.xmodule.verify_oauth_body_sign = Mock()\n request = Request(self.environ)\n request.body = self.get_request_body(params={'grade': '0,5'})\n response = self.xmodule.grade_handler(request, '')\n real_response = self.get_response_values(response)\n msg = \"could not convert string to float: '0,5'\"\n expected_response = {\n 'action': None,\n 'code_major': 'failure',\n 'description': f'Request body XML parsing error: {msg}',\n 'messageIdentifier': 'unknown',\n }\n assert response.status_code == 200\n self.assertDictEqual(expected_response, real_response)", "def gpa(self, new_gpa):\n if self.MIN_GPA <= new_gpa <= self.MAX_GPA:\n self._gpa = new_gpa\n else:\n raise ValueError", "def check_grade_percent(self, percent):\r\n grade_summary = self.get_grade_summary()\r\n self.assertEqual(grade_summary['percent'], percent)", "def test_bad_grade_decimal(self):\r\n self.xmodule.verify_oauth_body_sign = Mock()\r\n request = Request(self.environ)\r\n request.body = self.get_request_body(params={'grade': '0,5'})\r\n response = self.xmodule.grade_handler(request, '')\r\n real_response = self.get_response_values(response)\r\n expected_response = {\r\n 'action': None,\r\n 'code_major': 'failure',\r\n 'description': 'Request body XML parsing error: invalid literal for float(): 0,5',\r\n 'messageIdentifier': 'unknown',\r\n }\r\n self.assertEqual(response.status_code, 200)\r\n self.assertDictEqual(expected_response, real_response)", "def AddGrade(self, student, discipline, grade_value):\n if not self.__data['s'].HasKey(student.ID):\n raise NonExistentItemIDError(\"Student does not exist.\")\n if not self.__data['d'].HasKey(discipline.ID):\n raise NonExistentItemIDError(\"Discipline does not exist.\")\n self.__data['g'].AddItems([Grade(self.__data['g'].GetSafeKey(), student.ID, discipline.ID, grade_value)])\n self.__undo_list.append(['g'])\n self.__redo_list.clear()", "def test_strength(self):\n\n self.sold.health = 0.7\n self.sold.experience = 10\n self.assertEqual(self.sold.strength, 0.9)", "def 
test_radio_grades(self):\r\n\r\n for name, inputs in self.TEST_INPUTS.iteritems():\r\n # Turn submission into the form expected when grading this problem.\r\n submission = self._make_answer_dict(inputs)\r\n # Lookup the problem_name, and the whether this test problem\r\n # and inputs should be graded as correct or incorrect.\r\n problem_name, correctness = self.TEST_SCENARIOS[name]\r\n # Load the args needed to build the problem for this test.\r\n problem_args = self.TEST_PROBLEM_ARGS[problem_name]\r\n test_choices = problem_args[\"choices\"]\r\n test_script = problem_args[\"script\"]\r\n # Build the actual problem for the test.\r\n test_problem = self._make_problem(test_choices, 'radiotextgroup', test_script)\r\n # Make sure the actual grade matches the expected grade.\r\n self.assert_grade(\r\n test_problem,\r\n submission,\r\n correctness,\r\n msg=\"{0} should be {1}\".format(\r\n name,\r\n correctness\r\n )\r\n )", "def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores,\r\n submission_flagged):\r\n data = {'course_id': course_id.to_deprecated_string(),\r\n 'submission_id': submission_id,\r\n 'score': score,\r\n 'feedback': feedback,\r\n 'grader_id': grader_id,\r\n 'skipped': skipped,\r\n 'rubric_scores': rubric_scores,\r\n 'rubric_scores_complete': True,\r\n 'submission_flagged': submission_flagged}\r\n\r\n result = self._render_rubric(self.post(self.save_grade_url, data=data))\r\n tags = [u'course_id:{}'.format(course_id)]\r\n self._record_result('save_grade', result, tags)\r\n return result", "def test_final_grade_with_no_certificate(self, year, cert_generated):\n self.run_1.start_date = datetime.datetime(year, 10, 1, tzinfo=pytz.UTC)\n self.run_1.save()\n FinalGradeFactory.create(\n user=self.user,\n course_run=self.run_1,\n passed=True,\n status='complete',\n grade=0.8\n )\n ExamRunFactory.create(course=self.run_1.course)\n CourseRunGradingStatus.objects.create(course_run=self.run_1, status='complete')\n cert_qset = MicromastersProgramCertificate.objects.filter(user=self.user, program=self.program)\n assert cert_qset.exists() is False\n api.generate_program_certificate(self.user, self.program)\n assert cert_qset.exists() is cert_generated", "def test_update_goal_metric(self):\n pass", "def add_course_grade(self, course, grade):\n course_grade_tuple = (course, grade)\n self.courses_grades.append(course_grade_tuple)", "def test_peer_calibrate_and_grade(self):\r\n # Initially, the student should NOT be able to grade peers,\r\n # because he/she hasn't submitted any essays.\r\n self.course_nav.go_to_sequential('Peer Module')\r\n self.assertIn(\"You currently do not have any peer grading to do\", self.peer_calibrate.message)\r\n\r\n # Submit an essay\r\n self.course_nav.go_to_sequential(self.peer_problem_name)\r\n self.submit_essay('peer', 'Censorship in the Libraries')\r\n\r\n # Need to reload the page to update the peer grading module\r\n self.course_info_page.visit()\r\n self.tab_nav.go_to_tab('Courseware')\r\n self.course_nav.go_to_section('Test Section', 'Test Subsection')\r\n\r\n # Select the problem to calibrate\r\n self.course_nav.go_to_sequential('Peer Module')\r\n self.assertIn(self.peer_problem_name, self.peer_grade.problem_list)\r\n self.peer_grade.select_problem(self.peer_problem_name)\r\n\r\n # Calibrate\r\n self.peer_confirm.start(is_calibrating=True)\r\n rubric = self.peer_calibrate.rubric\r\n self.assertEqual(rubric.categories, [\"Writing Applications\", \"Language Conventions\"])\r\n rubric.set_scores([0, 1])\r\n 
rubric.submit('peer')\r\n self.peer_calibrate.continue_to_grading()\r\n\r\n # Grade a peer\r\n self.peer_confirm.start()\r\n rubric = self.peer_grade.rubric\r\n self.assertEqual(rubric.categories, [\"Writing Applications\", \"Language Conventions\"])\r\n rubric.set_scores([0, 1])\r\n rubric.submit()\r\n\r\n # Expect to receive essay feedback\r\n # We receive feedback from all three peers, each of which\r\n # provide 2 scores (one for each rubric item)\r\n # Written feedback is a dummy value sent by the XQueue stub.\r\n self.course_nav.go_to_sequential(self.peer_problem_name)\r\n self.assertEqual(self.get_asynch_feedback('peer'), ['incorrect', 'correct'] * 3)\r\n\r\n # Verify the progress page\r\n self.progress_page.visit()\r\n scores = self.progress_page.scores('Test Section', 'Test Subsection')\r\n\r\n # First score is the self-assessment score, which we haven't answered, so it's 0/2\r\n # Second score is the AI-assessment score, which we haven't answered, so it's 0/2\r\n # Third score is peer-assessment, which we have answered, so it's 2/2\r\n self.assertEqual(scores, [(0, 2), (0, 2), (2, 2)])", "def setUp(self):\r\n super(CourseGraderUpdatesTest, self).setUp()\r\n self.url = get_url(self.course.id, 'grading_handler')\r\n self.starting_graders = CourseGradingModel(self.course).graders", "def grade_to_gpa(grade):\n\n letter_grade = \"\"\n gpa = 0.0\n\n if type(grade) is str:\n accepted_values = [\"A+\", \"A\", \"A-\", \"B+\", \"B\", \"B-\", \"FZ\"]\n\n # check that the grade is one of the accepted values\n if grade in accepted_values:\n\n # assign grade to letter_grade\n letter_grade = grade\n\n #If grade input is a string, but not an accepted value, raise a ValueError\n else:\n raise ValueError(\"Incorrect value. Grade must be an accepted letter grade.\")\n\n elif type(grade) is int:\n\n # check that grade is in the accepted range 0 to 100\n if 0 <= grade <= 100:\n\n # convert the numeric grade to a letter grade\n mark_to_letter = grade\n\n # assign the value to letter_grade\n # hint: letter_grade = mark_to_letter(grade)\n if mark_to_letter >= 90:\n letter_grade = \"A+\"\n elif mark_to_letter >= 85:\n letter_grade = \"A\"\n elif mark_to_letter >= 80:\n letter_grade = \"A-\"\n elif mark_to_letter >= 77:\n letter_grade = \"B+\"\n elif mark_to_letter >= 73:\n letter_grade = \"B\"\n elif mark_to_letter >= 70:\n letter_grade = \"B-\"\n else:\n letter_grade = \"FZ\"\n\n #If grade input is not in accepted range, raise ValueError\n else:\n raise ValueError(\"Incorrect value. 
Grade must be in the accepted range of 0 to 100.\")\n else:\n # raise a TypeError exception\n raise TypeError(\"Invalid type passed as parameter\")\n\n # write a long if-statement to convert letter_grade\n # assign the value to gpa\n if letter_grade == \"A+\":\n gpa = 4.0\n if letter_grade == \"A\":\n gpa = 4.0\n if letter_grade == \"A-\":\n gpa = 3.7\n if letter_grade == \"B+\":\n gpa = 3.3\n if letter_grade == \"B\":\n gpa = 3.0\n if letter_grade == \"B-\":\n gpa = 2.7\n if letter_grade == \"FZ\":\n gpa = 0.0\n\n #Return the gpa of the grade\n return gpa", "def test_update_score_multiple(self):\r\n self.update_score_multiple()\r\n score = self.openendedmodule.latest_score()\r\n self.assertEquals(score, 1)", "def this_is_the_grade(self, grade_to_set):\n\n\t\tcmds.intField(self.grade_intField, edit = True, value = grade_to_set['grade_value'])\n\t\tself.update_subcategory('intField')\n\t\tif grade_to_set['grade_value'] is not '':\n\t\t\tcmds.scrollField(self.comments_text_field, edit = True, text = grade_to_set['comment_text'])\n\t\t\tself.update_subcategory('comments_text')\n\t\tif grade_to_set['default_comments_text'] is not '':\t\n\t\t\tcmds.scrollField(self.default_comments, edit = True, text = grade_to_set['default_comments_text'])\n\t\t\tself.update_subcategory('default_comments_text')\n\t\tif grade_to_set['example_comments_text'] is not '':\n\t\t\tcmds.scrollField(self.example_comments, edit = True, text = grade_to_set['example_comments_text'])\n\t\t\tself.update_subcategory('example_comments_text')\n\n\t\tself.auto_flagged_list = grade_to_set.get('examples', [])\n\t\tself.log('auto_flagged_list updated: \\n{}'.format(self.auto_flagged_list))", "def test_linked_score(self):\r\n\r\n # Setup the peer grading module with the proper linked location.\r\n peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)\r\n\r\n score_dict = peer_grading.get_score()\r\n\r\n self.assertEqual(score_dict['score'], 1)\r\n self.assertEqual(score_dict['total'], 1)", "def test_compute_grade_for_non_fa_certs(self, certificate_type, status, grade_result, payed_edx_result):\n course_key = self.run_no_fa_with_cert.edx_course_key\n certificate = self.certificates[course_key]\n certificate.data.update(certificate_type=certificate_type, status=status)\n certificate.save()\n\n run_data = CachedEdxUserData(self.user).get_run_data(course_key)\n grade = api._compute_grade_for_non_fa(run_data)\n assert grade.passed is grade_result\n assert grade.grade == self.current_grades[course_key].data.get('percent')\n assert grade.payed_on_edx is payed_edx_result", "def test_update_software_asset_impact_level(self):\n pass", "def grade_conversion(grade):\n grade_converter = {\"A\": 4.00, \"A-\":3.67, \"B+\": 3.33, \"B\": 3.00, \"B-\": 2.67, \"C+\": 2.33, \"C\": 2.00, \"C-\": 1.67, \"D\": 1.00, \"F\": 0.0}\n while True:\n for val, val2 in grade_converter.items():\n if grade == val:\n return val2", "def test_only_students_courses(self):\n user = self.make_user()\n enrollment = EnrollmentFactory(grade_level__school_year__school=user.school)\n course = CourseFactory(grade_levels=[enrollment.grade_level])\n grade = GradeFactory(\n score=50,\n student=enrollment.student,\n graded_work__course_task__course=course,\n )\n grade_2 = GradeFactory(\n score=100,\n student=enrollment.student,\n graded_work__course_task__course=course,\n )\n GradeFactory(\n graded_work__course_task__course__grade_levels=[enrollment.grade_level]\n )\n\n with self.login(user):\n self.get_check_200(\"reports:progress\", 
pk=enrollment.id)\n\n assert self.get_context(\"courses\") == [\n {\n \"course\": grade.graded_work.course_task.course,\n \"grades\": [grade, grade_2],\n \"course_average\": 75,\n }\n ]", "def test_set_get_section_grader_ajax(self):\r\n grade_type_url = self.setup_test_set_get_section_grader_ajax()\r\n response = self.client.ajax_post(grade_type_url, {'graderType': u'Homework'})\r\n self.assertEqual(200, response.status_code)\r\n response = self.client.get_json(grade_type_url + '?fields=graderType')\r\n self.assertEqual(json.loads(response.content).get('graderType'), u'Homework')\r\n # and unset\r\n response = self.client.ajax_post(grade_type_url, {'graderType': u'notgraded'})\r\n self.assertEqual(200, response.status_code)\r\n response = self.client.get_json(grade_type_url + '?fields=graderType')\r\n self.assertEqual(json.loads(response.content).get('graderType'), u'notgraded')", "def test_e(self):\n user_dict = {'A': 3, 'B': 4, 'C': 5, 'D': 6, 'E': 7}\n user_key = 'e'\n self.assertEqual(7, switch_average(user_dict, user_key.upper()))", "def add_student(self, name: str, grade: int) -> None:\n school_grade = self.students.setdefault(grade, [])\n school_grade.append(name)\n school_grade.sort()", "def test_update_balance(self):\n current_year_tuple = (0.1, 0.1, 0.8)\n iteration_balance = 90\n contribution = 10\n expected_result = 110\n test_balance = investment_growth.update_balance(iteration_balance, contribution, current_year_tuple)\n self.assertEqual(test_balance, expected_result)", "def gr_u(ln, fn, gr):\r\n with conn:\r\n c.execute(\"\"\"UPDATE personnel SET grade=:grade\r\n WHERE first=:first COLLATE NOCASE AND last=:last COLLATE NOCASE\"\"\",\r\n {'first': fn, 'last': ln, 'grade': gr})\r\n\r\n print('New grade for ', fn, ln, end=': {}\\n'.format(gr))\r\n start()", "def test_freeze_user_final_grade_error3(self, raise_on_exception, mock_refr, mock_get_fg):\n mock_get_fg.side_effect = AttributeError\n if not raise_on_exception:\n final_grade = api.freeze_user_final_grade(self.user, self.run_fa, raise_on_exception=raise_on_exception)\n assert final_grade is None\n else:\n with self.assertRaises(FreezeGradeFailedException):\n api.freeze_user_final_grade(self.user, self.run_fa, raise_on_exception=raise_on_exception)\n mock_refr.assert_called_once_with(self.user, self.run_fa.courseware_backend)\n mock_get_fg.assert_called_once_with(self.user, self.run_fa)\n assert FinalGrade.objects.filter(user=self.user, course_run=self.run_fa).exists() is False\n\n con = get_redis_connection(\"redis\")\n failed_users_cache_key = api.CACHE_KEY_FAILED_USERS_BASE_STR.format(self.run_fa.edx_course_key)\n failed_users_count = con.llen(failed_users_cache_key)\n failed_users_list = list(map(int, con.lrange(failed_users_cache_key, 0, failed_users_count)))\n assert self.user.id in failed_users_list", "def test_get_final_grade(self, fa, non_fa):\n fa.return_value = 0.7\n non_fa.return_value = 0.53\n\n # run the test just with only one fa and one not fa course runs\n\n assert api.get_final_grade(self.user, self.run_fa) == 0.7\n assert fa.called is True\n assert fa.call_count == 1\n call_arg = fa.call_args_list[0][0][0]\n assert isinstance(call_arg, UserCachedRunData)\n assert call_arg.edx_course_key == self.run_fa.edx_course_key\n assert non_fa.called is False\n\n fa.reset_mock()\n\n assert api.get_final_grade(self.user, self.run_no_fa) == 0.53\n assert non_fa.called is True\n assert non_fa.call_count == 1\n call_arg = non_fa.call_args_list[0][0][0]\n assert isinstance(call_arg, UserCachedRunData)\n assert 
call_arg.edx_course_key == self.run_no_fa.edx_course_key\n assert fa.called is False", "def test_freeze_user_final_grade_error1(self, raise_on_exception, mock_refr, mock_get_fg):\n # case not ready to be frozen because the freeze date is in the future\n if not raise_on_exception:\n final_grade = api.freeze_user_final_grade(\n self.user, self.run_no_fa, raise_on_exception=raise_on_exception)\n assert final_grade is None\n else:\n with self.assertRaises(FreezeGradeFailedException):\n api.freeze_user_final_grade(\n self.user, self.run_no_fa, raise_on_exception=raise_on_exception)\n assert mock_refr.called is False\n assert mock_get_fg.called is False\n assert FinalGrade.objects.filter(user=self.user, course_run=self.run_no_fa).exists() is False", "def change_score(self, change: float = 1):\n self._score += change", "def update_score():\n pass", "def test_set_score_publish(self, credit_dict, result):\n self.xblock.credit_dict = credit_dict\n self.xblock.instructor_answer = 10\n self.xblock.set_score()\n self.xblock.runtime.publish.assert_called_with(\n self.xblock,\n 'grade',\n {\n 'value': result,\n 'max_value': 1,\n },\n )", "def _assert_symbolic_grade(\r\n self, problem, student_input, dynamath_input, expected_correctness,\r\n snuggletex_resp=\"\"\r\n ):\r\n input_dict = {'1_2_1': str(student_input),\r\n '1_2_1_dynamath': str(dynamath_input)}\r\n\r\n # Simulate what the Snuggletex server would respond\r\n with mock.patch.object(requests, 'post') as mock_post:\r\n mock_post.return_value.text = snuggletex_resp\r\n\r\n correct_map = problem.grade_answers(input_dict)\r\n\r\n self.assertEqual(\r\n correct_map.get_correctness('1_2_1'), expected_correctness\r\n )", "def test_self_get_grade_not_enrolled(self):\n # a user not enrolled in the course cannot request her grade\n unenrolled_user = UserFactory(password=self.password)\n self.client.login(username=unenrolled_user.username, password=self.password)\n resp = self.client.get(self.get_url(unenrolled_user.username))\n assert resp.status_code == status.HTTP_404_NOT_FOUND\n assert 'error_code' in resp.data\n assert resp.data['error_code'] == 'user_not_enrolled'" ]
[ "0.6894253", "0.67889714", "0.6747671", "0.6735463", "0.65195036", "0.63752735", "0.63277483", "0.6276599", "0.62044054", "0.6194973", "0.6135887", "0.6101513", "0.6052034", "0.5957101", "0.5942507", "0.59255475", "0.58964777", "0.5884683", "0.58781224", "0.58647764", "0.5830922", "0.5823659", "0.5820654", "0.57440203", "0.5729321", "0.57227397", "0.57199264", "0.5705244", "0.56837296", "0.56826293", "0.56766444", "0.56766444", "0.56541353", "0.56458145", "0.5624948", "0.5603122", "0.5592209", "0.55919635", "0.557402", "0.55583316", "0.55203366", "0.55165714", "0.55154985", "0.54987437", "0.5498299", "0.5440808", "0.5438714", "0.5435012", "0.54333246", "0.54015195", "0.53958845", "0.5390893", "0.5369581", "0.53673965", "0.53669083", "0.53574944", "0.53552496", "0.5349976", "0.53395", "0.5331028", "0.5326621", "0.532218", "0.5318505", "0.5316611", "0.5299955", "0.52967215", "0.5273549", "0.5269046", "0.5268859", "0.52683693", "0.5262795", "0.5258496", "0.52540225", "0.52487713", "0.52430296", "0.5241875", "0.5238879", "0.5234121", "0.5226904", "0.52134997", "0.52117497", "0.5200226", "0.52000034", "0.517571", "0.51684034", "0.5166609", "0.516443", "0.51562476", "0.51551604", "0.51511085", "0.51481164", "0.513147", "0.5127439", "0.5124349", "0.51232624", "0.5118205", "0.5114992", "0.51082623", "0.5099524", "0.5091985" ]
0.8471558
0
Return true if the socket managed by this connection is connected
def is_connected(self): return self.socket is not None and self.socket.connected and super(WebsocketTransport, self).is_connected()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_connected(self):\n if self._socket:\n return True\n else:\n return False", "def is_connected(self):\r\n return self.__socket is not None", "def is_connected(self):\n return self._socket is not None", "def getIsConnected(self):\n if self._socket == None:\n return False\n\n # Assume we are still connected. TODO: Do a test receive?\n return True", "def is_connected(cls,socket):\n pass", "def is_connected(self):\n return self._current_protocol is not None", "def is_connected(self):\n return self._port.is_connected()", "def is_connected(self):\n return \"_connection\" in self.__dict__", "def is_connected(self):\n\t\tif self._connection is None:\n\t\t\treturn False\n\n\t\treturn True", "def is_connected(self):\n return self._connection and self._connection.is_open", "def connected(self):\n return self.port.is_open", "def is_connected(self):\n if self.server: return True\n return False", "def connected(self):\n\n if self._connection:\n if self._connection.is_closed == True:\n return False\n else:\n return True\n else:\n return False", "def is_connected(self) -> bool:\n try:\n # When MSG_PEEK is used the data is treated as unread\n # and the next recv shall still return this data\n data = self.socket.recv(self.BUFFER_SIZE, socket.MSG_PEEK)\n if len(data) == 0:\n return False\n return True\n except ConnectionResetError:\n return False", "def connect(self) -> bool:\n if self.socket is None:\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.bind((self.host, self.port))\n self.socket.setblocking(False)\n self.socket.listen(1)\n if self.endpoint is None:\n if self.socket is not None:\n try:\n self.endpoint, _ = self.socket.accept()\n self.connected = True\n return True\n except (BlockingIOError, OSError):\n pass\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.bind((self.host, self.port))\n self.socket.setblocking(False)\n self.socket.listen(1)\n return self.connected", "def is_connected(self):\n if self.connected and self.connack_rec:\n return 1\n return 0", "def Connected(self):\r\n return self.Port.is_open", "def isConnected(self):\n if self._session is None:\n return False\n return self._session.isalive() is True", "def is_connected(self):\n return self.connector and self.connector.state == 'connected'", "def isConnected(self):\n\n return self._connection is not None", "def is_connected(self):\n return self.connector and self.connector.state == \"connected\"", "def is_connected(self) -> bool:\n pass", "def isConnected(self):\n return self.__cooperationClient.hasConnections()", "def connected(self):\n return self._connection_event.is_set()", "def is_connected(self):\n return self.serial_connection.isOpen()", "def connected(self) -> bool:\n return self.state == STATE_CONNECTED", "def connected(self) -> bool:\n return self._client is not None and not self._client.closed", "def is_connected(self):\n return self.is_connected", "def is_open(self):\n return self._socket is not None", "def is_connected(self) -> bool:\n return self._backend.is_connected", "def is_connected(self):\n return self.factory.is_connected", "def is_connected(self) -> bool:\n return False if self._snitun is None else self._snitun.is_connected", "def is_connected(self):\n return True", "def is_connected(self):\n # need to wrap in try/except b/c of wc3270's socket connection dynamics\n try:\n # this is basically a no-op, but it results in the the current status\n # getting updated\n self.exec_command(b\"ignore\")\n\n # connected status is like 'C(192.168.1.1)', 
disconnected is 'N'\n return self.status.connection_state.startswith(b\"C(\")\n except NotConnectedException:\n return False", "def is_connected(self):\n return self.connected_channel is not None", "def is_connected(self):\n return self._connected", "def is_connected(self):\n return self._connected", "def is_connected(self):\n return self._connected", "def is_connected(self):\n return self._connected", "def is_connected(self):\n return self._connected", "def is_connected(self):\n return self.connected", "def is_connected(self):\n if not self._host:\n if self._database:\n return True\n else:\n return False\n else:\n return self._factory.is_connected()", "def isConnected(self):\n return self.transport is not None and self.started", "def isConnected(self):\n return self._isConnected", "def is_connected():\r\n global connection\r\n if connection is None:\r\n return False\r\n else:\r\n return True", "def isconnected(self) -> bool:", "def isConnected():", "def isConnected(self):\n return self.connected", "def isconnected(self) -> bool:\n ...", "def is_connected(self) -> bool:\n return hasattr(_app_ctx_stack.top, \"zodb_connection\")", "def is_connected(self):\n return self._ws is not None", "def connected(self) -> bool:\n\t\treturn self._raw_result['data']['connected']", "def is_connected(self):\n\t\treturn call_sdk_function('PrlSrv_IsConnected', self.handle)", "def connected(self) -> bool:\n return self._connected", "def connected(self) -> bool:\n return self._connected", "def is_connected(self):\n output = self.run_commands(['q'])\n return output.find('Info: Found {0}'.format(self._connected)) != -1", "def is_connected(self) -> bool:", "def is_connected(self):\n return self.hw_connected", "def CheckIfConnecting(self):\n if self.CheckIfWiredConnecting() or self.CheckIfWirelessConnecting():\n return True\n else:\n return False", "def isConnected(self):\n return False", "def connected(self):\n return bool(self.serial)", "def is_connected(self, test=False):\n return self._server.is_connected()", "def is_connected(self):\n return self.hub.is_connected and self.client.is_running", "def is_connected():\n \n try:\n socket.create_connection((\"www.google.com\", 80))\n return True\n except OSError:\n pass\n return False", "def _connected(self):\n logging.debug(\"Socket connected.\")\n self._isConnected = True\n self._hasError = False", "def CheckIfWiredConnecting(self):\n if self.wired.connecting_thread:\n return self.wired.connecting_thread.is_connecting\n else:\n return False", "def is_connected(self):\n return False", "def status_check(self):\n try:\n client = self.connect()\n client.sys.is_initialized() # make an actual network connection\n return True\n except:\n return False", "def is_socket(self):\n return self._is_socket", "def check_connectivity(self):\n return self.connected", "def is_connected(self) -> bool:\n return self._imap is not None", "def connected(self) -> bool:\n raise NotImplementedError", "def CheckWiredConnectingMessage(self):\n if self.wired.connecting_thread:\n return self.wired.connecting_thread.GetStatus()\n else:\n return False", "def is_connected(self, port: Identifier) -> bool:\n recv_port_full = self.__kernel + port\n return recv_port_full in self.__peers", "def CheckIfWirelessConnecting(self):\n if self.wifi.connecting_thread:\n return self.wifi.connecting_thread.is_connecting\n else:\n return False", "def connected(self):\r\n return self._ws is not None and not self._ws.closed", "def check_socket(self):\n return self.__send_command(cmd=\"PING\")", "def 
is_connected():\r\n ipconfig_output = terminal('ipconfig | findstr /i gateway')\r\n if ipconfig_output != None:\r\n return any(i for i in ipconfig_output if i.isdigit())\r\n \r\n # Alternative way if ipconfig has error in some systems\r\n ## Slower than ipconfig workaround\r\n try:\r\n socket().connect(('8.8.8.8', 53))\r\n return True\r\n except:\r\n return False", "def is_connected(self):\n try:\n self.get_value()\n return True\n except:\n return False", "def Connected(self) -> bool:", "def Connected(self) -> bool:", "def isconnected(self):\n return self._wlan.isconnected()", "def is_connected():\n import socket\n try:\n host = socket.gethostbyname(\"www.gov.uk\")\n socket.create_connection((host, 80), 2)\n return True\n except:\n pass\n return False", "def connected(self):\n return False", "def get_connected(self) -> bool:\n try:\n return self._background_process.is_alive()\n except AttributeError:\n return False", "def is_connected(self):\n\t\treturn bool(call_sdk_function('PrlVmDev_IsConnected', self.handle))", "def connect(self):\n try:\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.connect(self.address)\n self.socket.setblocking(False)\n return True\n except:\n return False", "def check_port(self):\r\n\t\treturn(self.connect.is_open)", "def connection_open(self):\n return self.conn_status == self.CONN_OPEN", "def available(self) -> bool:\n return bool(self._connected)", "def can_connect_to(host: str, port: int) -> bool:\n\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:\n return bool(sock.connect_ex((host, port)) == 0)", "def is_connected(self) -> bool:\n\n return self.send(self.cmd.GET_SYSTEMLINE) == self.cmd.DEFAULT_SYSTEM_LINE", "def CheckWirelessConnectingMessage(self):\n if not self.wifi.connecting_thread == None:\n stat = self.wifi.connecting_thread.GetStatus()\n return stat\n else:\n return False", "def __CheckConnectStatus(self):\r\n if not self.tn:\r\n print \"Connection is down!\"\r\n return False\r\n else:\r\n print \"Connection is alive!\"\r\n return True", "async def connect(self) -> bool:\n\n if not await self._connection.connect():\n return False\n\n await self._connected.wait()\n if not self._connected_successfully:\n return False\n\n self._events.fire(\"connected\")\n return True", "def ready(self):\n if self.socket is None or self._is_connected is False:\n return False\n\n try:\n # Use a timeout of 0 so we get an \"instant\" result\n ready, _, _ = select.select([self.socket], [], [], 0)\n except (socket.error, socket.timeout, ValueError):\n # Evt17: Transport connection closed\n self.event_queue.put('Evt17')\n return False\n\n return bool(ready)", "def is_connected(self) -> bool:\n return (\n self._last_seen is not None\n and (dt_util.utcnow() - self._last_seen)\n < self._router.consider_home_interval\n )", "def is_connected(self, sid: Optional[str] = None) -> bool:\n\n # Fetch the client id.\n sid = sid or self.get_sid()\n\n # Check if the client id exists and if it is included\n # in the list of connected clients.\n return sid is not None and sid in self.clients", "def is_alive(self):\n if (self._s.fileno()>0 and self._running and self._listen):\n return True\n else:\n return False", "def connected(self):\n return self.opened() and bool(self._dll.JLINKARM_EMU_IsConnected())" ]
[ "0.9105765", "0.8836678", "0.8816751", "0.8545325", "0.8359238", "0.82961154", "0.82776904", "0.8272576", "0.82721364", "0.82631487", "0.82311773", "0.8210377", "0.8137813", "0.80617905", "0.80345", "0.80271804", "0.79471153", "0.79411083", "0.7917166", "0.7908697", "0.7903613", "0.78818285", "0.78815967", "0.7850428", "0.7848777", "0.7805482", "0.78041357", "0.7782073", "0.77814436", "0.77792245", "0.7773662", "0.7771303", "0.7753894", "0.77485543", "0.77376604", "0.7735147", "0.7735147", "0.7735147", "0.7735147", "0.7735147", "0.7720413", "0.7711662", "0.77100617", "0.7709099", "0.7682385", "0.76711714", "0.7648899", "0.76487756", "0.7644709", "0.76424026", "0.7638034", "0.7635354", "0.763469", "0.7631966", "0.7631966", "0.7625813", "0.7606751", "0.76018685", "0.75949967", "0.7554903", "0.7551386", "0.75416386", "0.75124466", "0.7456729", "0.7454169", "0.74094874", "0.74080306", "0.73883605", "0.7377427", "0.73771596", "0.7359741", "0.73373586", "0.72903836", "0.7279071", "0.7267489", "0.7249317", "0.7242912", "0.7242519", "0.7240368", "0.72315353", "0.72315353", "0.72081685", "0.7193213", "0.7182579", "0.7169545", "0.71562505", "0.71535146", "0.71531534", "0.7146686", "0.7131818", "0.71287197", "0.71236897", "0.7122239", "0.7112468", "0.70999616", "0.7087828", "0.70680124", "0.7064622", "0.7061379", "0.70349956" ]
0.82416207
10
Disconnect the underlying socket connection
def disconnect_socket(self): self.running = False if self.socket is not None: self.socket.close() self.current_host_and_port = None self.socket = None self.notify('disconnected')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def disconnect(self):\n self.connected = False\n self.socket.close()", "def disconnect(self):\n self.connected = False\n try:\n self.protocol.send_message(self.sock, '__!goodbye__')\n data = self.protocol.recover_message(self.sock)\n except:\n pass\n self.sock.close()\n self.sock = None", "def disconnect(self) -> None:\n self.client_socket.close()", "def disconnect(self):\n\n if self.connected:\n try:\n self.sock.shutdown(socket.SHUT_WR)\n self.sock.close()\n self.connected = False\n except SocketError as e:\n print(\"Server must have disconnected first!\")\n\n print(\"Disconnection successful!\")\n else:\n print(\"Nothing to disconnect!\")", "def disconnect(self):\n if self._connected:\n self.socket.close()\n self._connected = False\n self._subscribed = False\n self._running = False\n self.isy.connection_events.notify(ES_DISCONNECTED)", "def disconnect(self):\n logger.debug(\n \"TCPSPP: Terminate connection to %s:%d\",\n *self.sock.getpeername(),\n )\n self.sock.shutdown(socket.SHUT_RDWR)\n self.sock.close()", "def disconnect(self):\n log.debug(\"Disconnecting %s\" % self)\n self.connection.unbind_s()\n self.connection = None", "def disconnect(self, timeout=-1):\n if self.switch_socket:\n self.switch_socket.close()\n self.switch_socket = None\n self.switch_addr = None\n with self.packets_cv:\n self.packets = []\n with self.connect_cv:\n self.connect_cv.notifyAll()\n if self.bridge_socket:\n self.bridge_socket.close()", "def disconnect(self):\n self.server_sock.close()\n self.client_sock.close()", "def disconnect(self):\n print(\"<== Conexión cerrada ==>\")\n self.is_alive = False\n self._socket.close()", "def disconnect(self,):\n # check if connection ACKed\n if self.connack_rec:\n # if ACKed, disconnect\n self.send_q.put(Message.DisconnectFrame().encode())\n\n # if connected, disconnect\n if self.connected:\n # kill the TCP thread\n self.stop_thread = True\n self.tcp_thread.join()\n\n # close the socket\n self.sock.close()\n\n # reset flags and subscriptions\n self.connack_rec = False\n self.connected = False\n self.topics = []\n self.sub_req = 0\n self.unsub_req = 0\n else:\n return False", "def disconnect(self):\n _error = None\n try:\n self.connection.shutdown()\n except Exception as e:\n log.error(e, exc_info=True)\n _error = 'disconnect error: %s' % e\n finally:\n if _error is not None and config.DEBUG_TO_CONSOLE:\n print (_error)\n self.is_connected = False\n self.connection = None", "def disconnect(self):\n\n logging.info('Disconnecting...')\n\n if self._socket is not None:\n self._socket.close()\n self._socket = None\n self.state = consts.SMPP_CLIENT_STATE_CLOSED\n self.queue.join()", "def disconnect(self):\n self.stop()\n self._send_command('exit')\n self.sock.close()\n self.disconnected = True", "def _disconnect(self):\n self.socket.send_json({\"worker_id\": self.socket_id, \"message\": \"disconnect\"})\n self.socket.close()\n self.context.term()\n exit()", "def disconnect(self, reason=None):\n\n print (\"Disconnecting from server\")\n\n if reason:\n reason = \" :\" + reason\n else:\n reason = \"\"\n\n self._sendmsg(\"QUIT\" + reason)\n\n self._sockfile.close()\n self._sock.close()\n self._sockfile = None\n self._sock = None\n self._connected = False", "def disconnect(self):\n r = requests.post(f'{self.SERVER_ADDR}/api/disconnect', headers={'Authorization': 'Token ' + self.token})\n r.raise_for_status()", "async def disconnect(self):\n\n # Debug info message\n log.info(\"disconnect\")\n\n # SSH?\n if self._protocol == \"ssh\":\n\n # Yes\n\n # Then disconnect using SSH\n 
await self.disconnectSSH()\n\n # Telnet?\n elif self._protocol == \"telnet\":\n\n # Yes\n\n # Then disconnect using Telnet\n await self.disconnectTelnet()\n\n else:\n\n # Unsupported protocol\n\n # Raise an exception\n raise Exception(f\"Unsupported protocol: {self._protocol}\")", "def disconnect(self, connection, disallow_local_disconnect=True):\n\n sock = connection[0]\n address = connection[1]\n\n terminated = self.read_nodestate(2)\n\n try:\n if disallow_local_disconnect:\n Primitives.log(\"Terminated:\"+str(terminated), in_log_level=\"Debug\")\n\n if address == Primitives.get_local_ip() and not terminated:\n\n Primitives.log(\"(Bug) Refusing to disconnect from localhost;\"\n \"that's a terrible idea...\", in_log_level=\"Warning\")\n return None\n\n else:\n\n Primitives.log(\"\\n\\tSelf.disconnect() called.\\n\", in_log_level=\"Info\")\n\n verbose_connection_msg = str(\"Disconnecting from \" + address\n + \"\\n\\t( \" + str(sock) + \" )\")\n\n Primitives.log(verbose_connection_msg, in_log_level=\"Info\")\n\n conn_remove_msg = str(\"Server -> Removing \" + str(sock) + \" from network tuple\")\n Primitives.log(conn_remove_msg, in_log_level=\"Info\")\n self.remove(connection)\n sock.close()\n\n Primitives.log(\"Successfully Disconnected.\", in_log_level=\"Info\")\n\n # Socket not in network tuple . Probably already disconnected, or the socket was [closed]\n except IndexError:\n Primitives.log(\"Already disconnected from that address; passing;\", in_log_level=\"Warning\")", "def disconnect(self):\n self.__connection.disconnect()", "def disconnect(self):\n\t\tif not self.did_handshake:\n\t\t\traise UsageError(\"Not connected!\")\n\t\ttry:\n\t\t\tself.sendMessage(ID_CTRL + \"DISCONNECT\", True)\n\t\tfinally:\n\t\t\tself.cid = None\n\t\t\tself.did_handshake = False\n\t\t\tself.joinstate = 0\n\t\t\tself.createstate = 0\n\t\t\tself.sendClose()", "def disconnect(self):\n logger.debug(\"disconnecting\")\n if self.connected and self.conn:\n self.can_listen = False\n self.conn.close()\n self.connected = False", "def disconnect(self):\n self.is_connected = False\n self.connection.disconnect(True)\n print('Disconnected from the Connected server')", "def disconnect(self):\n\n con = self.connection\n if con.connected:\n con.log.info('disconnecting...')\n con.switchto(self.initial_state)\n con.sendline('exit')\n sleep(2)\n con.log.info('closing connection...')\n con.spawn.close()", "def DisconnectReuseSocket(self) -> bool:", "def disconnect(self):\n\t\tself.client.disconnect()\n\t\tself.log.info(\"disconnected OBS Websocket _connection.\")", "async def disconnect(self):\n if not self._session:\n await self._create_session()\n await self._session.post(self._network.SERVER_ADDR + '/api/disconnect')", "async def disconnect(self):\n try:\n #print(\"Send disconnect command\")\n await self._writeControlPacket(ControlPacketsGenerator.getDisconnectPacket())\n except Exception as err:\n # TODO: catch this error if it is something like already disconnected\n #print(\"Unknown error\")\n raise err\n\n try:\n # Disconnect from this side as well.\n #print(\"Disconnect from this side as well\")\n self.core.ble.disconnect()\n except Exception as err:\n #print(\"Unknown error\")\n raise err", "def _disconnection(sock):\r\n\tsock_ip = sock.getpeername()[0]\r\n\r\n\t_sockets.remove(sock)\r\n\t_clients.pop(sock_ip)\r\n\r\n\t_logger.info(\"Disconnection from {0}. 
Current clients: {1}\" \\\r\n\t\t.format(sock_ip, len(_clients)))\r\n\r\n\tsock.close()\r\n\ton_disconnect.invoke(sock_ip)", "def disconnect(self, print_msg = True):\n if self.sockfd is not None:\n if print_msg:\n print ('Closing socket')\n self.sockfd.close()", "def _disconnect(self):\n self._factory.stopTrying()\n self._connection.disconnect()", "def disconnect(self):\n self.connection.close()", "def disconnect(self):\r\n self._manual_disconnect = True\r\n self.transport.close()", "def disconnect(self) -> None:\n self.connection.close()", "def disconnect():\n logging.info('Client disconnected')", "def disconnect(conn):\n conn.close()", "def disconnect(self):\n self.conn.close()\n return", "def disconnect(self):\r\n try:\r\n self.connection.close()\r\n print (\"disconnected!\")\r\n except Exception as error:\r\n print (\"disconnect() - error - {}\".format(error))", "def stop_socket(self):\n self.socket.shutdown(socket.SHUT_RDWR)", "async def disconnect(self):\r\n from asyncio import shield\r\n if self._session is not None:\r\n await shield(self._session.connector.close())\r\n await shield(self._session.close())\r\n self._session = None", "async def disconnect(self) -> None:\n\n self._set_connected_reset()\n await self._connection.disconnect()", "def disconnect(self):\n response = requests.post(\n self._server_url + _DISCONNECT_URL,\n data={\"id\": self._chat_id}\n )\n self._chat_ready_flag = False", "def stop(self):\n self.logger.info('Close socket')\n self.sock.close()", "async def disconnect(self):\n if self._state == const.STATE_DISCONNECTED:\n return\n if self._reconnect_task:\n self._reconnect_task.cancel()\n await self._reconnect_task\n self._reconnect_task = None\n await self._disconnect()\n self._state = const.STATE_DISCONNECTED\n\n _LOGGER.debug(\"Disconnected from %s\", self.host)\n self._avr.dispatcher.send(const.SIGNAL_TELNET_EVENT, const.EVENT_DISCONNECTED)", "def disconnect(self):\n if self.Connection is not None:\n self.Connection.close()\n self.Connection = None", "def disconnect(self):\n\n self.connection.logout()", "def disconnect(self):\n if self.client:\n logger.info(\"Disconnecting client\")\n self.client.disconnect()\n self.client.__del__()\n self.client = None\n if self.is_host:\n self.host.kick_client('127.0.0.1')\n if self.is_host:\n logger.info(\"Disconnecting host\")\n self.send_to_all_client(HostDisconnectEvent())\n # Kill the broadcast\n self.stop_broadcast.set()\n # Stops accepting connection\n self.host.accepting_disallow()\n # Disconnects all clients\n self.host.disconnect_clients()\n self.host.disconnect()\n self.host.__del__()\n self.host = None", "def disconnect(self,connection_name):\n return self.network.disconnect(connection_name)", "async def disconnect(self):\n await self._client.disconnect()", "def disconnect(self):\n \n self.net.active(False)", "def drop_connection(self):\n if self._transmit_socket:\n self._transmit_socket.shutdown(socket.SHUT_RDWR)\n self._transmit_socket.close()\n self._transmit_socket = None\n self._has_connection.clear()", "def disconnect(self):\n stream=self.get_stream()\n if stream:\n stream.disconnect()", "async def disconnect(self, connection_name: str = \"main\") -> None:\n if self.connections[connection_name] is not None:\n try:\n await self.connections[connection_name][\"websocket\"].close()\n except socket.gaierror:\n self.logger.debug(\"Socket gaia error, let's disconnect anyway...\")\n except websockets.exceptions.ConnectionClosedError:\n self.logger.debug(\"WebSockets connection closed error, let's disconnect 
anyway...\")\n except websockets.exceptions.ConnectionClosedOK:\n self.logger.debug(\"WebSockets connection closed ok, let's disconnect anyway...\")\n except ConnectionResetError:\n self.logger.debug(\"Connection reset error, let's disconnect anyway...\")\n del self.connections[connection_name]", "def disconnect(self):\n if self.is_connected:\n try:\n self.client.unregister()\n finally:\n if self.client.is_running:\n self.client.stop()\n self.hub.disconnect()", "async def disconnect(self) -> None:\n self.client.loop_stop()\n self.client.disconnect()\n self.connected = False\n self.log.debug(\"Disconnected.\")", "def disconnect_from_player(connection):\n \n # get sockets\n socket_in = connection[0]\n socket_out = connection[1]\n \n # shutdown sockets\n socket_in.shutdown(socket.SHUT_RDWR) \n socket_out.shutdown(socket.SHUT_RDWR)\n \n # close sockets\n socket_in.close()\n socket_out.close()", "def disconnect(self):\n if self.conn_status == self.CONN_OPEN:\n self.ssh.close()\n self.conn_status = self.CONN_CLOSED", "def disconnect(self) -> None:\n try:\n self.s.close()\n except OSError as e:\n logging.exception(e)", "def shutdown(self):\n self.connected = False\n self.protocol.send_message(self.sock, '__!shutdown__')\n data = self.protocol.recover_message(self.sock)\n self.sock.close()\n self.sock = None", "async def disconnect(self):\n if not self.is_connected:\n return\n\n await self.stop()\n\n payload = {\n 'op': 4,\n 'd': {\n 'guild_id': self.guild_id,\n 'channel_id': None,\n 'self_mute': False,\n 'self_deaf': False\n }\n }\n\n await self._bot._connection._get_websocket(int(self.guild_id)).send(json.dumps(payload))", "def disconnect(self):\n self.controlProtocol.disconnect()", "def disconnect(self):\n try:\n self.con.close()\n except Exception as e:\n raise e", "def ws_disconnect(message):\n Group('clients').discard(message.reply_channel)", "def disconnect(self):\n\n if self.connect:\n Disconnect(self.connect)", "def __del__(self):\n if self.connection_obj:\n self.logger.info('Disconnecting from host {0}:{1}'.format(self.host, self.port))\n Disconnect(self.connection_obj)", "def disconnect(self) -> None:\n if ':' in self.device_id:\n self.cmd(f\"disconnect {self.device_id}\", devices=False)", "def disconnect(self) -> None:\n with self._lock:\n self._disconnected = True", "def closeSocket(self,socket,reason):\n try:\n socket.send(\"{0};{1}\".format(int(MessageType.DISCONNECT),reason))\n except:\n print(\"can't send close message to socket, was already closed.\")\n finally:\n try:\n socket.close()\n except:\n print(\"Can't close socket, was already closed.\")\n finally:\n try:\n if socket in self.connections:\n self.connections.remove(socket)\n except Exception,e:\n print(\"Socket was already removed from connections.\")", "async def disconnectSSH(self):\n\n # Debug info message\n log.info(\"disconnectSSH\")\n\n # Connection previously open in SSH?\n if self.conn:\n\n # Yes\n\n # Then close the SSH connection\n self.conn.close()\n\n # No more connection to disconnect\n self.conn = None", "def disconnect(self) -> None:\n ...", "def disconnect(self):\n if self.connection.is_connected():\n self.logger.debug(\"disconnecting\")\n self.connection.disconnect()\n\n return self", "def disconnect(self):\n\n\t\tself._alive = False\n\t\tself._ser.close()", "def disconnect(self):\n\n\t\tself._alive = False\n\t\tself._ser.close()", "def disconnect(self):\n if (self.Port!= None):\n if self.State==1:\n self.reset()\n self.Port.close()\n else:\n print \"Can't disconnect, reader not connected\"\n raise 
self.ErrorNotConnected(\"Can't disconnect\")\n \n self.clear_attr()\n self.State = 0", "def disconnect():\n return c.close()", "def __del__(self):\n\n if hasattr(self, '_socket') and self._socket is not None:\n try:\n self.unbind()\n except (exceptions.PDUError, exceptions.ConnectionError) as error:\n if len(getattr(error, 'args', tuple())) > 1:\n logging.warning('({0}) {1}. Ignored'.format(error.args[1], error.args[0]))\n else:\n logging.warning('{error}. Ignored'.format(error=error))\n self.disconnect()", "def disconnect(self):\n raise NotImplementedError('disconnect() is not implemented')", "def disconnect(self) -> None:\n self.log.debug(f\"Disconnecting from {self.host} : {self.port}\")\n if self._connected:\n self.client.publish(\n self.lastwill_message.topic, self.lastwill_message.payload\n )\n self.client.loop_stop()\n self.client.disconnect()", "def stop(self):\n logging.info('closing socket')\n self.socket.close()\n if self.is_alive():\n self.join()\n if not self.exceptions.empty():\n six.reraise(*self.exceptions.get())", "async def disconnect(self):\n self.log.info(\"Disconnect requested\")\n self.connected = False\n self.writer.close()\n await self.writer.wait_closed()", "async def disconnect(self):\n self._logger.info(\"Host {}: SSH: Disconnecting\".format(self._host))\n self._logger.info(\"Host {}: SSH: Disconnecting\".format(self._host))\n await self._cleanup()\n self._conn.close()\n await self._conn.wait_closed()", "def disconnect_from_player(connection):\n\n # get sockets\n socket_in = connection[0]\n socket_out = connection[1]\n\n # shutdown sockets\n socket_in.shutdown(socket.SHUT_RDWR)\n socket_out.shutdown(socket.SHUT_RDWR)\n\n # close sockets\n socket_in.close()\n socket_out.close()", "def disconnect(self):\n raise NotImplementedError", "def disconnect(self):\n raise NotImplementedError", "def disconnect(self) -> None:\n if not self.connectivity_service.is_connected():\n return\n self.logger.debug(\"Disconnecting\")\n self.connectivity_service.disconnect()", "def on_disconnect(self):\n # self.node_worker_ref.socketIO.disconnect()\n self.log.info('Disconnected from the server')", "def disconnect(self):\r\n\t\tif self.ser != None:\r\n\t\t\tself.ser.close()\r\n\t\tself.connected = 0\r\n\t\tself.ser = None", "def close(self):\n if self.tcpsocket:\n logging.debug(\"socket closing\")\n self._isConnected = False\n self.tcpsocket.close()\n # self.tcpsocket = None\n self.socketClosed.emit()", "def loseConnection(self):\n self.transport.loseConnection()", "async def disconnectTelnet(self):\n\n # Debug info message\n log.info(\"disconnectTelnet\")\n\n # Connection previously open in Telnet?\n if self._writer:\n\n # Yes\n\n # Then close the SSH connection\n self._writer.close()\n\n # No more connection to disconnect\n self._writer = None", "def disconnect(self, reason=\"Disconnection requested.\"):\n if not self.connected:\n return False\n\n try:\n self._socket.shutdown(socket.SHUT_RDWR)\n self._socket.close()\n except socket.error as error:\n logger.warning(\"Couldn't disconnect from %s: %s.\",\n self._repr_remote(), error)\n finally:\n self.finalize(reason=reason)\n return True", "def on_disconnect(self, raw_msg, server, port, **kwargs):", "def close_connection(self):\n if self.socket:\n self.socket.close()", "async def disconnect(self, clear_snitun_token=False) -> None:\n if not self._snitun:\n _LOGGER.error(\"Can't handle request-connection without backend\")\n raise RemoteNotConnected()\n\n # Stop reconnect task\n if self._reconnect_task:\n self._reconnect_task.cancel()\n\n 
if clear_snitun_token:\n self._token = None\n\n # Check if we already connected\n if not self._snitun.is_connected:\n return\n await self._snitun.disconnect()\n self.cloud.client.dispatcher_message(const.DISPATCH_REMOTE_DISCONNECT)", "def disconnect(self):\n if self._heart_beat_timer.is_running():\n self._heart_beat_timer.stop()\n\n self._disconnect_issued = True\n self._ws_jsonrpc_cache = []\n self._logger.debug(\"Cleared JSONRPCRequests cache.\")\n\n if self.is_connected:\n self._ws.close()\n self._ws = None\n self._logger.debug(\"Disconnected client websocket.\")", "def disconnect(self, reason=None):\n self.sendClose(self.CLOSE_STATUS_CODE_NORMAL, reason)", "def disconnectServer(controlName):\n _disconnectServer(controlName)", "def disconnect(self, extra_headers=None):\n try:\n if self.connection.connected:\n with self.subscription_lock:\n # Need a copy since unsubscribe() removes the destination from the collection.\n subcpy = copy(self.subscribed_destinations)\n for destination in subcpy:\n self.unsubscribe(destination)\n disconnect = frame.DisconnectFrame(extra_headers=extra_headers)\n result = self.send_frame(disconnect)\n try:\n self.connection.disconnect()\n except NotConnectedError:\n pass\n return result\n finally:\n self.shutdown_event.set()", "def close_connection(self):\r\n self.running = False\r\n self.client_socket.close()", "def disconnect(self):\n self.serial.close()\n self.connected = 0" ]
[ "0.8091306", "0.8073663", "0.8069597", "0.7900187", "0.7873242", "0.7846093", "0.7717508", "0.7700298", "0.7663556", "0.76379234", "0.75356364", "0.7519973", "0.7475285", "0.74681747", "0.74621254", "0.7447179", "0.74322003", "0.74192196", "0.7402269", "0.7398234", "0.7384707", "0.7368797", "0.7347791", "0.734419", "0.73358345", "0.7322066", "0.726548", "0.72361714", "0.7235266", "0.72341126", "0.7233436", "0.72269875", "0.72148573", "0.72120047", "0.71417487", "0.7136488", "0.71354216", "0.7129426", "0.71262914", "0.7124439", "0.7123181", "0.7101544", "0.7062683", "0.7051952", "0.7029836", "0.70169604", "0.6994812", "0.6986397", "0.6986169", "0.6986132", "0.6984916", "0.6984373", "0.696975", "0.69693756", "0.696055", "0.69308066", "0.6913254", "0.69071454", "0.6905058", "0.6904479", "0.68798417", "0.68791974", "0.68712825", "0.68631846", "0.68612486", "0.68597466", "0.6856991", "0.6854827", "0.684955", "0.68477285", "0.6843544", "0.6841751", "0.6841751", "0.68383104", "0.68331254", "0.68307805", "0.68286437", "0.68233895", "0.6807022", "0.6799473", "0.67944086", "0.67891884", "0.67564607", "0.67564607", "0.67448795", "0.67416966", "0.67412144", "0.6734894", "0.6727192", "0.67246586", "0.67094654", "0.67067796", "0.66990405", "0.6692469", "0.66910297", "0.6684618", "0.6676109", "0.6665015", "0.66629905", "0.66627264" ]
0.81088334
0
Close the socket and clear the current host and port details.
def cleanup(self): try: self.socket.close() except: pass # ignore errors when attempting to close socket self.socket = None self.current_host_and_port = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close(self):\n print('Closing server socket (host {}, port {})'.format(self.host, self.port))\n if self.sock:\n self.sock.close()\n self.sock = None", "def close(self):\n self.socket.close()", "def close(self):\n self.socket.close()", "def close(self):\n self.socket.close()", "def close(self):\n self.socket.close()", "def close(self):\n self.socket.close()", "def close(self):\n self.socket.close()", "def close(self):\n if self.socket:\n self.socket.close()\n self.socket = None", "def close(self):\n if self.socket:\n self.socket.close()\n self.socket = None", "def close(self):\n self.s.close()\n print(\"Socket closed\")", "def close(self):\n print('Closing socket connected with {}.'.format(self.remote_addr))\n self.remote_socket.close()\n self.terminate()", "def close(self):\n if self._socket is not None:\n self._socket.close()\n self._socket = None", "def close(self) -> None:\n self._socket.close()", "def close(self):\n if self.socket is not None:\n try:\n self.socket.shutdown(2)\n except Exception:\n pass\n try:\n self.socket.close()\n except Exception:\n pass\n self.socket = None", "def disconnect_socket(self):\n self.running = False\n\n if self.socket is not None:\n self.socket.close()\n\n self.current_host_and_port = None\n self.socket = None\n self.notify('disconnected')", "def __del__(self):\r\n if not self.is_dummy:\r\n self.socket.close()", "def close(self):\n self._sock.close()\n self._sock = None", "def stop(self):\n self.logger.info('Close socket')\n self.sock.close()", "def Close(self):\n self._sock.close()", "def cleanup(self):\n self._socket.close()\n os.remove(_get_control_socket_path())", "def __del__(self):\n self.sock.shutdown(socket.SHUT_RDWR)\n self.sock.close()", "def _cleanupSocket(self, sock):\n sock.close()", "def _close(self):\n \n # Close device\n logger.debug(\"%s: TCP port closing started...\" % \\\n self.__class__.__name__)\n self._tcp_socket.close()\n self._socket = None\n logger.debug(\"%s: ...TCP port closing complete.\" % \\\n self.__class__.__name__)", "def teardown(self):\n if self.__socket:\n self.__socket.close()", "def shutdown(self):\r\n self.socket.close()\r\n # self.socket_video.close()\r\n self.socket_state.close()", "def close(self):\n if self.sock is not None:\n self.sock.close()\n self._sock = None", "def stop_socket(self):\n self.socket.shutdown(socket.SHUT_RDWR)", "def server_close(self):\n\t\tself.socket.close()", "def close(self):\n if self.socket is None or self._is_connected is False:\n return\n\n try:\n self.socket.shutdown(socket.SHUT_RDWR)\n except socket.error:\n pass\n\n self.socket.close()\n self.socket = None\n self._is_connected = False\n # Evt17: Transport connection closed\n self.event_queue.put('Evt17')", "def _close(self):\n \n # Close device\n logger.debug(\"%s: TCP port closing started...\" % \\\n self.__class__.__name__)\n self._router = None\n self._platform = None\n self._tcp_socket.close()\n logger.debug(\"%s: ...TCP port closing complete.\" % \\\n self.__class__.__name__)", "def close(self):\n # Shut down the socket to prevent further sends/receives\n self.client_socket.shutdown(socket.SHUT_RDWR)\n # Close the socket\n self.client_socket.close()", "def shutdown(self):\n self.sock.close()", "def close(self):\n self._closed = True\n self.sock.close()", "def close(self):\n self.context['socket'].close()", "def clear_socket(self):\n if hasattr(self, \"_socket\"):\n if isinstance(self.poller.sockets, dict):\n sockets = list(self.poller.sockets.keys())\n for socket in sockets:\n log.trace(\"Unregistering socket: %s\", 
socket)\n self.poller.unregister(socket)\n else:\n for socket in self.poller.sockets:\n log.trace(\"Unregistering socket: %s\", socket)\n self.poller.unregister(socket[0])\n del self._socket", "def close(self):\r\n # close the connection and the socket\r\n self.conn.close()\r\n self.theSocket.close()", "def close(self):\n if self._socketPoller != None:\n self._socketPoller.close()\n self._socketPoller = None\n\n if self._socket != None:\n self._socket.close()\n self._socket = None", "def _lowLevelClose(self):\n self.socket_reference.close()", "def close(self):\n\n\t\ttry:\n\t\t\tself.running = False\n\t\t\tself.sock.shutdown(socket.SHUT_RDWR)\n\t\t\tself.sock.close()\n\t\texcept:\n\t\t\tpass", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.socket.close()", "def close(self):\n if self.tcpsocket:\n logging.debug(\"socket closing\")\n self._isConnected = False\n self.tcpsocket.close()\n # self.tcpsocket = None\n self.socketClosed.emit()", "def _close(self):\n if self.opened == False: return\n \n # Close device\n logger.debug(\"%s: Socket closing started...\" % \\\n self.__class__.__name__)\n self._socket.close()\n logger.debug(\"%s: ...Socket closing complete.\" % \\\n self.__class__.__name__)", "def cleanup(self):\n self.sock.close()", "def close(self):\n if self.push_socket is not None:\n self.push_socket.close()\n\n if self.pull_socket is not None:\n self.pull_socket.close()\n\n if self.control_socket is not None:\n self.control_socket.close()", "def close(sock):\n cport = sock['addr'][1]\n if cport not in connections:\n # socket has already closed\n return\n addr = sock['addr']\n print 'closing connection with: {0}:{1}'.format(addr[0], addr[1])\n sock['conn'].close()\n del connections[cport]", "def close(self):\n # Report server that connection is closed\n self._socket.sendall(''.encode())\n self._socket.close()", "def close(self):\n if self.socket is not None:\n self.socket.close()\n self.socket = None\n self.socket_lock = None\n\n # help blocking send thread close\n self.send_queue.put(None)\n self.send_queue = None\n getLogger(__name__).info(\"Connection closed.\")", "def close(self):\n log.info(\"Closing listener from %s on port %s.\",\n self.address, self.port)\n self._server.stop()\n self._server = None\n self._address = \"\"\n self._port = 0", "def closesock(s):\n s.close()\n s = None", "def clear(self):\r\n self.buffer = ''\r\n self.connected = False\r\n self.listening = False\r\n del self.transaction_id\r\n try:\r\n self.socket.close()\r\n except:\r\n pass\r\n self.socket = None", "def stop(self):\n logging.info('closing socket')\n self.socket.close()\n if self.is_alive():\n self.join()\n if not self.exceptions.empty():\n six.reraise(*self.exceptions.get())", "def close(self):\n self._relaypid = None\n self._portoffset = None", "def disconnect(self):\n self.connected = False\n self.socket.close()", "def CloseSocket(client_socket):\r\n client_socket.shutdown(0)\r\n client_socket.close()", "def close_connection(self):\n if self.socket:\n self.socket.close()", "def close(self):\n if self.pending_messages:\n print(\"?? closing client socket with unsent messages. message list follows. 
\")\n for message in self.pending_messages:\n print(\"*** \", message)\n self.socket_.shutdown(socket.SHUT_RDWR)\n self.socket_.close()", "def close_connection(self):\n self.nodesocket.close()", "def close(self):\n\n\t\tif (self.port != None) and (self.state == State.Connected):\n\t\t\tself.send(\"atz\")\n\t\t\tself.port.close()\n\n\t\tself.port = None\n\t\tself.ELMver = \"Unknown\"", "def __del__(self):\n\n if hasattr(self, '_socket') and self._socket is not None:\n try:\n self.unbind()\n except (exceptions.PDUError, exceptions.ConnectionError) as error:\n if len(getattr(error, 'args', tuple())) > 1:\n logging.warning('({0}) {1}. Ignored'.format(error.args[1], error.args[0]))\n else:\n logging.warning('{error}. Ignored'.format(error=error))\n self.disconnect()", "def shutdown(self):\n self.connected = False\n self.protocol.send_message(self.sock, '__!shutdown__')\n data = self.protocol.recover_message(self.sock)\n self.sock.close()\n self.sock = None", "def close(self):\n self.connection.close()\n print(\"Connection on port \" + str(self.port) + \" closed.\")", "def close_connection(self):\r\n self.running = False\r\n self.client_socket.close()", "def close_stream(self):\n sel.socket.close()", "def disconnect(self):\n self.server_sock.close()\n self.client_sock.close()", "def __exit__(self, *args):\n self.sock.close()", "def _close(self):\n \n # Close device\n logger.debug(\"%s: UDP port closing started...\" % \\\n self.__class__.__name__)\n self._udp_socket.close()\n self._socket = None\n logger.debug(\"%s: ...UDP port closing complete.\" % \\\n self.__class__.__name__)", "def ShutDownSocket(self, s):\r\n try:\r\n s.shutdown(socket.SHUT_RDWR)\r\n s.close()\r\n except socket.error:\r\n pass", "def shutdown(self):\n self.socket_thread.stop()", "def run(self):\n\n self.socket.close()", "def disconnect(self):\n print(\"<== Conexión cerrada ==>\")\n self.is_alive = False\n self._socket.close()", "def close(self):\r\n # If the Multiplexer is still initialized, then lets send a close message\r\n if self.mux != None and self.mux.isAlive():\r\n # Create termination frame\r\n termFrame = MultiplexerFrame()\r\n termFrame.initConnTermFrame(self.id)\r\n \r\n # Tell our partner to terminate the client connection\r\n self.mux._sendFrame(termFrame)\r\n \r\n # Remove from the list of virtual sockets\r\n if self.mux != None:\r\n self.mux.virtualSocketsLock.acquire()\r\n del self.mux.virtualSockets[self.id]\r\n self.mux.virtualSocketsLock.release()\r\n \r\n # Clean-up\r\n self.mux = None\r\n self.socketInfo = None\r\n self.buffer = None\r\n self.bufferInfo = None\r\n\r\n try:\r\n self.socketLocks[\"nodata\"].release()\r\n except:\r\n pass\r\n self.socketLocks = None", "def close_socket(self, sock):\n print_debug(\"Closing socket.\")\n try:\n sock.close()\n # If data socket being closed, print status message.\n if sock != self.s:\n print_debug(\"Socket closed.\")\n except socket.error:\n error_quit(\"Error closing socket!\", 500)\n except Exception:\n error_quit(\"An unknown error occurred while closing the socket!\", 500)", "def remote_destroy(self):\r\n self.transport.loseConnection()", "def close(self):\n logging.info(\"Client.close(%s)\", self)\n try:\n self.socket.close()\n except:\n logging.exception(\"Client.close(%s)\", self)\n\n logging.info(\"Connection Closed\")", "def _close(self):\n \n # Close device\n logger.debug(\"%s: UDP port closing started...\" % \\\n self.__class__.__name__)\n self._router = None\n self._platform = None\n self._udp_socket.close()\n logger.debug(\"%s: ...UDP port closing 
complete.\" % \\\n self.__class__.__name__)", "def disconnect(self) -> None:\n self.client_socket.close()", "def hup(self):\r\n if self.sock:\r\n self.connected = False\r\n self.sock.shutdown(2)\r\n self.sock.close()", "def close(self):\n logging.debug(\"Closing TCP stream\")\n # Sometimes the socket read might be blocked in the reader thread. Therefore we force the shutdown by closing \n # the socket here\n self._wantExit = True \n if not self.socket is None:\n self.socket.shutdown(socket.SHUT_RDWR)\n self.socket.close()\n StreamInterface.close(self)", "def _clear_io_state(self):\n fd = self._shadow_sock\n if self._shadow_sock.closed:\n fd = self._fd\n self.io_loop.remove_handler(fd)", "def shutdown(self):\n self._send_command('shutdown')\n self.sock.close()\n self.disconnected = True", "def CloseNetwork():\r\n global this_sock\r\n this_sock.close()\r\n return", "def disconnect(self):\n logger.debug(\n \"TCPSPP: Terminate connection to %s:%d\",\n *self.sock.getpeername(),\n )\n self.sock.shutdown(socket.SHUT_RDWR)\n self.sock.close()", "def __close_conn(self, target_host_socket):\n self.__client.close()\n target_host_socket.close()\n logging.info(\"Client <> Target closed\")", "def __exit__(self, *args, **kwargs):\n\n self.sock.close()", "def close(self):\n self._server.shutdown()\n self._server = None", "def close(self):\n with self._recv_lock:\n with self._send_lock:\n self.rbuf = b''\n self.rbuf_unconsumed = self.rbuf\n self.sbuf[:] = []\n self.sock.close()\n return", "def shutdown(self):\r\n # First call superclass shutdown()\r\n HTTPServer.shutdown(self)\r\n\r\n # We also need to manually close the socket\r\n self.socket.close()", "def terminate(self):\n LOGGER.debug('[%s]: Remote side hung-up. Terminating ...', self.name)\n try:\n self._secure_sock.shutdown(socket.SHUT_RDWR)\n self._secure_sock.close()\n self._epoll.unregister(self._sock_fd)\n self._epoll.close()\n except OSError as os_err:\n LOGGER.error(str(os_err))", "def close(self) :\n if self.ssh is not None :\n self.ssh.close()\n self.ssh = None\n self.connected = False", "def close(self):\n if not self.is_open:\n raise GrblHostError(\"not open!\")\n self.ser.close()\n self.ser = None\n self.is_open = False\n self.grbl_version = None", "def close(self):\n if self._closed:\n return\n\n self.listening = False\n\n self.ssl_enabled = False\n\n if self._slave:\n self._slave.close()\n\n self._safely_call(self.on_close)\n\n self._remote_address = None\n self._local_address = None\n\n _Channel.close(self)", "def SCPI_sock_close(session):\n \n session.close()", "def close(self):\n if self._conn:\n logger.info(\"close connection.\")\n self._conn.unbind_s()", "def close(self):\n self.socket.close()\n return True", "def shutdown(self):\n # First call superclass shutdown()\n HTTPServer.shutdown(self)\n\n # We also need to manually close the socket\n self.socket.close()", "def shutdown(self):\n self.channel.close()\n self.conn.close()", "def stop(self):\n\n net_tuple = self.read_nodestate(0)\n\n # 1. Kill localhost client\n try:\n localhost_socket = self.lookup_socket(\"127.0.0.1\")\n localhost_connection = (localhost_socket, \"127.0.0.1\")\n self.send(localhost_connection, \"stop\")\n\n except ConnectionRefusedError:\n pass # Localhost is already disconnected\n\n log_msg = \"Attempting to gracefully disconnect and disassociate from all clients...\"\n Primitives.log(log_msg, in_log_level=\"Info\")\n\n # 2. 
Disconnect from all clients\n for connection in net_tuple:\n log_msg = str(\"Trying to disconnect from socket: \" + str(connection[0]))\n Primitives.log(log_msg, in_log_level=\"Debug\")\n\n try:\n self.disconnect(connection, disallow_local_disconnect=True)\n\n except OSError:\n another_log_msg = str(\"Failed to disconnect from socket: \"+str(connection[0]))\n Primitives.log(another_log_msg, in_log_level=\"Warning\")\n\n finally:\n Primitives.log(\"Successfully disconnected\", in_log_level=\"Debug\")\n\n # Forcefully close localhost socket\n localhost_sock_name = localhost.getsockname()\n localhost.close()\n\n Primitives.log(\"Exiting gracefully;\", in_log_level=\"Info\")\n\n # 3. Kill the network injector and terminate the Server.\n\n self.write_nodestate(nodeState, 2, True) # set terminated=True\n self.write_nodestate(nodeState, 4, True) # set injector_terminated = True\n\n # Hack the socket.listen() loop in the init() function by connecting to it(localhost),\n # which will force it to terminate.\n\n temp = socket.socket()\n temp.connect(localhost_sock_name) # This will kill the localhost socket\n temp.close()\n\n # noinspection PyProtectedMember\n os._exit(0)", "def shutdown(self):\n\n self.active = False\n\n try:\n self.listen_socket.shutdown(socket.SHUT_RDWR)\n except:\n self.logger.info(\"Ignoring listen soc shutdown error\")\n self.listen_socket = None\n\n with self.connect_cv:\n self.connect_cv.notifyAll()\n\n self.wakeup()\n self.dbg_state = \"down\"", "async def close(self):\n\n if self.closed:\n return\n\n await super().close()\n\n if self.socket:\n await self.socket.close()\n\n await self._session.close()", "def closeSocket(self,socket,reason):\n try:\n socket.send(\"{0};{1}\".format(int(MessageType.DISCONNECT),reason))\n except:\n print(\"can't send close message to socket, was already closed.\")\n finally:\n try:\n socket.close()\n except:\n print(\"Can't close socket, was already closed.\")\n finally:\n try:\n if socket in self.connections:\n self.connections.remove(socket)\n except Exception,e:\n print(\"Socket was already removed from connections.\")" ]
[ "0.75493366", "0.747122", "0.747122", "0.747122", "0.747122", "0.747122", "0.747122", "0.7457175", "0.7457175", "0.7422063", "0.7360431", "0.73594326", "0.7347399", "0.7302408", "0.72775865", "0.7206107", "0.71936274", "0.7185615", "0.7109103", "0.7105997", "0.70846534", "0.7084252", "0.70735955", "0.7051045", "0.7041725", "0.70320594", "0.70225793", "0.69987553", "0.69823325", "0.6977687", "0.69769764", "0.6974536", "0.69741905", "0.69633645", "0.6939721", "0.69221175", "0.68820924", "0.68721056", "0.6859904", "0.6857897", "0.6854656", "0.68461514", "0.681359", "0.68048716", "0.67988336", "0.6740262", "0.66960853", "0.6647649", "0.66075766", "0.6585808", "0.65814537", "0.65772176", "0.6553259", "0.6551345", "0.653514", "0.6530941", "0.6506054", "0.6506", "0.6503916", "0.64951944", "0.6477804", "0.64770794", "0.64737535", "0.64703906", "0.6446699", "0.6436819", "0.642475", "0.64172375", "0.64101917", "0.6396763", "0.636998", "0.6347863", "0.63370097", "0.63360184", "0.63167536", "0.6313511", "0.6294188", "0.62867624", "0.62804663", "0.6277701", "0.62676543", "0.6253437", "0.6252681", "0.624307", "0.6232463", "0.6214329", "0.6209856", "0.6208341", "0.6207214", "0.62005043", "0.62004", "0.619731", "0.619674", "0.6186814", "0.6160212", "0.6105107", "0.6102154", "0.6095625", "0.60938543", "0.6091143" ]
0.779209
0
Try connecting to the (host, port) tuples specified at construction time.
def attempt_connection(self): self.connection_error = False sleep_exp = 1 connect_count = 0 while self.running and self.socket is None and connect_count < self.__reconnect_attempts_max: for host_and_port in self.__hosts_and_ports: try: log.info("Attempting connection to websocket %s", host_and_port) self.socket = websocket.WebSocket() proto, host, port, path = host_and_port[3], host_and_port[0], host_and_port[1], host_and_port[2] if port: ws_uri = '{}://{}:{}/{}'.format(proto, host, port, path) else: ws_uri = '{}://{}/{}'.format(proto, host, path) self.socket.connect(ws_uri, timeout=self.__timeout) self.current_host_and_port = host_and_port log.info("Established connection to %s", ws_uri) break except WebSocketException: self.socket = None connect_count += 1 log.warning("Could not connect to host %s, port %s", host_and_port[0], host_and_port[1], exc_info=1) if self.socket is None: sleep_duration = (min(self.__reconnect_sleep_max, ((self.__reconnect_sleep_initial / (1.0 + self.__reconnect_sleep_increase)) * math.pow(1.0 + self.__reconnect_sleep_increase, sleep_exp))) * (1.0 + random.random() * self.__reconnect_sleep_jitter)) sleep_end = monotonic() + sleep_duration log.debug("Sleeping for %.1f seconds before attempting reconnect", sleep_duration) while self.running and monotonic() < sleep_end: time.sleep(0.2) if sleep_duration < self.__reconnect_sleep_max: sleep_exp += 1 if not self.socket: raise exception.ConnectFailedException()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect(self, host, port):\n pass", "def connect(self):\n \n try:\n self.__sock.connect((self.__host, self.__port))\n\n except socket.error,e:\n print 'Oops, unable to connect. Try again!',e\n sys.exit(1)", "async def _connect(self, host_loc):\n parsed_hostloc = urlparse(host_loc)\n scheme, host, path, parameters, query, fragment = parsed_hostloc\n if parameters or query or fragment:\n raise TypeError(\n \"Supplied info beyond scheme, host.\"\n + \" Host should be top level only: \",\n path,\n )\n\n host, port = get_netloc_port(parsed_hostloc)\n if scheme == \"http\":\n return await self._open_connection_http((host, int(port))), port\n else:\n return await self._open_connection_https((host, int(port))), port", "def connect(self):\n try:\n self.sock.connect((self.hostname, self.port))\n print 'connected to ' + self.hostname\n except socket.gaierror as e:\n print(\"Recieved error when connecting to \" + str((self.hostname, self.port)))\n raise e", "def connect(self, connection_host, connection_port):\n self.connection.connect((connection_host, connection_port))", "def __init__(self, host=HOST, port=PORT):\r\n self._socket = None\r\n\r\n if host is not None:\r\n self.connect(host, port)", "def make_connection( hostname, port = 4663 ):\n \tconnection = socket.socket();", "def init_tcp_conn(target: str, port: int) -> socket.socket:\n conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n conn.settimeout(5)\n try:\n conn.connect((target, port))\n return conn\n except socket.timeout as e:\n print(e)\n return None", "def connect(self, host=None, port=None):\n host = self.host if host is None else host\n port = self.port if port is None else port\n self.socket.connect(host, port)", "def _connect_to_target(self, host):\n port = 80\n if ':' in host:\n host, _, port = host.partition(':')\n (socket_family, _, _, _, address) = socket.getaddrinfo(host, port)[0]\n self.target = socket.socket(socket_family)\n self.target.connect(address)", "def connect(self) -> None:\n self.s.connect((self.ip, self.port))", "def connect(self, host, port):\n\n self.connect_count = self.RETRY_COUNT\n timeout = None if self.debug_mode else FvpConnector.MAX_IDLE_TIME\n\n while not self.has_connect_timed_out():\n try:\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.settimeout(timeout)\n self.sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)\n self.sock.connect((host, port))\n return\n except ConnectionRefusedError:\n time.sleep(FvpConnector.RETRY_PERIOD)\n\n raise Exception(\"Failed to connect to FVP\")", "def _connect(self):\n\n if self.connecting:\n rlog(10, self.name, 'already connecting')\n raise AlreadyConnecting()\n\n if self.connected:\n rlog(10, self.name, 'already connected')\n raise AlreadyConnected()\n\n self.stopped = 0\n self.connecting = True\n self.connectok.clear()\n self.connectlock.acquire()\n\n # create socket\n if self.ipv6:\n rlog(10, self.name, 'creating ipv6 socket')\n self.oldsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)\n self.ipv6 = 1\n else:\n rlog(10, self.name, 'creating ipv4 socket')\n self.oldsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n assert(self.oldsock)\n\n # optional bind\n server = self.server\n elite = self.cfg['bindhost'] or config['bindhost']\n if elite:\n try:\n self.oldsock.bind((elite, 0))\n except socket.gaierror:\n rlog(10, self.name, \"can't bind to %s\" % elite)\n # resolve the IRC server and pick a random server\n if not server:\n # valid IPv6 ip?\n try: socket.inet_pton(socket.AF_INET6, self.server)\n except 
socket.error: pass\n else: server = self.server\n if not server: \n # valid IPv4 ip?\n try: socket.inet_pton(socket.AF_INET, self.server)\n except socket.error: pass\n else: server = self.server\n if not server:\n # valid hostname?\n ips = []\n try:\n for item in socket.getaddrinfo(self.server, None):\n if item[0] in [socket.AF_INET, socket.AF_INET6] and item[1] == socket.SOCK_STREAM:\n ip = item[4][0]\n if ip not in ips: ips.append(ip)\n except socket.error: pass\n else: server = random.choice(ips)\n\n # do the connect .. set timeout to 30 sec upon connecting\n rlog(10, self.name, 'connecting to %s (%s)' % (server, self.server))\n self.oldsock.settimeout(5)\n self.oldsock.connect((server, int(self.port)))\n\n # we are connected\n rlog(10, self.name, 'connection ok')\n time.sleep(1)\n self.connected = True\n\n # make file socket\n self.fsock = self.oldsock.makefile(\"r\")\n\n # set blocking\n self.oldsock.setblocking(self.blocking)\n self.fsock._sock.setblocking(self.blocking)\n\n # set socket time out\n if self.blocking:\n socktimeout = self.cfg['socktimeout']\n if not socktimeout:\n socktimeout = 301.0\n else:\n socktimeout = float(socktimeout)\n self.oldsock.settimeout(socktimeout)\n self.fsock._sock.settimeout(socktimeout)\n # enable ssl if set\n if self.ssl:\n rlog(10, self.name, 'ssl enabled')\n self.sock = socket.ssl(self.oldsock) \n else:\n self.sock = self.oldsock\n\n # try to release the outputlock\n try:\n self.outputlock.release()\n except thread.error:\n pass\n\n # start input and output loops\n start_new_thread(self._readloop, ())\n start_new_thread(self._outloop, ())\n\n # logon and start monitor\n self._logon()\n self.nickchanged = 0\n self.reconnectcount = 0\n saymonitor.start()\n return 1", "def connect_to_server(self):\n\n try:\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client.connect((self.hostname, self.port))\n return client\n except Exception as e:\n print(\"Can't connect to server: \", e)\n sys.exit()", "def tryconnect(name, port):\n return port_talker.TCPTalk(name, port, 2, '', None, 0, 1) # use ext. 
resolver", "def connect(self, host:str, port:int, userid:str=None, password:str=None, startup:str=None, \n highAvailability:bool=False, highAvailabilitySites:Optional[List[str]]=None, keepAliveTime:Optional[int]=None, reconnect:bool=False) -> bool:\n if highAvailabilitySites is None:\n highAvailabilitySites = []\n if keepAliveTime is None:\n keepAliveTime = -1\n if userid is None:\n userid = \"\"\n if password is None:\n password = \"\"\n if startup is None:\n startup = \"\"\n if self.cpp.connect(host, port, userid, password, startup, highAvailability, highAvailabilitySites, keepAliveTime, reconnect):\n self.host = host\n self.port = port\n self.userid = userid\n self.password = password\n return True\n else:\n return False", "def connect(self):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.connect((self.host, PORT)) # probably throws errors\n self.connected = True", "def __init__(self):\n self.try_to_connect()", "def createConnection(addr):\r\n\r\n # cast port number to integer\r\n addr = (addr[0],int(addr[1]))\r\n\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.settimeout(5)\r\n try:\r\n s.connect(addr)\r\n except (socket.timeout, ConnectionRefusedError):\r\n return None\r\n return s", "def connect(self, host):\n return False", "def test_connect(self):\n port = socket_any_family()\n port.bind((\"\", 0))\n port.listen(3)\n\n clientSSL = Connection(Context(SSLv23_METHOD), socket(port.family))\n clientSSL.connect((loopback_address(port), port.getsockname()[1]))\n # XXX An assertion? Or something?", "def connect_socket(self):\n try:\n self.socket.connect((self.request.host, int(self.request.port)))\n except socket.gaierror:\n raise socket.gaierror(\"Socket connection could not be established\")\n except socket.timeout:\n raise socket.timeout(\"Socket connection timed out\")\n except InterruptedError:\n raise InterruptedError(\"Socket connection has been interrupted by a signal\")", "def pcp_connect(self, hostname, port, username, password):\n\n\t\tfd = 0\n\t\tif hostname == None or hostname == '' or hostname.startswith('/'):\n\t\t\tif sys.platform == 'win32':\n\t\t\t\tself.pcp_internal_error(f'ERROR: hostname not provided')\n\t\t\t\tself.connState = ConnStateType.BAD\n\t\t\t\treturn\n\n\t\t\ttry:\n\t\t\t\tfd = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n\t\t\texcept Exception as e: \n\t\t\t\tself.pcp_internal_error(f'ERROR: failed to create UNIX domain socket. 
socket error \"{e}\"')\n\t\t\t\tself.connState = ConnStateType.BAD\n\t\t\t\treturn\n\n\t\t\tpath = None\n\t\t\tif hostname == None or hostname == '':\n\t\t\t\tpath = UNIX_DOMAIN_PATH\n\t\t\t\thostname = path\n\t\t\telse:\n\t\t\t\tpath = hostname\n\n\t\t\tunix_addr = os.path.join(path, f'.s.PGSQL.{port}')\n\n\t\t\ttry:\n\t\t\t\tfd.connect(unix_addr)\n\t\t\texcept Exception as e:\n\t\t\t\tfd.close()\n\t\t\t\tself.pcp_internal_error(f'ERROR: connection to socket \"{unix_addr}\" failed with error \"{e}\"')\n\t\t\t\tself.connState = ConnStateType.BAD\n\t\t\t\treturn\n\t\telse:\n\t\t\ttry:\n\t\t\t\tfd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\t\texcept Exception as e:\n\t\t\t\tself.pcp_internal_error(f'ERROR: failed to create INET domain socket with error \"{e}\"')\n\t\t\t\tself.connState = ConnStateType.BAD\n\t\t\t\treturn\n\n\t\t\ttry:\n\t\t\t\tfd.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n\t\t\texcept Exception as e:\n\t\t\t\tfd.close()\n\t\t\t\tself.pcp_internal_error(f'ERROR: set socket option failed with error \"{e}\"')\n\t\t\t\tself.connState = ConnStateType.BAD\n\t\t\t\treturn\n\t\t\t\n\t\t\ttry:\n\t\t\t\tsocket.gethostbyname(hostname)\n\t\t\texcept socket.gaierror as e:\n\t\t\t\tfd.close()\n\t\t\t\tself.pcp_internal_error(f'ERROR: could not retrieve hostname. gethostbyname failed with error \"{e}\"')\n\t\t\t\tself.connState = ConnStateType.BAD\n\t\t\t\treturn\n\n\t\t\ttry:\n\t\t\t\tfd.connect((hostname, port))\n\t\t\texcept OSError as e:\n\t\t\t\tfd.close()\n\t\t\t\tself.pcp_internal_error(f'ERROR: connection to host \"{hostname}\" failed with error \"{e}\"')\n\t\t\t\tself.connState = ConnStateType.BAD\n\t\t\t\treturn\n\t\t\t\n\t\tself.pcpConn.pcp_open(fd)\n\t\tif self.pcpConn == None:\n\t\t\tfd.close()\n\t\t\tself.pcp_internal_error('ERROR: failed to allocate memory')\n\t\t\tself.connState = ConnStateType.BAD\n\t\t\treturn\n\t\t\n\t\tself.connState = ConnStateType.CONNECTED\n\n\t\t#\n\t\t# If username is not provided. Use the os user name and do not complain\n\t\t# if it (getting os user name) gets failed\n\t\t#\n\t\tif username == None:\n\t\t\tusername = getpass.getuser()\n\n\t\t#\n\t\t# If password is not provided. 
lookup in pcppass file\n\t\t#\n\t\tif password == None or password == '':\n\t\t\tpassword = self._PasswordFromFile(hostname, str(port), username)\n\t\t\n\t\tif self._pcp_authorize(username, password) < 0:\n\t\t\tself.pcpConn.pcp_close()\n\t\t\tself.pcpConn = None\n\t\t\tself.connState = ConnStateType.AUTH_ERROR\n\t\telse:\n\t\t\tself.connState = ConnStateType.OK", "def connect(self):\n sock = socket.create_connection((self.host, self.port))\n try:\n self.sock = ssl.wrap_socket(sock, keyfile=self.key_file,\n certfile=self.cert_file,\n cert_reqs=self.cert_reqs,\n ca_certs=self.ca_certs)\n except ssl.SSLError, e:\n raise Error('Error validating SSL certificate for \"' + self.host +\n '\": ' + str(e))\n\n if self.cert_reqs == ssl.CERT_REQUIRED:\n self._VerifyHostName(self.host, self.sock.getpeercert())", "def _connect(self):\n try:\n #print(\"try to connect _connect\")\n sock = gevent.socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect(self.remote_address)\n except socket.error as error:\n logger.warning(\"Couldn't connect to %s: %s.\",\n self._repr_remote(), error)\n else:\n self.initialize(sock, self.remote_service_coord)", "def connect(self, host, port=6667):\n\t\tprint(host)\n\t\tprint(port)\n\t\tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n\t\tself.s = ssl.wrap_socket(sock)\n\t\tself.s.connect((host, port))", "def _connect(self):\n hostport = self.getHost()\n channelOpenData = forwarding.packOpen_direct_tcpip((self.host, self.port), (hostport.host, hostport.port))\n self.connector.connection.openChannel(self, channelOpenData)", "def connection_init(self, port, ip):\n\t\t# Инициализация сокета и сообщение серверу о нашем появлении\n\t\tself.transport = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\t\n\t\t# Таймаут необходим для освобождения сокета.\n\t\tself.transport.settimeout(5)\n\t\t\n\t\t# Соединяемся, 5 попыток соединения, флаг успеха ставим в True если\n\t\t# удалось\n\t\tconnected = False\n\t\tfor i in range(5):\n\t\t\tclient_log.info(f'Попытка подключения №{i + 1}')\n\t\t\ttry:\n\t\t\t\tself.transport.connect((ip, port))\n\t\t\texcept (OSError, ConnectionRefusedError):\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tconnected = True\n\t\t\t\tclient_log.debug(\"Connection established.\")\n\t\t\t\tbreak\n\t\t\ttime.sleep(1)\n\t\t\n\t\t# Если соединится не удалось - исключение\n\t\tif not connected:\n\t\t\tclient_log.critical('Не удалось установить соединение с сервером')\n\t\t\traise ServerError('Не удалось установить соединение с сервером')\n\t\t\n\t\tclient_log.debug('Starting auth dialog.')\n\t\t\n\t\t# Запускаем процедуру авторизации\n\t\t# Получаем хэш пароля\n\t\tpasswd_bytes = self.password.encode('utf-8')\n\t\tsalt = self.username.lower().encode('utf-8')\n\t\tpasswd_hash = hashlib.pbkdf2_hmac('sha512', passwd_bytes, salt, 10000)\n\t\tpasswd_hash_string = binascii.hexlify(passwd_hash)\n\t\t\n\t\tclient_log.debug(f'Passwd hash ready: {passwd_hash_string}')\n\t\t\n\t\t# Получаем публичный ключ и декодируем его из байтов\n\t\tpubkey = self.keys.publickey().export_key().decode('ascii')\n\t\t\n\t\t# Авторизируемся на сервере\n\t\twith socket_lock:\n\t\t\tpresense = {\n\t\t\t\tACTION: PRESENCE,\n\t\t\t\tTIME: time.time(),\n\t\t\t\tUSER: {\n\t\t\t\t\tACCOUNT_NAME: self.username,\n\t\t\t\t\tPUBLIC_KEY: pubkey\n\t\t\t\t}\n\t\t\t}\n\t\t\tclient_log.debug(f\"Presense message = {presense}\")\n\t\t\t# Отправляем серверу приветственное сообщение.\n\t\t\ttry:\n\t\t\t\tsend_message(self.transport, presense)\n\t\t\t\tans = 
get_message(self.transport)\n\t\t\t\tclient_log.debug(f'Server response = {ans}.')\n\t\t\t\t# Если сервер вернул ошибку, бросаем исключение.\n\t\t\t\tif RESPONSE in ans:\n\t\t\t\t\tif ans[RESPONSE] == 400:\n\t\t\t\t\t\traise ServerError(ans[ERROR])\n\t\t\t\t\telif ans[RESPONSE] == 511:\n\t\t\t\t\t\t# Если всё нормально, то продолжаем процедуру\n\t\t\t\t\t\t# авторизации.\n\t\t\t\t\t\tans_data = ans[DATA]\n\t\t\t\t\t\thash = hmac.new(passwd_hash_string, ans_data.encode('utf-8'), 'MD5')\n\t\t\t\t\t\tdigest = hash.digest()\n\t\t\t\t\t\tmy_ans = RESPONSE_511\n\t\t\t\t\t\tmy_ans[DATA] = binascii.b2a_base64(\n\t\t\t\t\t\t\tdigest).decode('ascii')\n\t\t\t\t\t\tsend_message(self.transport, my_ans)\n\t\t\t\t\t\tself.process_server_ans(get_message(self.transport))\n\t\t\texcept (OSError, json.JSONDecodeError) as err:\n\t\t\t\tclient_log.debug(f'Connection error.', exc_info=err)\n\t\t\t\traise ServerError('Сбой соединения в процессе авторизации.')", "def connect(host, port = DEFAULT_SERVER_PORT):\n return factory.connect(host, port, SlaveService)", "def __init__(self, host, user, password, port=22):\n self.host = host\n self.user = user\n self.port = port\n self.password = password", "def connect_from_env(self, host, port):\n password = None\n infos = host.split('@')\n if len(infos) == 2:\n password = infos[0]\n host = infos[1]\n\n self.connect(host, port)\n if password:\n self.password(password)", "def __init__(self, sock, addr, **kws):\r\n super(Connect, self).__init__(sock, **kws)\r\n self.addr = addr\r\n self.connect_attempted = False", "def _create_connection(self):\r\n if not self._hosts:\r\n raise CQLConnectionError(\"At least one host required\")\r\n\r\n hosts = copy(self._hosts)\r\n random.shuffle(hosts)\r\n\r\n for host in hosts:\r\n try:\r\n transport = self._create_transport(host)\r\n new_conn = cql.connect(\r\n host.name,\r\n host.port,\r\n user=self._username,\r\n password=self._password,\r\n consistency_level=self._consistency,\r\n transport=transport\r\n )\r\n new_conn.set_cql_version('3.0.0')\r\n return new_conn\r\n except Exception as exc:\r\n logging.debug(\"Could not establish connection to\"\r\n \" {}:{} ({!r})\".format(host.name, host.port, exc))\r\n\r\n raise CQLConnectionError(\"Could not connect to any server in cluster\")", "def connect(self):\n self.sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.bind(('',9013))\n for addr in XANT_ADDRS:\n tries = 0\n maxtries = 2\n corelog.debug(\"Trying to connect to xant at %s\" % (str(addr)))\n while tries < maxtries:\n tries += 1\n try:\n self.sock.connect(addr)\n break\n except Exception, e:\n corelog.debug(\"Failed on %d try\\n%s\" % (tries,str(e)))\n if tries < maxtries:\n corelog.info(\"Succeeded in connecting to Xant at %s after %d tries\" % (str(addr),tries))\n self.sock.setblocking(False)\n self._connected = True\n self.addr = addr\n return addr\n corelog.warning(\"Failed to connect to Xant\")\n self.addr = None\n return None", "def connect(self):\n try:\n self.ssh = paramiko.SSHClient()\n self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n credentials = self.auth.to_dict()\n credentials.update({'hostname': self.host})\n self.ssh.connect(**credentials)\n self.conn_status = self.CONN_OPEN\n except paramiko.AuthenticationException:\n self.conn_status = self.CONN_FAILED\n except paramiko.ssh_exception.SSHException:\n if self.loop_counter < 3:\n time.sleep(2)\n self.loop_counter += 1\n self.connect()", "def 
establish_connection(self):\n conninfo = self.client\n for name, default_value in items(self.default_connection_params):\n if not getattr(conninfo, name, None):\n setattr(conninfo, name, default_value)\n if conninfo.hostname == 'localhost':\n conninfo.hostname = '127.0.0.1'\n conn = self.Connection(host=conninfo.host,\n userid=conninfo.userid,\n password=conninfo.password,\n login_method=conninfo.login_method,\n virtual_host=conninfo.virtual_host,\n insist=conninfo.insist,\n ssl=conninfo.ssl,\n connect_timeout=conninfo.connect_timeout)\n conn.client = self.client\n return conn", "def __init__(self, host, port):\n self.host = host\n self.port = port", "def __init__(self, host, port):\n self.host = host\n self.port = port", "def connect(self, host, port):\n\t\tif self.is_server:\n\t\t\traise socket.error(\"\"\"A server socket was used in place of a client\n\t\t\t\t\t\t\t socket for connecting\"\"\")\n\n\t\tself.socket.connect((host, port))\n\t\tself.socket_connected = True", "def __init__(self, host, server_port):\n\n # Set up the socket connection to the server\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n \n # TODO: Finish init process with necessary code\n self.host = host\n self.server_port = server_port\n self.run()", "def init_connexion():\n connexion = socket(AF_INET, SOCK_STREAM)\n connexion.bind((hote, port))\n\n return connexion", "def connect(self) -> None:\n self.terminate()\n self._new_client().connect(\n hostname=self.ip,\n port=self.port,\n username=self.username,\n password=self.password,\n look_for_keys=False,\n allow_agent=False)", "def connect(self):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n if self.print_send:\n print(' - connecting...')\n self.socket.settimeout(1)\n self.socket.connect(self.host_port)\n if self.print_send:\n print(' - connected')\n except socket.timeout:\n raise Timeout('Timeout connecting to projector')\n except Exception as err:\n raise Error('Connection failed', err)\n self.expect(b'PJ_OK')\n self.send(b'PJREQ')\n self.expect(b'PJACK')", "def connect(self):\n self.client.connect(self.host, self.port)\n self.client.loop_forever()", "def can_connect_to(host: str, port: int) -> bool:\n\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:\n return bool(sock.connect_ex((host, port)) == 0)", "async def connect(self):\n\n # Display info message\n log.info(\"connect\")\n\n try:\n\n # SSH?\n if self._protocol == \"ssh\":\n\n # Yes\n\n # Then Connect using SSH\n await self.connectSSH()\n\n # Telnet?\n elif self._protocol == \"telnet\":\n\n # Yes\n\n # Then Connect using Telnet\n await self.connectTelnet()\n\n else:\n\n # Unsupported protocol\n\n # Raise an exception\n raise Exception(f\"connect: unsupported protocol: {self._protocol}\")\n\n except Exception:\n\n # There was a problem with a connection method\n\n # Display info message\n log.info(\"connect: connection error\")\n\n raise", "def connect():", "def _connect_socket(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect((self.ip, self.port))\n print(\"Connected to %s at port %d\" % (self.ip, self.port))", "def _connect(self, conn):\n conn_options = {\n 'quiet': self.quiet,\n 'src_name': \"master\",\n 'dest_name': None,\n 'version': \"5.0.0\",\n 'unique': True,\n 'verbose': self.verbose,\n }\n\n certs_paths = {}\n if 'ssl_ca' in dir(conn) and conn.ssl_ca is not None:\n certs_paths['ssl_ca'] = conn.ssl_ca\n if 'ssl_cert' in dir(conn) and conn.ssl_cert is not None:\n 
certs_paths['ssl_cert'] = conn.ssl_cert\n if 'ssl_key' in dir(conn) and conn.ssl_key is not None:\n certs_paths['ssl_key'] = conn.ssl_key\n\n conn_options.update(certs_paths)\n\n master_info = \"%s:%s\" % (conn['host'],\n conn['port'])\n master = None\n\n # Clear socket if used with a local server\n if (conn['host'] == 'localhost' or conn['host'] == \"127.0.0.1\" or\n conn['host'] == \"::1\" or conn['host'] == \"[::1]\"):\n conn['unix_socket'] = None\n\n # Increment num_retries if not set when --prompt is used\n if self.prompt_user and self.num_retries == 0:\n self.num_retries += 1\n\n # Attempt to connect to the server given the retry limit\n for i in range(0, self.num_retries + 1):\n try:\n servers = connect_servers(conn, None, conn_options)\n master = servers[0]\n break\n except UtilError, e:\n print \"FAILED.\\n\"\n if i < self.num_retries and self.prompt_user:\n print \"Connection to %s has failed.\\n\" % master_info + \\\n \"Please enter the following information \" + \\\n \"to connect to this server.\"\n conn['user'] = raw_input(\"User name: \")\n conn['passwd'] = getpass.getpass(\"Password: \")\n else:\n # retries expired - re-raise error if still failing\n raise UtilError(e.errmsg)\n\n return (master, master_info)", "def create_connection(host, port, local_machine, LOGGER):\n\n try:\n general_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n general_socket.settimeout(100)\n\n if local_machine == \"client\":\n general_socket.connect((host, port))\n elif local_machine == \"server\":\n general_socket.bind((host, port))\n general_socket.listen(5)\n except socket.error as soe:\n LOGGER.info(soe)\n sys.exit(1)\n except Exception as exp:\n LOGGER.unknown_error(exp)\n sys.exit(1)\n else:\n if local_machine == \"client\":\n LOGGER.info(f\"Successfully Connected To [{host}:{port}]\")\n elif local_machine == \"server\":\n LOGGER.info(\"Booting Server [...]\")\n LOGGER.info(\"Server Online!\")\n\n return general_socket", "def init_connection(srv_ip, srv_port):\n svrsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n srvaddr = (srv_ip, srv_port)\n svrsock.bind(srvaddr)\n print('Laptop IP:', srv_ip)\n print('Laptop Port:', srv_port)\n svrsock.listen(1)\n print('waiting to be connected...')\n clnsock, clnaddr = svrsock.accept()\n print('\\nconnected!\\n')\n print('IOS IP:', clnaddr[0])\n print('IOS PORT:', clnaddr[1])\n svrsock.settimeout(0)\n clnsock.settimeout(0)\n return svrsock, clnsock, clnaddr", "def connect(self):\n if not self._socket:\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect((self.host, self.port))\n self._socket.settimeout(0.0)", "def connect(self, host, port):\n if self._connectedTo is not None:\n raise ValueError(\"Already connected\")\n self._connectedTo = (host, port)", "def connect(self):\n self.socket.connect((\"localhost\",self.PORT_NUM))", "def connect(self, host, port, uri, timeout):\n _abstract()", "def connect(self, host, port, uri, timeout):\n _abstract()", "def connect(self,ip,port):\n return self.network.connect(ip,port)", "def _try_connect(_host, _port, _client_id):\n try:\n conn = OpenRGBClient(_host, _port, name=_client_id)\n conn.comms.stop_connection()\n except OSError as exc:\n raise CannotConnect from exc\n\n return True", "def connect(self, port=None, options=None):\n pass", "def connect(self,ip,port):\n import time\n import socket\n\n try:\n self.socket_reference.connect((ip, port))\n except socket.error:\n self.close()\n reload(socket)\n raise CommClientException(\"Cannot connect to \" + ip + 
\":\" + str(port))", "def _try_connect(self):\n try:\n return mysql.connect(\n host=self.host,\n database=self.database,\n user=self.user,\n passwd=self.password\n )\n except Error as e:\n raise ConnectionError(f\"Could not connect to {self.connection_string}\") from e", "def _connect(self):\n\n # Get the timeout\n m_timeout = OMPv4.TIMEOUT\n if self.__timeout:\n m_timeout = self.__timeout\n\n # Connect to the server\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(m_timeout)\n try:\n sock.connect((self.__host, int(self.__port)))\n except socket.error, e:\n raise ServerError(str(e))\n self.socket = ssl.wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLSv1)\n\n # Authenticate to the server\n self._authenticate(self.__username, self.__password)", "def connect(self):\n self.ipv4 = socket.gethostbyname(socket.gethostname())\n self.addr = (self.ipv4, HttpServer.PORT)\n self.server.bind(self.addr)\n print(\"[SETUP] server bound to IPv4 address\", self.ipv4, \"on port\", HttpServer.PORT)\n self.server.listen()\n print(\"[SETUP] server listening for connections\")", "def connect(self):\n try:\n self.sock = socket.create_connection((self.host, self.port), self.connect_timeout)\n except SocketTimeout:\n raise InnerConnectionTimeoutError()\n\n if self.timeout is socket._GLOBAL_DEFAULT_TIMEOUT:\n self.sock.settimeout(socket.getdefaulttimeout())\n else:\n self.sock.settimeout(self.timeout)", "def connect(hostname, port, connections_result_passed, connections_result_failed):\n a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n location = (hostname, int(port))\n result_of_check = a_socket.connect_ex(location)\n a_socket.close()\n if result_of_check == 0:\n print(\"Connectivity for \" + hostname + \":\" + str(port) + \" is Successful!!\")\n result = next((True for connections_result_passed in connections_result_passed if hostname in connections_result_passed), False)\n if result:\n result_hostname = [connections_result_passed for connections_result_passed in connections_result_passed\n if hostname in connections_result_passed]\n index = connections_result_passed.index(''.join(result_hostname))\n connections_result_passed.append(connections_result_passed[index] + ',' + str(port))\n connections_result_passed.pop(index)\n else:\n connections_result_passed.append(\" \" + hostname + \":\" + str(port))\n else:\n print(\"Connectivity for \" + hostname + \":\" + str(port) + \" has Failed!!\")\n result = next((True for connections_result_failed in connections_result_failed if\n hostname in connections_result_failed), False)\n if result:\n result_hostname = [connections_result_failed for connections_result_failed in connections_result_failed\n if hostname in connections_result_failed]\n index = connections_result_failed.index(''.join(result_hostname))\n connections_result_failed.append(connections_result_failed[index] + ',' + str(port))\n connections_result_failed.pop(index)\n else:\n connections_result_failed.append(\" \" + hostname + \":\" + str(port))\n return connections_result_passed, connections_result_failed", "def tcp_socket_open(host, port):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(1)\n try:\n return sock.connect_ex((host, port)) == 0\n except socket.timeout:\n return False", "def connect(self, num_retry_attempts=1):\n pass", "def __init__(self, server_addr, server_port, local_port):\n\n if local_port is None:\n self.local_addr = ('localhost', 7700) \n else:\n self.local_addr = ('localhost', local_port)\n self.server_socket = 
(server_addr, server_port)\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.connection.bind(self.local_addr)\n self.message_q = []\n self.failed = False\n \n try:\n self.connection.create_connect(server_port)\n\n except:\n sys.stderr.write('failed to connect to server \\n')\n self.failed = True\n self.connection.close()\n return None", "def connect(self, host, port):\n logging.debug(\"Connecting to %s:%i\", host, port)\n self._hasError = False\n self.tcpsocket = QTcpSocket()\n self.tcpsocket.error.connect(self.processError)\n self.tcpsocket.connected.connect(self._connected)\n self.tcpsocket.connected.connect(lambda: self._stopWaiting.emit())\n self.tcpsocket.readyRead.connect(self.receive)\n\n self.tcpsocket.connectToHost(host, port)\n self.waitForConnection()", "def connect(self):\n auth = self.botconfig.find('auth')\n logging.info(\"Connecting ...\" )\n if not auth.get('server', None):\n # we don't know the server, but the lib can probably figure it out\n super(SleekBot, self).connect()\n else:\n super(SleekBot, self).connect((auth.attrib['server'], auth.get('port', 5222)))", "def __init__(self, host, port):\n self._host = host\n self._port = port", "def __init__(self, host, port):\n\n super(TcpListeningSocket, self).__init__(host, port)\n\n self.socket.bind( (self._host, self._port) )\n self.socket.listen(1)", "def check_host_port(host_ip, port):\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n address_str = \"Host: {} Port: {} \".format(host_ip, port)\n\n try:\n\n s.connect((host_ip, port))\n result = True\n msg = address_str + \"OPEN\"\n\n except socket.error:\n\n result = False\n msg = address_str + \"CLOSED\"\n\n finally:\n\n s.close()\n\n return result, msg", "def _connect(self):\n #print(\"Connecting...\")\n self._connection = reactor.connectTCP(self.host, self.port, self.factory) #@UndefinedVariable", "def __init__(self, server_addr, server_port):", "def connect(self):\n if isinstance(self._sock, socket.socket):\n return\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((self.host, self.port))\n except socket.error, e:\n raise ConnectionError(\"Error %s connecting to %s:%s. %s.\" % (e.args[0], self.host, self.port, e.args[1]))\n else:\n self._sock = sock\n self._fp = self._sock.makefile('r')", "def connect(self, params, connect_timeout=_CONNECT_TIMEOUT):\n if connect_timeout is not None:\n connect_timeout = connect_timeout / 1000 # Convert to seconds\n try:\n self._socket = socket.create_connection(params, connect_timeout)\n self._host = params[0]\n except ValueError:\n try:\n self._socket = socket.socket(socket.AF_UNIX)\n self._socket.settimeout(connect_timeout)\n self._socket.connect(params)\n self._is_socket = True\n except AttributeError:\n raise InterfaceError(\"Unix socket unsupported\") from None\n self._socket.settimeout(None)", "def connect(self):\r\n try:\r\n self.connection = pika.BlockingConnection(pika.ConnectionParameters(self.ip))\r\n print (\"connected!\")\r\n except Exception as error:\r\n print (\"connect() - error - {}\".format(error))", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if len(args):\n # args[0] is a connection object\n\n try:\n self._host = args[0].get_phos_host()\n self._port = args[0].get_phos_port()\n except AttributeError:\n # Not a Phos connection object. 
Too bad.\n pass\n logging.getLogger(\"pyhive.hive\").setLevel(logging.WARN)\n logging.getLogger(\"requests.packages.urllib3.connectionpool\").setLevel(logging.WARN)", "def __init__(self, hostname, username, password, timeout, optional_args):\n raise NotImplementedError", "def _connect(*args):\n return None, None", "def _connect(*args):\n return None, None", "def __init__(self, address=\"lex\", port=8000, **kwargs):\n self.connect(address, port)", "def __init__(self, host, server_port):\n # Set up the socket connection to the server\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.run(host, server_port)\n\n # TODO: Finish init process with necessary code\n #Vegard sier vi ikke skal skrive noe her", "def _connect(self):\n for attempt in range(1, self.num_attempts + 1):\n try:\n conn = self.rabbitmq_context.get_connection(self.timeout)\n chan = conn.channel()\n return (conn, chan)\n except AMQPError as ex:\n if attempt >= self.num_attempts:\n if self.ignore_connection_failure:\n raise ex\n else:\n self.fail(\n \"Could not access RabbitMQ host {0} because {1}\"\n .format(self.rabbitmq_context.host, repr(ex)))\n else:\n time.sleep(self.seconds_between_attempts)", "def _connect(self, **kwargs):\n global _connection\n if self.reuse and _connection:\n self.connection = _connection\n else:\n if pymongo.version_tuple[0] < 3:\n try:\n self.connection = Connection(host=self.host,\n port=self.port, **kwargs)\n # pymongo >= 3.0 does not raise this error\n except PyMongoError:\n if self.fail_silently:\n return\n else:\n raise\n else:\n self.connection = Connection(host=self.host, port=self.port,\n **kwargs)\n try:\n self.connection.is_locked\n except ServerSelectionTimeoutError:\n if self.fail_silently:\n return\n else:\n raise\n _connection = self.connection\n\n self.db = self.connection[self.database_name]\n if self.username is not None and self.password is not None:\n auth_db = self.connection[self.authentication_database_name]\n self.authenticated = auth_db.authenticate(self.username,\n self.password)\n\n if self.capped:\n #\n # We don't want to override the capped collection\n # (and it throws an error anyway)\n try:\n self.collection = Collection(self.db, self.collection_name,\n capped=True, max=self.capped_max,\n size=self.capped_size)\n except OperationFailure:\n # Capped collection exists, so get it.\n self.collection = self.db[self.collection_name]\n else:\n self.collection = self.db[self.collection_name]", "def __init__(self, host='localhost', port=9090, unix_socket=None):\r\n\r\n self.host = host\r\n self.port = port\r\n self.handle = None\r\n self._unix_socket = unix_socket\r\n self._timeout = None", "def connect(self,addr=None,port=None):\n\n self.type = 'connect'\n\n if addr != None:\n self.remote_location = (addr,int(port))\n try:\n s = socket(AF_INET,SOCK_STREAM)\n s.settimeout(1.0)\n s.connect(self.remote_location)\n self.status = 'connected'\n s.settimeout(0.0)\n self.sock = s\n except error as e:\n self.errno = e.errno\n self.status = 'closed'", "async def _connect_routine(self):\n self.event_when_connected = \"connected\"\n\n if self.connect_loop_wait > 0:\n self.event('reconnect_delay', self.connect_loop_wait)\n await asyncio.sleep(self.connect_loop_wait, loop=self.loop)\n\n record = await self.pick_dns_answer(self.default_domain)\n if record is not None:\n host, address, dns_port = record\n port = self.address[1] if self.address[1] else dns_port\n self.address = (address, port)\n self._service_name = host\n else:\n # No DNS records left, stop iterating\n 
# and try (host, port) as a last resort\n self.dns_answers = None\n\n if self.use_ssl:\n ssl_context = self.get_ssl_context()\n else:\n ssl_context = None\n\n if self._current_connection_attempt is None:\n return\n try:\n await self.loop.create_connection(\n lambda: self, self.address[0], self.address[1], ssl=ssl_context,\n server_hostname=self.default_domain if self.use_ssl else None\n )\n self.connect_loop_wait = 0\n except socket.gaierror:\n self.event('connection_failed', 'No DNS record available for %s' % self.default_domain)\n except OSError as e:\n _LOGGER.debug('Connection failed: %s', e)\n self.event(\"connection_failed\", e)\n if self._current_connection_attempt is None:\n return\n self.connect_loop_wait = self.connect_loop_wait * 2 + 1\n self._current_connection_attempt = asyncio.ensure_future(\n self._connect_routine(), loop=self.loop,\n )", "def connect_to_server(host, port) -> socket.SocketIO:\n # Create a TCP/IP socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Connect the socket to the port where the server is listening\n server_address = (host, port)\n print('[CLIENT LOG] connecting to {} port {}'.format(host,port)) \n sock.connect(server_address)\n return sock", "def connect(self, host, port, db):\r\n params = self.make_connection_params(host, port, db)\r\n return self.get_connection(params)", "def connect(self) -> None:\n self.client_socket.connect((self.server_name, self.server_port))", "def __init__(self, port, host='', ping_timer=25):\n # TODO get these values from config\n self.host = host\n self.port = port\n self.socket = None\n self.all_connections = []\n self.all_clients = {}\n self.ping_timer_time = ping_timer", "async def connect(self, conn_factory):\n assert False", "def connect(self, addr):\n\t\tif self.client is not None:\n\t\t\tif self.client.did_handshake:\n\t\t\t\traise UsageError(\"Already Connected!\")\n\t\ttry:\n\t\t\tif DEBUG:\n\t\t\t\tprint \"creating Factory...\"\n\t\t\tself.factory = P22PClientFactory(self, addr)\n\t\t\tif self.factory.isSecure and (ssl is not None):\n\t\t\t\tif DEBUG:\n\t\t\t\t\tprint \"\\t->creating SSL-Context...\"\n\t\t\t\tcontext = ssl.ClientContextFactory()\n\t\t\telse:\n\t\t\t\tcontext = None\n\t\t\t# self.reactor.connectTCP(ip,port,self.factory,timeout=10)\n\t\t\tif DEBUG:\n\t\t\t\tprint \"calling connectWS...\"\n\t\t\td = websocket.connectWS(self.factory, context, timeout=10)\n\t\t\tif DEBUG:\n\t\t\t\tprint \"\\t->Result: \", d\n\t\t\ttry:\n\t\t\t\twhile self.client is None:\n\t\t\t\t\t# wait for self.client to be set\n\t\t\t\t\tpass\n\t\t\t\tif DEBUG:\n\t\t\t\t\tprint \"\\t->client set\"\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\t_stop()\n\t\t\t\treturn\n\t\t\tif self.client is False:\n\t\t\t\tself.client = None\n\t\t\t\traise ConnectionError(\"Cant connect to Server!\")\n\t\t\tself.reactor.addSystemEventTrigger(\n\t\t\t\t\"before\", \"shutdown\", self.disconnect\n\t\t\t\t)\n\t\t\ttry:\n\t\t\t\twhile not self.client.did_handshake:\n\t\t\t\t\tpass\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\t_stop()\n\t\t\t\treturn\n\t\texcept:\n\t\t\tself.factory = None\n\t\t\tself.client = None\n\t\t\traise", "def connect(self):\n try:\n sock = socket.create_connection((self.host, self.port), self.connect_timeout)\n except SocketTimeout:\n raise InnerConnectionTimeoutError()\n\n if self.timeout is socket._GLOBAL_DEFAULT_TIMEOUT:\n sock.settimeout(socket.getdefaulttimeout())\n else:\n sock.settimeout(self.timeout)\n\n if self._tunnel_host:\n self.sock = sock\n self._tunnel()\n self.sock = ssl.wrap_socket(sock, 
self.key_file, self.cert_file)", "def connect(host, port, service=VoidService, config={}, ipv6=False, keepalive=False):\n s = SocketStream.connect(host, port, ipv6=ipv6, keepalive=keepalive)\n return connect_stream(s, service, config)", "def socket_port(ip, port):\n socket.setdefaulttimeout(3) \n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = s.connect_ex((ip, port))\n if result == 0:\n print(ip, u':', port, u'port is occupied')\n return False\n return True\n except Exception as error:\n print('error:', error)\n return False", "def __init__(self, hostname, debugOut=None, noProto=False, connectNow=True, portNumber=4403):\n\n logging.debug(f\"Connecting to {hostname}\")\n\n server_address = (hostname, portNumber)\n sock = socket.create_connection(server_address)\n\n # Instead of wrapping as a stream, we use the native socket API\n # self.stream = sock.makefile('rw')\n self.stream = None\n self.socket = sock\n\n StreamInterface.__init__(\n self, debugOut=debugOut, noProto=noProto, connectNow=connectNow)", "def waithp(host, port):\n debug(\"waithp({0},{1})\".format(safestr(host), safestr(port)))\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n sock.connect((host, int(port)))\n except: # pylint: disable=bare-except\n a, b, c = sys.exc_info()\n traceback.print_exception(a, b, c)\n sock.close()\n raiseRecoverableError('Server at {0}:{1} is not ready'.format(safestr(host), safestr(port)))\n sock.close()", "def _connect_to_peer(self, port, ip_address):\n getLogger(__name__).info(\"Attempting to connect to peer {}:{}...\"\n .format(ip_address, port))\n conn = self._create_new_socket()\n connected = False\n\n for i in range(self.CONNECT_ATTEMPTS):\n try:\n conn.connect((ip_address, port))\n connected = True\n break\n except (ConnectionRefusedError, OSError):\n getLogger(__name__).info(\"Attempt {}/{} failed\"\n .format(i + 1, self.CONNECT_ATTEMPTS))\n if i < self.CONNECT_ATTEMPTS:\n sleep(i + 1)\n\n if connected:\n self._set_socket(conn)\n getLogger(__name__).info(\"Connection established\")\n else:\n getLogger(__name__).info((\"Connection could not be established, \"\n \"starting in offline mode.\"))" ]
[ "0.73735076", "0.69012636", "0.6894325", "0.6791065", "0.67294514", "0.6698651", "0.66936195", "0.665516", "0.6614224", "0.66082466", "0.6591837", "0.65764606", "0.65412575", "0.64659244", "0.64588004", "0.642933", "0.6411092", "0.64094645", "0.63941145", "0.6381039", "0.6369129", "0.63575023", "0.6346798", "0.6340565", "0.6327857", "0.6314461", "0.6312976", "0.6271688", "0.62666756", "0.62619126", "0.6252464", "0.6238557", "0.6231947", "0.6225219", "0.6221556", "0.62036234", "0.6199467", "0.6199467", "0.6198923", "0.6194261", "0.61909336", "0.61890036", "0.6185331", "0.6183405", "0.6181976", "0.61804986", "0.61764175", "0.6175639", "0.61700785", "0.61697465", "0.6165117", "0.616455", "0.61624885", "0.61588013", "0.6155351", "0.6155351", "0.6155055", "0.6153519", "0.61438966", "0.61422074", "0.6139912", "0.613718", "0.6132008", "0.6114944", "0.61129606", "0.6108781", "0.60994536", "0.6094427", "0.60923314", "0.6091924", "0.6089934", "0.6084391", "0.60812545", "0.6072025", "0.6066927", "0.60641927", "0.60630125", "0.60626954", "0.60592264", "0.6059198", "0.6053401", "0.6053401", "0.6048851", "0.60255146", "0.601494", "0.60146874", "0.60125434", "0.60123277", "0.60106176", "0.6010531", "0.60017043", "0.5998067", "0.59722084", "0.59717655", "0.5959962", "0.5959618", "0.5955202", "0.5951707", "0.5946", "0.594524", "0.59418243" ]
0.0
-1
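Almost every connect-oriented snippet in the negatives above reduces to the same pattern: create a TCP socket, attempt (host, port) with a timeout, and clean up on failure. For reference, a minimal self-contained sketch of that pattern in plain Python; the helper name, default timeout, and return convention are illustrative assumptions, not taken from any one snippet.

import socket
from typing import Optional

def try_connect(host: str, port: int, timeout: float = 5.0) -> Optional[socket.socket]:
    # Build a TCP socket and attempt the connection within `timeout` seconds.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(timeout)
    try:
        sock.connect((host, port))
    except OSError:
        # OSError covers refused connections, timeouts and DNS failures alike.
        sock.close()
        return None
    sock.settimeout(None)  # restore blocking mode for normal use
    return sock

Callers can then test the return value for None instead of wrapping every connect call in their own try/except.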
Call the protocol disconnection, and then stop the transport itself.
def disconnect(self, receipt=None, headers=None, **keyword_headers): Protocol11.disconnect(self, receipt, headers, **keyword_headers) self.transport.stop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop(self):\n self._transport = None\n self._cleanup()\n self._disconnected_callback = None", "def transportProtocolDisconnected(self, obj):\n if obj:\n log.msg(\"Protocol disconnected, losing connection..\")\n self.connection.loseConnection()\n try:\n self.factory.stopTrying()\n except Exception, e:\n pass", "def disconnect(self):\r\n self._manual_disconnect = True\r\n self.transport.close()", "def disconnect(self):\n self.controlProtocol.disconnect()", "def disconnect(self):\n self.connected = False\n try:\n self.protocol.send_message(self.sock, '__!goodbye__')\n data = self.protocol.recover_message(self.sock)\n except:\n pass\n self.sock.close()\n self.sock = None", "def stop(self):\n transport = self._transport\n\n # Send all pending data\n if hasattr(transport, \"stop\"):\n transport.stop()\n\n # Clear all internal state\n self._transport = self.client = None", "def _disconnect(self):\n self._factory.stopTrying()\n self._connection.disconnect()", "async def disconnect(self):\n\n # Debug info message\n log.info(\"disconnect\")\n\n # SSH?\n if self._protocol == \"ssh\":\n\n # Yes\n\n # Then disconnect using SSH\n await self.disconnectSSH()\n\n # Telnet?\n elif self._protocol == \"telnet\":\n\n # Yes\n\n # Then disconnect using Telnet\n await self.disconnectTelnet()\n\n else:\n\n # Unsupported protocol\n\n # Raise an exception\n raise Exception(f\"Unsupported protocol: {self._protocol}\")", "def stop(self):\n self.conn.stop()", "def stop(self):\n if self._connected:\n self._client.loop_stop()\n self._client.disconnect()\n self._connected = False\n logger.info(\"Connection with MQTT Broker closed.\")", "def stop(self):\n if self._host:\n #threads.blockingCallFromThread(reactor, self._factory.stopTrying)\n threads.blockingCallFromThread(reactor, self._disconnect)\n else:\n self._database = None\n self._stock_exchange.stop()\n self._stock_exchange = None", "def stop(self):\n if not self:\n return\n\n self._disconnect_clients()\n self._transport.close()\n self._stopped = True", "def stop(self):\n self.connection.abort()", "def stop(self):\n self.connection.abort()", "def stop(self) -> None:\n self.mqttc.disconnect()", "def disconnect(self):\n \n self.net.active(False)", "def stop(self):\n self.bus.log('Stopping down OLA Plugin.')\n self.bus.unsubscribe(\n self.channel_names['channel_request'],\n self.handle_channel\n )\n self.bus.unsubscribe(\n self.channel_names['channel_set'],\n self.handle_channel_set\n )\n self.ola_connection.disconnect()\n # wait for thread to finish.\n self.ola_connection.join()", "def disconnect(self):\n\n con = self.connection\n if con.connected:\n con.log.info('disconnecting...')\n con.switchto(self.initial_state)\n con.sendline('exit')\n sleep(2)\n con.log.info('closing connection...')\n con.spawn.close()", "def disconnect(self):\n self.stop()\n self._send_command('exit')\n self.sock.close()\n self.disconnected = True", "def _on_disconnection(self, *_):\n\n if self._proxy is not None:\n self._proxy.close()\n self._proxy = None", "def disconnect(self):\n log.debug(\"Disconnecting %s\" % self)\n self.connection.unbind_s()\n self.connection = None", "async def disconnect(self):\n try:\n #print(\"Send disconnect command\")\n await self._writeControlPacket(ControlPacketsGenerator.getDisconnectPacket())\n except Exception as err:\n # TODO: catch this error if it is something like already disconnected\n #print(\"Unknown error\")\n raise err\n\n try:\n # Disconnect from this side as well.\n #print(\"Disconnect from this side as well\")\n self.core.ble.disconnect()\n 
except Exception as err:\n #print(\"Unknown error\")\n raise err", "def stopListening(self):\n self._protocol.stopProtocol()\n return succeed(None)", "def disconnect(self):\n logger.debug(\n \"TCPSPP: Terminate connection to %s:%d\",\n *self.sock.getpeername(),\n )\n self.sock.shutdown(socket.SHUT_RDWR)\n self.sock.close()", "def stop(self):\n self.udpSock.close()", "def disconnect(self):\n self.connected = False\n self.socket.close()", "def disconnect(self, timeout=-1):\n if self.switch_socket:\n self.switch_socket.close()\n self.switch_socket = None\n self.switch_addr = None\n with self.packets_cv:\n self.packets = []\n with self.connect_cv:\n self.connect_cv.notifyAll()\n if self.bridge_socket:\n self.bridge_socket.close()", "def stop_connection(self):\n self.libEDK.EE_EngineDisconnect()\n self.libEDK.EE_EmoStateFree(self.e_state)\n self.libEDK.EE_EmoEngineEventFree(self.e_event)", "def disconnect(self):\n stream=self.get_stream()\n if stream:\n stream.disconnect()", "def disconnect(self):\n if self._connected:\n self.socket.close()\n self._connected = False\n self._subscribed = False\n self._running = False\n self.isy.connection_events.notify(ES_DISCONNECTED)", "def disconnect(self):\n if self._is_running:\n self._is_running = False\n\n self._data_thread.stop()\n self._command_thread.stop()", "def _disconnect(self):\n self.socket.send_json({\"worker_id\": self.socket_id, \"message\": \"disconnect\"})\n self.socket.close()\n self.context.term()\n exit()", "def remote_destroy(self):\r\n self.transport.loseConnection()", "def onstop(self, sender, **kwargs):\n try:\n self._soc.close()\n self._socket.close()\n\n except Exception as e:\n _log.error(\"Error : {}\".format(e))", "def shutdown(self):\n self.connected = False\n self.protocol.send_message(self.sock, '__!shutdown__')\n data = self.protocol.recover_message(self.sock)\n self.sock.close()\n self.sock = None", "def stop_network(self):\n self.net.stop()\n cleanup()", "def disconnect(self):\n print(\"<== Conexión cerrada ==>\")\n self.is_alive = False\n self._socket.close()", "def disconnect(self):\n _error = None\n try:\n self.connection.shutdown()\n except Exception as e:\n log.error(e, exc_info=True)\n _error = 'disconnect error: %s' % e\n finally:\n if _error is not None and config.DEBUG_TO_CONSOLE:\n print (_error)\n self.is_connected = False\n self.connection = None", "def perspective_protocolDied(self, remoteProtocol):\r\n self._endpoint.destroyProtocol(remoteProtocol)", "def __del__(self):\n if self.connection_obj:\n self.logger.info('Disconnecting from host {0}:{1}'.format(self.host, self.port))\n Disconnect(self.connection_obj)", "def teardown(self):\n try:\n self.loop.run_until_complete(self.webhook_connection.disconnect())\n except Exception:\n print_exc()\n raise", "async def disconnect(self):\n if self._state == const.STATE_DISCONNECTED:\n return\n if self._reconnect_task:\n self._reconnect_task.cancel()\n await self._reconnect_task\n self._reconnect_task = None\n await self._disconnect()\n self._state = const.STATE_DISCONNECTED\n\n _LOGGER.debug(\"Disconnected from %s\", self.host)\n self._avr.dispatcher.send(const.SIGNAL_TELNET_EVENT, const.EVENT_DISCONNECTED)", "def on_disconnect(self, raw_msg, server, port, **kwargs):", "def remote_disconnect(self, protocol, remoteID):\r\n remoteID = UUID(bytes=remoteID)\r\n\r\n protocol.unregisterConnection(self, remoteID)\r\n\r\n assert remoteID in self._protocols[protocol]\r\n self._protocols[protocol].remove(remoteID)\r\n\r\n if not self._protocols[protocol]:\r\n del 
self._protocols[protocol]\r\n\r\n if not self._protocols:\r\n self.stop()", "async def disconnect(self):", "def stop(self):\n self.logger.info('stopping')\n self._stopping = True\n if self._channel:\n self._channel.close()\n self._closing = True\n self._connection.close()\n self._connection.ioloop.start()\n self.logger.info('stopped')", "def stop_messenger(self):\n if self.connected:\n self.messenger.stop()\n self.connected = False", "def disconnect(self) -> None:\n ...", "def disconnect(self):\n self.__connection.disconnect()", "def disconnect(self):\n\t\tif not self.did_handshake:\n\t\t\traise UsageError(\"Not connected!\")\n\t\ttry:\n\t\t\tself.sendMessage(ID_CTRL + \"DISCONNECT\", True)\n\t\tfinally:\n\t\t\tself.cid = None\n\t\t\tself.did_handshake = False\n\t\t\tself.joinstate = 0\n\t\t\tself.createstate = 0\n\t\t\tself.sendClose()", "def __del__(self):\n\n if hasattr(self, '_socket') and self._socket is not None:\n try:\n self.unbind()\n except (exceptions.PDUError, exceptions.ConnectionError) as error:\n if len(getattr(error, 'args', tuple())) > 1:\n logging.warning('({0}) {1}. Ignored'.format(error.args[1], error.args[0]))\n else:\n logging.warning('{error}. Ignored'.format(error=error))\n self.disconnect()", "def stop_protocols(self, context: ResourceCommandContext) -> None:\n self.handler.stop_protocols()", "async def async_disconnect(self) -> None:\n\n def stop() -> None:\n \"\"\"Stop the MQTT client.\"\"\"\n # Do not disconnect, we want the broker to always publish will\n self._mqttc.loop_stop()\n\n def no_more_acks() -> bool:\n \"\"\"Return False if there are unprocessed ACKs.\"\"\"\n return not any(not op.is_set() for op in self._pending_operations.values())\n\n # stop waiting for any pending subscriptions\n await self._subscribe_debouncer.async_cleanup()\n # reset timeout to initial subscribe cooldown\n self._subscribe_debouncer.set_timeout(INITIAL_SUBSCRIBE_COOLDOWN)\n # stop the unsubscribe debouncer\n await self._unsubscribe_debouncer.async_cleanup()\n # make sure the unsubscribes are processed\n await self._async_perform_unsubscribes()\n\n # wait for ACKs to be processed\n async with self._pending_operations_condition:\n await self._pending_operations_condition.wait_for(no_more_acks)\n\n # stop the MQTT loop\n async with self._paho_lock:\n await self.hass.async_add_executor_job(stop)", "def do_disconnect(self, *noargs):\n self.link.close()", "async def disconnect(self) -> None:\n self.client.loop_stop()\n self.client.disconnect()\n self.connected = False\n self.log.debug(\"Disconnected.\")", "def disconnect(self):\n self.is_connected = False\n self.connection.disconnect(True)\n print('Disconnected from the Connected server')", "def shutdown(self):\n if self.alive:\n libplasma.disconnect(self.conn)\n self.alive = False", "def stop(self):\n\n if not self._is_running:\n return\n\n pushcenter_logger.debug(\"[NURESTPushCenter] Stopping...\")\n\n self._thread.stop()\n self._thread.join()\n\n self._is_running = False\n self._current_connection = None\n self._start_time = None\n self._timeout = None", "def stop(self):\n self.running = False\n self.hop_channel(\"auto\")", "def disconnect(self):\n\n\t\tself._alive = False\n\t\tself._ser.close()", "def disconnect(self):\n\n\t\tself._alive = False\n\t\tself._ser.close()", "def connectionLost(self, reason):\r\n _Protocol.remote_destroy(self)", "def disconnect(self,):\n # check if connection ACKed\n if self.connack_rec:\n # if ACKed, disconnect\n self.send_q.put(Message.DisconnectFrame().encode())\n\n # if connected, disconnect\n if 
self.connected:\n # kill the TCP thread\n self.stop_thread = True\n self.tcp_thread.join()\n\n # close the socket\n self.sock.close()\n\n # reset flags and subscriptions\n self.connack_rec = False\n self.connected = False\n self.topics = []\n self.sub_req = 0\n self.unsub_req = 0\n else:\n return False", "def timeoutConnection(self):\n self.transport.stopProducing()", "def disconnect(self):\n _abstract()", "def disconnect(self):\n _abstract()", "def disconnect(self) -> None:\n self._log.info(\"Disconnecting from robot...\")\n self._is_running = False\n self._transport.close()\n\n self._read_event.set()\n if self._read_thread.is_alive():\n self._read_thread.join(2)\n self._keep_alive_event.set() # Will cancel keep alive from sleeping\n if self._keep_alive_thread.is_alive():\n self._keep_alive_thread.join(2)\n self._log.info(\"Disconnected.\")", "def stop(self):\n self.disconnect()", "def stop(self):\n self.disconnect()", "def disconnect(self):\n logger.debug(\"disconnecting\")\n if self.connected and self.conn:\n self.can_listen = False\n self.conn.close()\n self.connected = False", "def _disconnect(self):\n raise NotImplementedError(\"ERROR: Unimplemented function.\")", "def stop() -> None:\n # Do not disconnect, we want the broker to always publish will\n self._mqttc.loop_stop()", "def _disconnect(self) -> None:\n self._agent.stop()\n self._agent_thread.join()", "def disconnect(self):\n if self.proxy is not None:\n self.proxy.shutdown()\n self.proxy = None\n self._connected = False\n self.lockfile = {}", "def _plugin_stop(handle):\n GPIO.cleanup()\n _LOGGER.info('MAX31865 (async) Disconnected.')", "async def disconnectTelnet(self):\n\n # Debug info message\n log.info(\"disconnectTelnet\")\n\n # Connection previously open in Telnet?\n if self._writer:\n\n # Yes\n\n # Then close the SSH connection\n self._writer.close()\n\n # No more connection to disconnect\n self._writer = None", "def disconnect(self):\n if self.is_connected:\n try:\n self.client.unregister()\n finally:\n if self.client.is_running:\n self.client.stop()\n self.hub.disconnect()", "async def disconnect(self):\r\n from asyncio import shield\r\n if self._session is not None:\r\n await shield(self._session.connector.close())\r\n await shield(self._session.close())\r\n self._session = None", "def device_disconnect(self):\n pass", "def disconnect(self):\n r = requests.post(f'{self.SERVER_ADDR}/api/disconnect', headers={'Authorization': 'Token ' + self.token})\n r.raise_for_status()", "def close(self):\n asyncio.ensure_future(self.__inner_protocol.release())", "def close(self):\n try:\n if not self.telnet:\n return\n self.telnet.close()\n self.telnet = None\n except (ConnectionError, Exception):\n error_message = \"Remote device is disconnected abnormally\"\n LOG.error(error_message, error_no=\"00401\")", "def stop_mqtt(self):\n if self.mqtt:\n logger.debug('Disconnecting from MQTT server')\n self.mqtt.disconnect()\n self.mqtt.loop_stop()", "def disconnect(self):\n self.connection.close()", "def stop(self) -> None:\n self._stream.stop()", "async def stop(self) -> None:\n for broadcast_port in self._broadcast_ports:\n transport = self._transports.get(broadcast_port)\n\n if transport and not transport.is_closing():\n logger.info(\"stopping the udp bridge on port %s\", broadcast_port)\n transport.close()\n else:\n logger.info(\"udp bridge on port %s not started\", broadcast_port)\n\n self._is_running = False", "def disconnect_socket(self):\n self.running = False\n\n if self.socket is not None:\n self.socket.close()\n\n 
self.current_host_and_port = None\n self.socket = None\n self.notify('disconnected')", "def loseConnection(self):\n self.transport.loseConnection()", "def disconnect(self):\n\t\tself.client.disconnect()\n\t\tself.log.info(\"disconnected OBS Websocket _connection.\")", "def stop(self):\n\n # Close our synchronous connection if we've got one\n #if self._nonpool:\n # self._nonpool.close()\n # self._nonpool = None\n # self._nonpool_lastused = None\n\n if not self._started:\n return\n #self._pool.close()\n self._started = False\n #del self._pool", "def close_UDP_connection(self):\n self.beacon.stop_beaconing()", "async def do_disconnect(self):\n # We shouldn't be asked to disconnect if it wasn't us who connected\n # originally\n if not self.connect_requested:\n log.info(\n \"%s disconnecting, although we didn't connect originally\", self.name\n )\n self.connect_requested = False\n # Actually do it\n await self.disconnect()", "def destroy(self):\r\n self._protocol.destroy()\r\n\r\n self._protocol = None", "def disconnect(self):\n raise NotImplementedError", "def disconnect(self):\n raise NotImplementedError", "def _telegram_stop_callback(self, update: Update, _: CallbackContext):\n\n rospy.loginfo(\"Stopping Telegram ROS bridge for chat id {}\".format(self._telegram_chat_id))\n update.message.reply_text(\n \"Disconnecting chat_id {}. So long and thanks for all the fish!\"\n \" Type /start to reconnect\".format(self._telegram_chat_id)\n )\n self._telegram_chat_id = None", "def rtt_stop(self):\n self.rtt_control(enums.JLinkRTTCommand.STOP, None)", "def on_disconnect( client, userdata, rc ):\n logging.info( \"Disconnected from Broker. Returned code: %s\\n\" %rc )\n client.connected_flag = False\n client.disconnect_flag = True", "def close_connection(self):\r\n self.running = False\r\n self.client_socket.close()", "def stop(self):\n\t\tself.stream.stop_stream()" ]
[ "0.76196885", "0.73054206", "0.7146738", "0.7062836", "0.70271856", "0.6989695", "0.6907226", "0.6885972", "0.6851431", "0.67708176", "0.6740143", "0.6737467", "0.6714717", "0.6714717", "0.66998696", "0.6699246", "0.6678658", "0.66435677", "0.66178954", "0.66106606", "0.6605769", "0.6591837", "0.65867627", "0.6572208", "0.6571212", "0.65241694", "0.6517839", "0.65175724", "0.65017027", "0.6487301", "0.64808166", "0.6473158", "0.64729583", "0.6471146", "0.6439251", "0.6437304", "0.64275414", "0.6422135", "0.6409665", "0.6409232", "0.6392118", "0.6385948", "0.63764226", "0.63728255", "0.6362488", "0.63427305", "0.63420224", "0.63341635", "0.6323112", "0.6316566", "0.6316428", "0.6311888", "0.6301328", "0.6296932", "0.62882984", "0.62880623", "0.6286827", "0.6283101", "0.6280385", "0.6276677", "0.6276677", "0.627514", "0.6271995", "0.62679213", "0.6267539", "0.6267539", "0.6259145", "0.62557405", "0.62557405", "0.62546486", "0.6252893", "0.6244523", "0.62396514", "0.623029", "0.622453", "0.62239206", "0.62128866", "0.62097955", "0.62046415", "0.6200259", "0.61969095", "0.61952746", "0.6192301", "0.6192144", "0.61876947", "0.6183362", "0.6182543", "0.6181857", "0.6172756", "0.6172169", "0.6171945", "0.61709625", "0.61680984", "0.61633617", "0.61633617", "0.61620057", "0.616108", "0.6156926", "0.614876", "0.61467147" ]
0.633989
47
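The positive document for the query above pairs a protocol-level disconnect with a transport-level stop via a Protocol11/Transport pair. A minimal sketch of that two-step teardown with hypothetical Transport, Protocol and Connection classes; the frame text and method names are assumptions for illustration, not any library's real API.

from typing import Optional

class Transport:
    def __init__(self) -> None:
        self.running = True

    def send(self, frame: str) -> None:
        # Stand-in for writing a frame to the wire.
        print(f"sending frame: {frame}")

    def stop(self) -> None:
        # Flush anything pending and shut the underlying connection down.
        self.running = False

class Protocol:
    def __init__(self, transport: Transport) -> None:
        self.transport = transport

    def disconnect(self, receipt: Optional[str] = None) -> None:
        # Protocol-level goodbye: emit a DISCONNECT frame, optionally asking for a receipt.
        self.transport.send(f"DISCONNECT receipt:{receipt or ''}")

class Connection(Protocol):
    def disconnect(self, receipt: Optional[str] = None) -> None:
        # Call the protocol disconnection, and then stop the transport itself.
        Protocol.disconnect(self, receipt)
        self.transport.stop()

After conn = Connection(Transport()) and conn.disconnect(), conn.transport.running is False, mirroring the disconnect-then-stop order of the original method.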
Returns the network connected to the tenant router. Assumes a single router with a single tenant network connected.
def _tenant_network(self): port = self._connection.network.ports.find_by_device_owner('network:router_interface') if port: return self._connection.network.networks.get(port.network_id) else: raise errors.ImproperlyConfiguredError('Could not find tenancy network')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _external_network(self):\n try:\n router = next(self._connection.network.routers.all())\n except StopIteration:\n raise errors.ImproperlyConfiguredError('Could not find tenancy router.')\n return self._connection.network.networks.get(router.external_gateway_info['network_id'])", "def get_network(self):\n return self.get_ip_network()[-1]", "def get_network(self):\n return self._network", "def network(self) -> str:\n return pulumi.get(self, \"network\")", "def network(self):\n return self.__network", "def network(self):\n return self._network", "def network(self):\n return self._network", "def network(self):\n return self._network", "def get_network(self) -> Optional[str]:\n return self.get_value(self._network_attribute)", "def network(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"network\")", "def get_network_type(self):\n net_type = self._data['type']\n if net_type == 'Shared':\n return 'guest'\n elif net_type == 'Isolated':\n return 'isolated'", "def GetCurrentNetwork(self, iwconfig=None):\n current_network = str(self.wifi.GetCurrentNetwork(iwconfig))\n return current_network", "def getnetwork(ipaddr):\n return '192.168.1.0/24'", "def get_network_id(self):\n\t\treturn call_sdk_function('PrlVirtNet_GetNetworkId', self.handle)", "def get_network_on_vc(options):\n datacenter = get_datacenter(options)\n networks = datacenter.network\n\n name = get_network_name(options)\n for network in networks:\n if re.search(name, network.name):\n return network", "def network(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network\")", "def network_interface(self): \n return self._network_interface", "def network_address(self):\n\n return self._network_address", "def network(self):\n address = unicode(\"%s/%s\" % (self.address, _get_cidr(self.netmask)))\n return IPv4Network(address, strict=False)", "def private_network(self) -> str:\n return pulumi.get(self, \"private_network\")", "def get_network_default_gateway(self):\n return self.mycam.devicemgmt.GetNetworkDefaultGateway()", "def get_network_type(self):\n\t\treturn call_sdk_function('PrlVirtNet_GetNetworkType', self.handle)", "def get_network(self) -> EthereumNetwork:\n return EthereumNetwork(int(self.w3.net.version))", "def cosmo_find_external_net(self):\n nets = self.list_networks()['networks']\n ls = [net for net in nets if net.get('router:external')]\n if len(ls) == 1:\n return ls[0]\n if len(ls) != 1:\n raise RuntimeError(\n \"Expected exactly one external network but found {0}\".format(\n len(ls)))", "def get_stored_network(cls):\n store = cls.get_store()\n try:\n network_dict = store['network']\n except KeyError:\n network_dict = {}\n network_name = network_dict.get(\n 'value', ChainID.MAINNET.name)\n network = ChainID[network_name]\n return network", "def network_access(self):\n return self._network_access", "def public_network(self, **kwargs):\r\n return self._get_network('frontend', **kwargs)", "def get_cidr_graphs_connection(self):\n return self.m_connection.cidr_graphs", "def transit_router_cidr_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"transit_router_cidr_id\")", "def getPeerToPeerNetwork(self):\r\n raise NotImplementedError()", "def getNodeNetworks(self,node):\n data = self.connect('get','nodes/%s/network' % (node),None)\n return data", "def get_gateway(self, node):\n router = None\n\n for ip, n in self._nodes_dict.items():\n\n if n is node:\n continue\n\n if n.is_gateway:\n return ip\n\n if not router and n.is_router:\n router = ip\n\n return router", "def getNets(self):\n\t\treturn 
NetLoader.listNetworks()", "def virtual_network(self):\n return self.broker.virtual_network(**{\"VirtualNetworkMemberID\": self.VirtualNetworkMemberID})", "def guess_network(self):\n # decide what sort of network we are going to use\n # return the actual type\n # right now we just use the first host only network and that's it\n host_only = list(HostOnlyNetwork.find_networks())\n if host_only:\n return host_only[0]\n else:\n return NewHostOnlyNetwork()", "def network_config(self) -> Optional[pulumi.Input['NodeNetworkConfigArgs']]:\n return pulumi.get(self, \"network_config\")", "def get_net(con):\n try:\n return con.virtual_network_read(fq_name=conf.get('default_net', 'UNEXPECTED_VALUE'))\n except NoIdError:\n log.debug('Unable to find net.')\n return None", "def network_config(self) -> pulumi.Output['outputs.PrivateCloudNetworkConfig']:\n return pulumi.get(self, \"network_config\")", "def network_config(self) -> 'outputs.NetworkConfigResponse':\n return pulumi.get(self, \"network_config\")", "def network_config(self) -> 'outputs.NetworkConfigResponse':\n return pulumi.get(self, \"network_config\")", "def net(self):\n if self._net is None:\n self._net = Net(name=self.name)\n return self._net", "def host_network(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"host_network\")", "def host_network(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"host_network\")", "def network_id(tenant_id, auth_token, network_name):\r\n content = common_utils.do_request(\r\n tenant_id, auth_token,\r\n method='GET',\r\n body='', service=\"network\",\r\n path='networks.json')\r\n for network in range(len(content[\"networks\"])):\r\n if content[\"networks\"][network][\"name\"] == network_name:\r\n network_id = content[\"networks\"][network][\"id\"]\r\n return network_id", "def __get_external_network_test_helper__(self):\n external_network_id = None\n external_network_list = self.neutron_operations.find_networks(router_external=True)\n if len(external_network_list) != 0:\n external_net_region = self.conf[PROPERTIES_CONFIG_REGION][PROPERTIES_CONFIG_REGION_EXTERNAL_NET]\n if self.region_name in external_net_region:\n ext_net_config = external_net_region[self.region_name]\n for external_network in external_network_list:\n if external_network['name'] == ext_net_config:\n external_network_id = external_network['id']\n if external_network_id is None:\n external_network_id = external_network_list[0]['id']\n self.assertIsNotNone(external_network_id, \"No external networks found\")\n\n return external_network_id", "def get_network_interfaces(self):\n return self.mycam.devicemgmt.GetNetworkInterfaces()", "def get_active_networks(self):\n return self.call(self.context,\n self.make_msg('get_active_networks', host=self.host),\n topic=self.topic)", "def network_get(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.get_network(**kwargs)", "def networks(self): # type: () -> t.Optional[t.Dict[str, t.Dict[str, t.Any]]]\n return self.network_settings.get('Networks')", "def returnNetworkNode(self):\n\n networkNodes = cmds.ls(type=\"network\")\n for node in networkNodes:\n attrs = cmds.listAttr(node)\n if \"moduleName\" in attrs:\n if cmds.getAttr(node + \".moduleName\") == self.name:\n networkNode = node\n\n return networkNode", "def get_net_id(self, net_name):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks\"\n _headers = {'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n result = self.request(\"GET\", 
_url, _headers, _body)\n\n if result is None:\n LOG_OBJ.error(\n \"No response from Server while trying to\"\n \" get networks of tenant: %s\" %\n self.project_info[\"project_id\"])\n return result\n\n if result.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get network Failed with status %s \" % result.status)\n return result.status\n\n output = json.loads(result.data)\n LOG_OBJ.debug(\"Networks: %s\" % output['networks'])\n\n for nets in output['networks']:\n if nets['name'].lower() == net_name.lower() and \\\n net_name == config.extnet_name:\n LOG_OBJ.debug(\"Net ID : %s \" % nets['id'])\n return nets['id']\n if nets['name'].lower() == net_name.lower() and \\\n nets['tenant_id'] == self.project_info[\"project_id\"]:\n LOG_OBJ.debug(\"Net ID : %s \" % nets['id'])\n return nets['id']\n\n LOG_OBJ.debug(\"Net:%s Not Found\" % net_name)\n return", "def transit_router_cidr_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"transit_router_cidr_id\")", "def external_network_id(self) -> str:\n return pulumi.get(self, \"external_network_id\")", "def network_configuration(self) -> pulumi.Output['outputs.ServiceNetworkConfiguration']:\n return pulumi.get(self, \"network_configuration\")", "def wan_address(self):\n if self._community.dispersy:\n host = self._community.dispersy.wan_address[0]\n\n if host == \"0.0.0.0\":\n host = self._community.dispersy.lan_address[0]\n\n else:\n host = \"0.0.0.0\"\n\n _, port = self._socket.getsockname()\n return (host, port)", "def get_azure_ips_connection(self):\n return self.m_connection.azure_ips", "def get_nt_server():\n # TODO: Make dynamic based on self.team_number\n return '10.59.87.2'", "def network_mode(self) -> Optional[pulumi.Input[Union[str, 'NetworkMode']]]:\n return pulumi.get(self, \"network_mode\")", "def network_client(self):\n if not self.client:\n self.client = get_client_from_cli_profile(NetworkManagementClient)\n return self.client", "def get_network(self, name, disconnected=False):\n return self.get_networks(as_dict=True,\n disconnected=disconnected).get(name)", "def networks(self) -> pulumi.Output[Sequence['outputs.NetworkConfigResponse']]:\n return pulumi.get(self, \"networks\")", "def get_probable_router_mac(self):\n return self.probable_router_mac", "def get_device(self):\n addr = self.address\n servers = [server for server in pyrax.cloudservers.list()\n if addr in server.networks.get(\"private\", \"\")]\n try:\n return servers[0]\n except IndexError:\n return None", "def getlan():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n s.connect(('10.255.255.255', 1))\n lan = str(s.getsockname()[0])\n s.close()\n except socket.error:\n s.close()\n sys.exit('>> Unable to find LAN IP')\n\n return lan", "def _get_network(self, kind, router=True, vlans=True, vlan_ids=True):\r\n network = {}\r\n macs = self.get('%s_mac' % kind)\r\n network['mac_addresses'] = macs\r\n\r\n if len(macs) == 0:\r\n return network\r\n\r\n if router:\r\n network['router'] = self.get('router', macs[0])\r\n\r\n if vlans:\r\n network['vlans'] = self.get('vlans', macs[0])\r\n\r\n if vlan_ids:\r\n network['vlan_ids'] = self.get('vlan_ids', macs[0])\r\n\r\n return network", "def get_connection(self):\n\n return self.REMOTE_CONNECTION", "def network_config(self) -> Optional[pulumi.Input['PrivateCloudNetworkConfigArgs']]:\n return pulumi.get(self, \"network_config\")", "def network_configuration(self) -> Optional[pulumi.Input['ServiceNetworkConfigurationArgs']]:\n return pulumi.get(self, \"network_configuration\")", "def 
network_configuration(self) -> Optional[pulumi.Input['ServiceNetworkConfigurationArgs']]:\n return pulumi.get(self, \"network_configuration\")", "def private_network(self, **kwargs):\r\n return self._get_network('backend', **kwargs)", "def get_gateway(self):\n return self.gateway", "def get_network_name_on_vc(options):\n network = get_network_on_vc(options)\n if network:\n return network.name", "def tenant_internet_gateway_ids(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"tenant_internet_gateway_ids\")", "def network_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"network_id\")", "def nat_gateway(self) -> Optional['outputs.SubResourceResponse']:\n return pulumi.get(self, \"nat_gateway\")", "def show_networks():\n return get_networks()", "def network_settings(self): # type: () -> t.Dict[str, t.Any]\n return self.inspection['NetworkSettings']", "def get_virustotal_connection(self):\n return self.m_connection.virustotal", "def get_virtual_network_id(self):\n\t\treturn call_sdk_function('PrlVmDevNet_GetVirtualNetworkId', self.handle)", "def network_views():\n return 'networkview?'", "def virtual_router_mac(self):\n return self._virtual_router_mac", "def network_profile(self) -> Optional[pulumi.Input['AgentPoolNetworkProfileArgs']]:\n return pulumi.get(self, \"network_profile\")", "def list_net(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing the networks\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get network list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n\n LOG_OBJ.info(\"Network List : %s \" % output)\n return output['networks']", "def __call__(self) -> list:\n return self.network", "def networks(self) -> dict:\n return self.data[\"networks\"]", "def networks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NetworkConfigArgs']]]]:\n return pulumi.get(self, \"networks\")", "def get_network_dataplane(self) -> Union[str, None]:\n return self.raw_param.get(\"network_dataplane\")", "def _get_network_type(self, host):\n network_type = host.get(\"network\")\n default_network = self.config.get(\"default_network\")\n if network_type is None:\n network_type = self._metadata.get(\"network\", default_network)\n if not network_type:\n raise ProvisioningConfigError(\n \"No network type specified and project doesn't have default \"\n \"network type (property 'default_network') specified in \"\n \"provisioning config.\"\n )\n return network_type", "def get_networks(self):\n url = '%s/v2.0/networks' % self.catalog['network']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['networks']\n else:\n LOG.error('Get networks failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def network_cloud(self) -> Optional[pulumi.Input['VirtualNetworksPropertiesNetworkCloudArgs']]:\n return pulumi.get(self, \"network_cloud\")", "def network_config(self) -> pulumi.Input['PrivateCloudNetworkConfigArgs']:\n return pulumi.get(self, \"network_config\")", "def is_network_node():\n return config.NODE_IP == config.NETWORK_NODE_IP", "def get_network_protocols(self):\n return self.mycam.devicemgmt.GetNetworkProtocols()", 
"def get_network_by_id(self, id):\n return self.network.get_network(id)", "def get_network_name(self): # type: () -> str\n networks = self.get_network_names()\n\n if not networks:\n raise ApplicationError('No network found for Docker container: %s.' % self.id)\n\n if len(networks) > 1:\n raise ApplicationError('Found multiple networks for Docker container %s instead of only one: %s' % (self.id, ', '.join(networks)))\n\n return networks[0]", "def computer_network_name(self) -> str:\n return self._computer_network_name", "def network_profile(self) -> Optional[pulumi.Input['NetworkProfileArgs']]:\n return pulumi.get(self, \"network_profile\")", "def network_profile(self) -> Optional[pulumi.Input['NetworkProfileArgs']]:\n return pulumi.get(self, \"network_profile\")", "def networks(self) -> Sequence['outputs.NetworkConfigResponse']:\n return pulumi.get(self, \"networks\")", "def networkMode(self):\n\n response = self.at.sendCommand(\"AT+CEREG?\")\n\n # If we failed to query the network mode, that's a paddlin'\n if not response:\n raise modem.AtError(response, \"Failed to query network mode\")\n\n lines = response.lines\n\n if len(lines) < 1:\n raise modem.AtError(response, \"Invalid network mode response\")\n\n fields = lines[0].split(\",\")\n\n # If there isn't at least the prefix and the current mode, that's a\n # paddlin'\n if len(fields) < 2:\n raise modem.AtError(response, \"Invalid network mode response\")\n\n try:\n return int(fields[1])\n\n except ValueError:\n raise modem.AtError(response, \"Invalid network mode\")" ]
[ "0.77449316", "0.7281764", "0.7092121", "0.70122546", "0.6769052", "0.6733666", "0.6733666", "0.6733666", "0.6582788", "0.65117043", "0.6201502", "0.61733115", "0.6108502", "0.60738486", "0.60403067", "0.6020271", "0.600645", "0.5939682", "0.5937701", "0.5896336", "0.58795285", "0.58781123", "0.5866178", "0.5846407", "0.5834734", "0.58169097", "0.5801624", "0.57950926", "0.5762272", "0.57591575", "0.57285464", "0.5707451", "0.5694497", "0.568046", "0.565965", "0.5657665", "0.56543785", "0.5634362", "0.5627202", "0.5627202", "0.55987483", "0.5580583", "0.5580583", "0.55695474", "0.5558631", "0.54960465", "0.5494471", "0.5482196", "0.5478738", "0.54778546", "0.54763263", "0.5460014", "0.5459798", "0.5449887", "0.5445929", "0.5445003", "0.542573", "0.54130113", "0.54020804", "0.5393319", "0.5388977", "0.53830373", "0.53733367", "0.5372494", "0.53546655", "0.53526896", "0.53473294", "0.5338209", "0.5338209", "0.53221726", "0.52920586", "0.5286184", "0.5285471", "0.5284785", "0.5282982", "0.5268104", "0.52662015", "0.5265863", "0.5263314", "0.5258606", "0.52497023", "0.52468926", "0.52466124", "0.52348006", "0.522983", "0.5218788", "0.52168", "0.5216287", "0.52122915", "0.52097046", "0.5208081", "0.5204903", "0.5204835", "0.5188298", "0.51861185", "0.5185881", "0.5182296", "0.5182296", "0.51816654", "0.5181562" ]
0.796581
0
Returns the external network that connects the tenant router to the outside world.
def _external_network(self): try: router = next(self._connection.network.routers.all()) except StopIteration: raise errors.ImproperlyConfiguredError('Could not find tenancy router.') return self._connection.network.networks.get(router.external_gateway_info['network_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _tenant_network(self):\n port = self._connection.network.ports.find_by_device_owner('network:router_interface')\n if port:\n return self._connection.network.networks.get(port.network_id)\n else:\n raise errors.ImproperlyConfiguredError('Could not find tenancy network')", "def get_network(self):\n return self.get_ip_network()[-1]", "def network(self) -> str:\n return pulumi.get(self, \"network\")", "def cosmo_find_external_net(self):\n nets = self.list_networks()['networks']\n ls = [net for net in nets if net.get('router:external')]\n if len(ls) == 1:\n return ls[0]\n if len(ls) != 1:\n raise RuntimeError(\n \"Expected exactly one external network but found {0}\".format(\n len(ls)))", "def get_network(self):\n return self._network", "def __get_external_network_test_helper__(self):\n external_network_id = None\n external_network_list = self.neutron_operations.find_networks(router_external=True)\n if len(external_network_list) != 0:\n external_net_region = self.conf[PROPERTIES_CONFIG_REGION][PROPERTIES_CONFIG_REGION_EXTERNAL_NET]\n if self.region_name in external_net_region:\n ext_net_config = external_net_region[self.region_name]\n for external_network in external_network_list:\n if external_network['name'] == ext_net_config:\n external_network_id = external_network['id']\n if external_network_id is None:\n external_network_id = external_network_list[0]['id']\n self.assertIsNotNone(external_network_id, \"No external networks found\")\n\n return external_network_id", "def network(self):\n return self.__network", "def external_network_id(self) -> str:\n return pulumi.get(self, \"external_network_id\")", "def network(self):\n return self._network", "def network(self):\n return self._network", "def network(self):\n return self._network", "def wan_address(self):\n if self._community.dispersy:\n host = self._community.dispersy.wan_address[0]\n\n if host == \"0.0.0.0\":\n host = self._community.dispersy.lan_address[0]\n\n else:\n host = \"0.0.0.0\"\n\n _, port = self._socket.getsockname()\n return (host, port)", "def internet_address(self) -> str:\n return pulumi.get(self, \"internet_address\")", "def public_network(self, **kwargs):\r\n return self._get_network('frontend', **kwargs)", "def private_network(self) -> str:\n return pulumi.get(self, \"private_network\")", "def network(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"network\")", "def get_network(self) -> Optional[str]:\n return self.get_value(self._network_attribute)", "def network_interface(self): \n return self._network_interface", "def network_address(self):\n\n return self._network_address", "def internet_domain(self) -> str:\n return pulumi.get(self, \"internet_domain\")", "def host_network(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"host_network\")", "def host_network(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"host_network\")", "def getnetwork(ipaddr):\n return '192.168.1.0/24'", "def get_network(self) -> EthereumNetwork:\n return EthereumNetwork(int(self.w3.net.version))", "def get_azure_ips_connection(self):\n return self.m_connection.azure_ips", "def network(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network\")", "def external_IP(self):\r\n return self._external_ip", "def get_network_type(self):\n net_type = self._data['type']\n if net_type == 'Shared':\n return 'guest'\n elif net_type == 'Isolated':\n return 'isolated'", "def get_network_default_gateway(self):\n return self.mycam.devicemgmt.GetNetworkDefaultGateway()", "def get_nt_server():\n # TODO: 
Make dynamic based on self.team_number\n return '10.59.87.2'", "def getOsmHost(self):\n return self.osm_host", "def internet_port(self) -> str:\n return pulumi.get(self, \"internet_port\")", "def get_external_ip():\n try:\n r = requests.get(\n METADATA_NETWORK_INTERFACE_URL,\n headers={'Metadata-Flavor': 'Google'},\n timeout=2)\n return r.text\n except requests.RequestException:\n logging.info('Metadata server could not be reached, assuming local.')\n return 'localhost'", "def network_config(self) -> pulumi.Output['outputs.PrivateCloudNetworkConfig']:\n return pulumi.get(self, \"network_config\")", "def get_network_on_vc(options):\n datacenter = get_datacenter(options)\n networks = datacenter.network\n\n name = get_network_name(options)\n for network in networks:\n if re.search(name, network.name):\n return network", "def get_device(self):\n addr = self.address\n servers = [server for server in pyrax.cloudservers.list()\n if addr in server.networks.get(\"private\", \"\")]\n try:\n return servers[0]\n except IndexError:\n return None", "def network(self):\n address = unicode(\"%s/%s\" % (self.address, _get_cidr(self.netmask)))\n return IPv4Network(address, strict=False)", "def get_network_id(self):\n\t\treturn call_sdk_function('PrlVirtNet_GetNetworkId', self.handle)", "def guess_network(self):\n # decide what sort of network we are going to use\n # return the actual type\n # right now we just use the first host only network and that's it\n host_only = list(HostOnlyNetwork.find_networks())\n if host_only:\n return host_only[0]\n else:\n return NewHostOnlyNetwork()", "def getPeerToPeerNetwork(self):\r\n raise NotImplementedError()", "def lan_address(self):\n _, port = self._socket.getsockname()\n return (\"127.0.0.1\", port)", "def private_network(self, **kwargs):\r\n return self._get_network('backend', **kwargs)", "def intranet_address(self) -> str:\n return pulumi.get(self, \"intranet_address\")", "def network_access(self):\n return self._network_access", "def external_port(self):\r\n return self._external_port", "def GetCurrentNetwork(self, iwconfig=None):\n current_network = str(self.wifi.GetCurrentNetwork(iwconfig))\n return current_network", "def network_config(self) -> Optional[pulumi.Input['PrivateCloudNetworkConfigArgs']]:\n return pulumi.get(self, \"network_config\")", "def get_internal_host(self):\n prefer_internal_ip = self.charm_config.get(\"prefer-internal-ip\")\n fqdn = socket.getfqdn()\n ip = socket.gethostbyname(fqdn)\n if prefer_internal_ip:\n return ip\n return fqdn", "def get_stored_network(cls):\n store = cls.get_store()\n try:\n network_dict = store['network']\n except KeyError:\n network_dict = {}\n network_name = network_dict.get(\n 'value', ChainID.MAINNET.name)\n network = ChainID[network_name]\n return network", "def infradevice(self):\n return self.broker.infradevice(**{\"DeviceRouteID\": self.DeviceRouteID})", "def network_config(self) -> Optional[pulumi.Input['NodeNetworkConfigArgs']]:\n return pulumi.get(self, \"network_config\")", "def get_network_interfaces(self):\n return self.mycam.devicemgmt.GetNetworkInterfaces()", "def remote(self):\n return self.client_address", "def wan_address(self):\n return self._wan_address", "def address(self):\n if self.con_strategy == \"local\":\n return self.address_local()\n if self.con_strategy == \"remote\":\n return self.address_remote()\n return None", "def get_network_adapter() -> network.NetworkAdapter:\n if (ip := os.getenv('ref_ip')) is not None: # noqa: SIM112\n return network.get_adapter_containing_ip(ip)\n # get 
next available loopback adapter\n return next(adapter for adapter in network.get_adapters() if adapter.is_loopback)", "def network_cloud(self) -> Optional[pulumi.Input['VirtualNetworksPropertiesNetworkCloudArgs']]:\n return pulumi.get(self, \"network_cloud\")", "def network_client(self):\n if not self.client:\n self.client = get_client_from_cli_profile(NetworkManagementClient)\n return self.client", "def getlan():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n s.connect(('10.255.255.255', 1))\n lan = str(s.getsockname()[0])\n s.close()\n except socket.error:\n s.close()\n sys.exit('>> Unable to find LAN IP')\n\n return lan", "def intranet_port(self) -> str:\n return pulumi.get(self, \"intranet_port\")", "def get_internal_url(self):\n prefer_internal_ip = self.charm_config.get(\"prefer-internal-ip\")\n fqdn = socket.getfqdn()\n ip = socket.gethostbyname(fqdn)\n if prefer_internal_ip:\n return \"http://{}:8008\".format(ip)\n return \"http://{}:8008\".format(fqdn)", "def network_configuration(self) -> Optional[pulumi.Input['ServiceNetworkConfigurationArgs']]:\n return pulumi.get(self, \"network_configuration\")", "def network_configuration(self) -> Optional[pulumi.Input['ServiceNetworkConfigurationArgs']]:\n return pulumi.get(self, \"network_configuration\")", "def virtual_network(self):\n return self.broker.virtual_network(**{\"VirtualNetworkMemberID\": self.VirtualNetworkMemberID})", "def network_configuration(self) -> pulumi.Output['outputs.ServiceNetworkConfiguration']:\n return pulumi.get(self, \"network_configuration\")", "def public_network_access(self) -> Optional[str]:\n return pulumi.get(self, \"public_network_access\")", "def public_network_access(self) -> Optional[str]:\n return pulumi.get(self, \"public_network_access\")", "def public_network_access(self) -> Optional[str]:\n return pulumi.get(self, \"public_network_access\")", "def nat_gateway(self) -> Optional['outputs.SubResourceResponse']:\n return pulumi.get(self, \"nat_gateway\")", "def get_external_domain(self):\n if self.charm_config[\"external-domain\"]:\n return self.charm_config[\"external-domain\"]\n return self.get_server_name()", "def network_config(self) -> 'outputs.NetworkConfigResponse':\n return pulumi.get(self, \"network_config\")", "def network_config(self) -> 'outputs.NetworkConfigResponse':\n return pulumi.get(self, \"network_config\")", "def address(self):\n \n return self.__ip", "def get_self_address(self):\n return self.self_host, self.self_port", "def getLocalIpAddress() :\n \n if (platform.system() == 'Linux') :\n cmd = \"ifconfig wlan0 | grep 'inet addr:' | cut -d: -f2 | awk '{print $1}'\"\n return subprocess.check_output(cmd, shell=True) \n else : # Darwin\n return socket.gethostbyname(socket.gethostname())", "def base(self):\n\n if self.discovery_address:\n return ('http://%s:%s' % self.discovery_address_tuple)\n\n elif self.multicast_address:\n return ('http://%s:%s' % self.multicast_address_tuple)\n\n elif self.address == \"0.0.0.0\":\n return ('http://%s:%s' % self.address_tuple)\n\n else:\n return ('http://%s:%s' % self.address_tuple)", "def get_network_type(self):\n\t\treturn call_sdk_function('PrlVirtNet_GetNetworkType', self.handle)", "def host():\n return platform.node()", "def ip_address(self) -> str:\n return self._device.ip if self.is_connected else None", "def network_config(self) -> pulumi.Input['PrivateCloudNetworkConfigArgs']:\n return pulumi.get(self, \"network_config\")", "def underlying_url(self):\n return 'http://{}:{}'.format(names.azure_url(self.dns_name), 
self.port)", "def guest_dev(self):\n if self.netns is None:\n raise ex.excError(\"could not determine netns\")\n with open(\"/proc/net/dev\", \"r\") as filep:\n local_devs = [line.split(\":\", 1)[0] for line in filep.readlines() if \":\" in line]\n\n cmd = [rcEnv.syspaths.nsenter, \"--net=\"+self.netns, \"ip\" , \"link\"]\n out, err, ret = justcall(cmd)\n used = []\n for line in out.splitlines():\n if \": eth\" not in line:\n continue\n idx = line.split()[1].replace(\":\", \"\").replace(\"eth\", \"\")\n if \"@\" in idx:\n # strip \"@if<n>\" suffix\n idx = idx[:idx.index(\"@\")]\n try:\n used.append(int(idx))\n except ValueError:\n # user named interface. ex: eth-metier\n continue\n idx = 0\n nspid = self.get_nspid()\n while True:\n guest_dev = \"eth%d\" % idx\n local_dev = \"v%spl%s\" % (guest_dev, nspid)\n if idx not in used and local_dev not in local_devs:\n return guest_dev\n idx += 1", "def get(profile):\n client = boto3client.get(\"ec2\", profile)\n return client.describe_internet_gateways()", "def network_configuration(self) -> Optional['outputs.ScheduleTargetEcsParametersNetworkConfiguration']:\n return pulumi.get(self, \"network_configuration\")", "def get_local_host_ip(self) -> str:", "def tenant_internet_gateway_ids(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"tenant_internet_gateway_ids\")", "def getManageIp(self):\n dev = self.device()\n if dev: return dev.getManageIp()\n return \"\"", "def GetExternalIp():\n h = httplib2.Http(tempfile.gettempdir(), timeout=10)\n url = 'http://whatismyip.akamai.com'\n resp, content = h.request(url, 'GET')\n if resp.status == 200:\n return content\n for provider in (UltraDNSAuth(), MyResolverInfo()):\n answer = provider.GetClientIp()\n if answer:\n return answer", "def net(self):\n if self._net is None:\n self._net = Net(name=self.name)\n return self._net", "def get_IP(): \n \n return socket.gethostbyname(socket.gethostname())", "def create_external_network(self, extnet_info, ignore_privious=False):\n LOG_OBJ.debug(\"Creating External Network : \")\n _tenant_name = config.cloud_admin_project\n _net_name = extnet_info['extnet_name']\n _gateway = extnet_info['gateway']\n _cidr = extnet_info['cidr']\n _start_ip = extnet_info['start_ip']\n _end_ip = extnet_info['end_ip']\n\n if not ignore_privious:\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n output = json.loads(response.data)\n if output is None:\n LOG_OBJ.error(\"No response from server while getting\"\n \" networks.\")\n return\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Getting networks list Failed with status %s \" %\n response.status)\n return response.status\n\n for nets in output['networks']:\n if nets['router:external']:\n LOG_OBJ.info(\"External Network already created\")\n return\n\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks.json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.cloud_admin_info[\"token_project\"]}\n _extnet_info = {\"network\": {\n \"tenant_id\": self.cloud_admin_info[\"project_id\"],\n \"name\": _net_name,\n \"router:external\": \"True\",\n \"admin_state_up\": True}}\n _body = json.dumps(_extnet_info)\n\n response = self.request(\"POST\", _url, _headers, _body)\n output = json.loads(response.data)\n if output is None:\n LOG_OBJ.error(\"No response from server while creating ext net.\")\n return\n if response.status 
not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Create ext network Failed with status %s \" %\n response.status)\n return response.status\n\n _ext_net_id = output['network']['id']\n LOG_OBJ.debug(\"External Network created successfully. ID:%s\" %\n _ext_net_id)\n\n # Creating External Subnet\n _url = \"http://\" + self.host_ip + \":9696/v2.0/subnets.json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.cloud_admin_info[\"token_project\"]}\n _ext_subnet_info = {\"subnet\": {\n \"ip_version\": 4,\n \"allocation_pools\": [{\"start\": _start_ip,\n \"end\": _end_ip}],\n \"gateway_ip\": _gateway,\n \"enable_dhcp\": \"False\",\n \"network_id\": _ext_net_id,\n \"tenant_id\": self.cloud_admin_info[\"project_id\"],\n \"cidr\": _cidr,\n \"name\": _net_name + \"-sub\"}}\n _body = json.dumps(_ext_subnet_info)\n output = self.request(\"POST\", _url, _headers, _body)\n if output is None:\n LOG_OBJ.error(\"No response from server while creating ext-subet\")\n return\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Create subnet Failed with status %s \" %\n response.status)\n return response.status\n\n return _ext_net_id", "def local(self):\n return self.server.server_address", "def intranet_domain(self) -> str:\n return pulumi.get(self, \"intranet_domain\")", "def connected_endpoint(self):\n try:\n if self._connected_interface:\n return self._connected_interface\n except ObjectDoesNotExist:\n pass\n try:\n if self._connected_circuittermination:\n return self._connected_circuittermination\n except ObjectDoesNotExist:\n pass\n return None", "def north(self):\n return self.north_west.ns", "def test_external_networks(self):\n network_list = self.neutron_operations.find_networks(router_external=True)\n self.assertNotEqual(len(network_list), 0, \"No external networks found\")", "def get_connection(self):\n\n return self.REMOTE_CONNECTION", "def test_get_default_network(self):\n pass", "def bridgeIP(self):\r\n return self._bridgeIP", "def network_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"network_id\")" ]
[ "0.7765178", "0.7262986", "0.6995677", "0.6902639", "0.6894767", "0.67648536", "0.6673926", "0.66279644", "0.6617179", "0.6617179", "0.6617179", "0.64150774", "0.64139897", "0.6407208", "0.63487035", "0.6331511", "0.6266315", "0.6229477", "0.6182302", "0.6143386", "0.61390054", "0.61390054", "0.61199605", "0.60994834", "0.60911405", "0.6057097", "0.6045322", "0.5997323", "0.59958565", "0.5979683", "0.5959694", "0.5934507", "0.5919682", "0.5890233", "0.58738977", "0.58608425", "0.5842221", "0.58162946", "0.5815004", "0.5814106", "0.57993937", "0.5777344", "0.57601243", "0.57419086", "0.5730198", "0.5712786", "0.5684254", "0.56559694", "0.5633773", "0.5623119", "0.56170046", "0.56157327", "0.56068975", "0.5602106", "0.5597168", "0.5588772", "0.5587715", "0.5582617", "0.5581454", "0.5568138", "0.5556987", "0.55523425", "0.55523425", "0.554323", "0.55257463", "0.5525363", "0.5525363", "0.5525363", "0.5521836", "0.5507376", "0.5494546", "0.5494546", "0.5492214", "0.5489527", "0.5483873", "0.54669833", "0.54642916", "0.5460367", "0.5456026", "0.5453428", "0.5449167", "0.54471856", "0.54436475", "0.5442005", "0.54418135", "0.5439141", "0.54320186", "0.5430359", "0.5425375", "0.5417086", "0.5413796", "0.54055434", "0.53929895", "0.53899217", "0.5375524", "0.53676534", "0.53675246", "0.5364768", "0.5362844", "0.53484774" ]
0.8163727
0
Returns the cluster manager for the tenancy.
def cluster_manager(self): # Lazily instantiate the cluster manager the first time it is asked for. if not hasattr(self, '_cluster_manager'): if self._cluster_engine: self._cluster_manager = self._cluster_engine.create_manager( self._username, self._tenancy ) else: self._cluster_manager = None # If there is still no cluster manager, clusters are not supported if not self._cluster_manager: raise errors.UnsupportedOperationError( 'Clusters are not supported for this tenancy.' ) return self._cluster_manager
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetManager(self):\r\n\r\n return self.manager", "def cluster(self):\n return self._cluster", "def cluster(self):\n return self._cluster", "def get_manager(api_version=None):\n from manager import get_keystone_manager\n return get_keystone_manager(get_local_endpoint(), get_admin_token(),\n api_version)", "def get_cluster(self) -> 'AioCluster':\n return AioCluster(self)", "def getManager(self):\n return self._manager", "def management_cluster(self) -> Optional[pulumi.Input['PrivateCloudManagementClusterArgs']]:\n return pulumi.get(self, \"management_cluster\")", "def management_cluster(self) -> pulumi.Output['outputs.PrivateCloudManagementCluster']:\n return pulumi.get(self, \"management_cluster\")", "def is_mgr():\n if get_cluster_vendor() == \"sgi\":\n return sgi_cluster.is_sac()\n elif get_cluster_vendor() == \"ibm\": \n return ibm_cluster.is_xcat_mgr()\n\n return False", "def get_cluster_def():\n if settings.NO_OP:\n return None\n\n ensure_in_custer()\n\n cluster = os.getenv('POLYAXON_CLUSTER', None)\n try:\n return json.loads(cluster) if cluster else None\n except (ValueError, TypeError):\n print('Could get cluster definition, '\n 'please make sure this is running inside a polyaxon job.')\n return None", "def get_manager():\n return __manager__", "def management_cluster(self) -> pulumi.Input['PrivateCloudManagementClusterArgs']:\n return pulumi.get(self, \"management_cluster\")", "def get_manager():\n\n return multiprocessing.Manager()", "def get_manager(self, name):\n\n if name == \"control\":\n manager = self._control_manager\n elif name == \"alarm\":\n manager = self._alarm_manager\n elif name == \"state\":\n manager = self._machine_manager\n else:\n manager = self._function_manager\n\n return manager", "def mgmt_tool(self) -> MgmtClient:\n return self._mgmt_tool", "def _get_package_manager():\n\n cosmos_url = _get_cosmos_url()\n cosmos_manager = cosmospackage.Cosmos(cosmos_url)\n if cosmos_manager.enabled():\n return cosmos_manager\n else:\n msg = (\"This version of the DCOS CLI is not supported for your \"\n \"cluster. 
Please downgrade the CLI to an older version: \"\n \"https://dcos.io/docs/usage/cli/update/#downgrade\"\n )\n raise DCOSException(msg)", "def gke_cluster(self) -> Optional['outputs.MembershipEndpointGkeCluster']:\n return pulumi.get(self, \"gke_cluster\")", "def resource_type(self):\n return 'cluster'", "def init_mc(self) -> ManagedCluster:\n # Initialize a ManagedCluster object with mandatory parameter location.\n mc = self.models.ManagedCluster(\n location=self.context.get_location(),\n )\n\n # attach mc to AKSContext\n self.context.attach_mc(mc)\n return mc", "def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]", "def fetch_mc(self) -> ManagedCluster:\n mc = self.client.get(self.context.get_resource_group_name(), self.context.get_name())\n\n # attach mc to AKSContext\n self.context.attach_mc(mc)\n return mc", "def get_cluster_config(cohesity_client):\n config = cohesity_client.cluster.get_cluster()\n return config", "def get_cluster_id(options):\n cluster = options.cluster\n datacenter = get_datacenter(options)\n for item in datacenter.hostFolder.childEntity:\n if (item.name == cluster):\n return item._GetMoId()", "def getFeatureManager(address=None):\n return __mgr_cache__[address]", "def get_cluster(self,cluster_name,project_id=''):\n print( f'>>>>>>{self.project_id}')\n if project_id == '':\n project_id = self.project_id\n return self.get('{}/groups/{}/clusters/{}'.format(ApiVersion.A1.value,project_id,cluster_name))", "def existing_mc(self) -> ManagedCluster:\n return self.__existing_mc", "def version_cluster(self):\n response = self._request_call('/version')\n return response.version_etcdcluster", "def get_cluster(self, label):\n try:\n return self._clusters[label]\n except KeyError:\n return None", "def get_cluster(self, profile):\n if self._value.has_option(profile, 'cluster'):\n if self._value.has_option(profile, 'cluster'):\n cluster = self._value.get(profile, 'cluster')\n self.logger.info(\"Connecting to: %s cluster\" % cluster)\n else:\n self.logger.error(\n \"No cluster parameter found\"\n )\n exit(1)\n else:\n self.logger.error(\n \"No profile found. 
Please define a default profile, \\\n or specify a named profile using `--profile`\"\n )\n exit(1)\n return cluster", "def get_clusterer(name, kwargs):\n\n if name == 'KMeans':\n from sklearn.cluster import KMeans\n return KMeans(**kwargs)\n if name == 'MiniBatchKMeans':\n from sklearn.cluster import MiniBatchKMeans\n return MiniBatchKMeans(**kwargs)", "def manager(self):\n if \"manager\" in self._prop_dict:\n if isinstance(self._prop_dict[\"manager\"], OneDriveObjectBase):\n return self._prop_dict[\"manager\"]\n else :\n self._prop_dict[\"manager\"] = DirectoryObject(self._prop_dict[\"manager\"])\n return self._prop_dict[\"manager\"]\n\n return None", "def get_entity_manager(self):\n return self.game.entity_manager", "def _get_cluster_list(self):\n return self.__cluster_list", "def petsc_manager():\n return PetscManager()", "def get_cluster_name(cls):\n\n mid = Machineid()\n if mid.is_sps_cluster:\n return cls.SPS\n if mid.is_spts_cluster:\n return cls.SPTS\n if mid.is_mdfl_cluster:\n return cls.MDFL\n\n return cls.LOCAL", "def get_cluster_command(cls):\n if 'cluster_command' in cls.global_settings:\n return cls.global_settings['cluster_command']\n else:\n return None", "def _get_monasca_client(self):\n\n monasca_url = self.token_helper.get_service_endpoint('monitoring')\n keystone_url = self.token_helper.get_service_endpoint('identity') + 'v3'\n # All monasca data is stored in the admin project, so get a token\n # to that project\n token = self.token_helper.get_token_for_project('admin')\n\n return client.Client(api_version=api_version,\n endpoint=monasca_url,\n token=token,\n auth_url=keystone_url,\n project_name='admin',\n project_domain_name='Default',\n insecure=get_conf(\"insecure\"),\n user_agent=api.USER_AGENT)", "def fusion_api_get_fabric_manager(self, uri=None, param='', api=None, headers=None):\n return self.fabricmanager.get(uri=uri, api=api, headers=headers, param=param)", "def get_cluster_id(self):\n cmd = \"svcinfo lscluster -delim :\"\n\n output = self._svc_command(cmd)[0]\n\n if len(output) != 2:\n return None\n\n header = output[0].split(':')\n values = output[1].split(':')\n index = header.index(SVC_CLUSTER_ID)\n cluster_id = values[index]\n return cluster_id", "def getBestCluster():\r\n global bestCluster\r\n return bestCluster", "def get_mgr(cls, id):\n assert id in cls.s_memory_mgrs, 'invalid id[%s] for memory managers' % (\n id)\n return cls.s_memory_mgrs[id]", "def cluster_type(self) -> str:\n return pulumi.get(self, \"cluster_type\")", "def cluster_id(self):\n return self._cluster_id", "def plugins_get_mgr():\n global pluginmgr\n return pluginmgr", "def get_cluster_entry(self):\n\n cert_data = self.cluster_description.get(\"certificateAuthority\", {}).get(\"data\", \"\")\n endpoint = self.cluster_description.get(\"endpoint\")\n arn = self.cluster_description.get(\"arn\")\n\n return OrderedDict([\n (\"cluster\", OrderedDict([\n (\"certificate-authority-data\", cert_data),\n (\"server\", endpoint)\n ])),\n (\"name\", arn)\n ])", "def clustering(self) -> 'outputs.ClusteringResponse':\n return pulumi.get(self, \"clustering\")", "def get_component_manager(\n token: str = Depends(get_api_token),\n) -> ComponentOperations:\n session = BaseUrlSession(base_url=CONTAXY_API_ENDPOINT)\n session.headers = {\"Authorization\": f\"Bearer {token}\"}\n return ComponentClient(session)", "def new_manager() -> SyncManager:\n return Manager()", "def cluster_name(self):\n return self.name", "def get_cluster_token(\n self,\n manager_name,\n role,\n ):\n # Gets the node IP address.\n ip = 
self.get_node_ip(manager_name)\n\n # Gets the token.\n token = docker_utils.swarm_token(\n role=role,\n hostname=ip,\n ssh_port=SSH_PORT,\n ssh_username=self.get_ssh_username(manager_name),\n ssh_private_key_file=self.get_ssh_private_key_file(manager_name),\n executor=manager_name,\n logger=self._logger,\n )\n\n return token", "def get_instance(cls):\n global DNS_MANAGER_API\n if not DNS_MANAGER_API:\n DNS_MANAGER_API = cls()\n return DNS_MANAGER_API", "def fusion_api_get_hypervisor_manager(self, uri=None, param='', api=None, headers=None):\n return self.hypervisor_mgr.get(uri=uri, api=api, headers=headers, param=param)", "def poll_cluster(self, server, obj, name):\n\n return self._poll_group('cluster', server, obj, name)", "def get_data_manager(self):\n\n return self._data_manager", "def get_metadata_manager(config):\n\n context = config.contextualization_type\n metadata_manager_class = '%sMetadataManager' % context\n if not (metadata_manager_class in globals()):\n raise NotImplementedError('Implementation for %s not available' % context)\n return (globals()[metadata_manager_class])(config)", "def cluster_name(self) -> str:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> str:\n return pulumi.get(self, \"cluster_name\")", "def getProxyManager(address=None):\n return __mgr_cache__[address]", "def GetOwnerManager(self):\r\n\r\n return self._owner_mgr", "def region_instance_group_manager(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region_instance_group_manager\")", "def _tenant_network(self):\n port = self._connection.network.ports.find_by_device_owner('network:router_interface')\n if port:\n return self._connection.network.networks.get(port.network_id)\n else:\n raise errors.ImproperlyConfiguredError('Could not find tenancy network')", "def clusters(self,project_id=os.environ.get(\"ATLAS_PROJECT\")):\n project_id = project_id if project_id != '' else self.__project_id\n return self.get('{}/groups/{}/clusters'.format(ApiVersion.A1.value,project_id))", "def get_collection_manager(self, *args, **kwargs):\n return CollectionManager(self, *args, **kwargs)", "def resource_manager():\n return visa.ResourceManager()", "def _FindPaneManager(self):\n event = aui.AuiManagerEvent(aui.wxEVT_AUI_FIND_MANAGER)\n self.ProcessEvent(event)\n return event.GetManager()", "def get_instance_group_manager(self, name, zone):\n return self.call_api('/zones/%s/instanceGroupManagers/%s' % (zone, name))", "def fusion_api_get_san_manager(self, uri=None, param='', api=None, headers=None):\n return self.dm.get(uri=uri, api=api, headers=headers, param=param)", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def get_novaclient(self):\n # TODO: We ought to be able to derive this from the keystone client,\n # but it's proving trickier than I expected --isd\n return novaclient.Client(self.cluster_account.cluster_user_name,\n self.cluster_account.cluster_password,\n self.name,\n self.cluster_account.cluster.auth_url)", "def fusion_api_get_deployment_manager(self, uri=None, param='', api=None, headers=None):\n return self.dep_mgr.get(uri=uri, api=api, headers=headers, param=param)", "def cluster(self, cluster_id, serve_nodes=3):\n return Cluster(cluster_id, self, 
serve_nodes=serve_nodes)", "def get_clusters(self):\n\n return self.__clusters", "def get_clusters(self):\n return self._clusters", "def get_coe_cluster(self, name_or_id, filters=None):\n return _utils._get_entity(self, 'coe_cluster', name_or_id, filters)", "def cluster_constraint(self):\n return self._cluster_constraint", "def internal_global_clustering(self, node_list):\n clustering = self.local_clustering()\n internal_clustering = clustering[node_list].mean()\n return internal_clustering", "def manager_info(self, manager):\n _, body = self.request('/v1.1/managers/active/%s' % manager, 'GET')\n return body", "def get_clusters(self):\r\n\r\n return self.__clusters", "def GetAuiManager(self):\r\n\r\n return self._mgr", "def load(name):\n\n clovr = pymongo.Connection().clovr\n clusters = clovr.clusters\n instances = clovr.instances\n \n cluster = clusters.find_one(dict(name=name))\n if not cluster:\n raise ClusterDoesNotExist(name)\n\n\n return cluster", "def get_controller(self):\n node_id, _host, _port, _rack = self.client.cluster.controller\n return node_id", "def get_cluster(t2_url, t2_token, id):\n response = requests.get(f\"{t2_url}/api/clusters/{id}\", headers={ \"t2-token\": t2_token })\n if(response.status_code != 200):\n log(f\"API call to get cluster returned error code {response.status_code}\")\n return None\n return response.json()", "def serveradmin(self):\n return self._sdk_dependencies.administration_client", "def get_one_cluster_by_name(ctx, cluster_name, project_name):\n project = ctx.obj.groups.byName[project_name].get().data\n cluster = ctx.obj.groups[project.id].clusters[cluster_name].get()\n pprint(cluster.data)", "def get_cataloging_rules_manager(self):\n raise Unimplemented()", "def get_cluster_name(admin_socket):\n\n m = ADMIN_SOCKET_PATTERN.match(admin_socket)\n name = None\n if m:\n name = m.group(1)\n return name", "def get_cluster_config(\n cluster_type: str,\n cluster_name: str | None = None,\n kafka_topology_base_path: str | None = None,\n) -> ClusterConfig:\n if not kafka_topology_base_path:\n config_dirs = get_conf_dirs()\n else:\n config_dirs = [kafka_topology_base_path]\n\n topology = None\n for config_dir in config_dirs:\n try:\n topology = TopologyConfiguration(\n cluster_type,\n config_dir,\n )\n except MissingConfigurationError:\n pass\n if not topology:\n raise MissingConfigurationError(\n f\"No available configuration for type {cluster_type}\",\n )\n\n if cluster_name:\n return topology.get_cluster_by_name(cluster_name)\n else:\n return topology.get_local_cluster()", "def region_instance_group_manager(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"region_instance_group_manager\")", "def region_instance_group_manager(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"region_instance_group_manager\")", "def get_master_node_by_role(self, role_name, excluded_nodes_fqdns=()):\n nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n self.cluster_id, role_name)\n nodes = [node for node in nodes\n if node['fqdn'] not in set(excluded_nodes_fqdns)]\n with self.fuel_web.get_ssh_for_nailgun_node(nodes[0]) as remote:\n stdout = remote.check_call(\n 'pcs status cluster | grep \"Current DC:\"')[\"stdout\"][0]\n for node in nodes:\n if node['fqdn'] in stdout:\n return node", "def get_cluster_idx(_cluster):\n\n return _cluster.cluster_idx", "def get_task_manager(task_manager=None):\n global _task_manager\n if _task_manager is None:\n if task_manager is None:\n _task_manager = TaskManagerImpl()\n else:\n constructor = dynamic_import(task_manager)\n 
_task_manager = constructor()\n\n return _task_manager", "def create_manager(self, username, tenancy):\n raise NotImplementedError", "def getServiceManager( cHost=\"localhost\", cPort=\"8100\" ):\n global goServiceManager\n if not goServiceManager:\n # Get the uno component context from the PyUNO runtime\n oLocalContext = uno.getComponentContext()\n # Create the UnoUrlResolver on the Python side.\n oLocalResolver = oLocalContext.ServiceManager.createInstanceWithContext(\n \"com.sun.star.bridge.UnoUrlResolver\", oLocalContext )\n # Connect to the running OpenOffice.org and get its context.\n oContext = oLocalResolver.resolve( \"uno:socket,host=\" + cHost + \",port=\" + cPort + \";urp;StarOffice.ComponentContext\" )\n # Get the ServiceManager object\n goServiceManager = oContext.ServiceManager\n return goServiceManager", "def get_ca(user='keystone', group='keystone'):\n if not ssl.CA_SINGLETON:\n ensure_ssl_dir()\n d_name = '_'.join(SSL_CA_NAME.lower().split(' '))\n ca = ssl.JujuCA(name=SSL_CA_NAME, user=user, group=group,\n ca_dir=os.path.join(SSL_DIR,\n '%s_intermediate_ca' % d_name),\n root_ca_dir=os.path.join(SSL_DIR,\n '%s_root_ca' % d_name))\n\n # Ensure a master is elected. This should cover the following cases:\n # * single unit == 'oldest' unit is elected as master\n # * multi unit + not clustered == 'oldest' unit is elcted as master\n # * multi unit + clustered == cluster leader is elected as master\n ensure_ssl_cert_master()\n\n ssl.CA_SINGLETON.append(ca)\n\n return ssl.CA_SINGLETON[0]", "def get_jobqueue_cluster(walltime='12:00', ncpus=1, cores=1, local_directory=None, memory='15GB', env_extra=None, **kwargs):\n import dask\n # this is necessary to ensure that workers get the job script from stdin\n dask.config.set({\"jobqueue.lsf.use-stdin\": True})\n from dask_jobqueue import LSFCluster\n import os\n\n if env_extra is None:\n env_extra = [\n \"export NUM_MKL_THREADS=1\",\n \"export OPENBLAS_NUM_THREADS=1\",\n \"export OPENMP_NUM_THREADS=1\",\n \"export OMP_NUM_THREADS=1\",\n ]\n\n if local_directory is None:\n local_directory = '/scratch/' + os.environ['USER'] + '/'\n\n cluster = LSFCluster(queue='normal',\n walltime=walltime,\n ncpus=ncpus,\n cores=cores,\n local_directory=local_directory,\n memory=memory,\n env_extra=env_extra,\n job_extra=[\"-o /dev/null\"],\n **kwargs)\n return cluster" ]
[ "0.66724825", "0.665754", "0.665754", "0.66434646", "0.663374", "0.6481853", "0.6345078", "0.6344062", "0.62795883", "0.6278968", "0.62144953", "0.60009164", "0.59885573", "0.5977996", "0.59189504", "0.5905458", "0.5904256", "0.5867709", "0.5859221", "0.58005214", "0.5737495", "0.5717247", "0.57003874", "0.568488", "0.56325376", "0.5597151", "0.55655074", "0.5547065", "0.5533378", "0.5509962", "0.5509673", "0.5497298", "0.5490301", "0.54409605", "0.5437669", "0.5428299", "0.54264885", "0.54151964", "0.54048336", "0.5380418", "0.5380299", "0.5364102", "0.5348269", "0.5336938", "0.5333733", "0.53309715", "0.531205", "0.5289903", "0.5260352", "0.52585876", "0.52584773", "0.5253463", "0.5252752", "0.5246122", "0.5243525", "0.5236996", "0.5236996", "0.5233742", "0.5225212", "0.52139443", "0.52128285", "0.5211931", "0.5208308", "0.52072734", "0.520462", "0.52025616", "0.5199387", "0.5198383", "0.5198383", "0.5198383", "0.5198383", "0.5198383", "0.5183978", "0.518318", "0.5179094", "0.5167594", "0.5167272", "0.5162519", "0.51552093", "0.5153495", "0.5138454", "0.51378196", "0.5137075", "0.5132422", "0.5129534", "0.5128421", "0.51275975", "0.5119677", "0.511646", "0.51160204", "0.5112944", "0.51080686", "0.5106866", "0.5102639", "0.51004577", "0.5098529", "0.50925636", "0.5088137", "0.5082265", "0.5063543" ]
0.85531485
0
Fix up the cluster with any OpenStack-specific changes.
def _fixup_cluster(self, cluster): # Remove injected parameters from the cluster params params = { k: v for k, v in cluster.parameter_values.items() if k != 'cluster_network' } # Add any tags attached to the stack try: stack = self._connection.orchestration.stacks.find_by_stack_name(cluster.name) except rackit.NotFound: stack = None # We use this format because tags might exist on the stack but be None stack_tags = tuple(getattr(stack, 'tags', None) or []) original_error = (cluster.error_message or '').lower() # Convert quota-related error messages based on known OpenStack errors if any(m in original_error for m in {'quota exceeded', 'exceedsavailablequota'}): if 'floatingip' in original_error: error_message = ( 'Could not find an external IP for deployment. ' 'Please ensure an external IP is available and try again.' ) else: error_message = ( 'Requested resources exceed at least one quota. ' 'Please check your tenancy quotas and try again.' ) elif cluster.error_message: error_message = ( 'Error during cluster configuration. ' 'Please contact support.' ) else: error_message = None return cluster._replace( parameter_values = params, tags = cluster.tags + stack_tags, error_message = error_message )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def two_clusters_reconfiguration(self):\n\n self.show_step(1)\n self.env.revert_snapshot(\"ready_with_5_slaves\")\n\n self.show_step(2)\n cluster_id_1 = self.fuel_web.create_cluster(\n name=\"env1\",\n mode=settings.DEPLOYMENT_MODE,\n settings={\n \"net_provider\": 'neutron',\n \"net_segment_type\": settings.NEUTRON_SEGMENT_TYPE,\n }\n )\n cluster_id_2 = self.fuel_web.create_cluster(\n name=\"env2\",\n mode=settings.DEPLOYMENT_MODE,\n settings={\n \"net_provider\": 'neutron',\n \"net_segment_type\": settings.NEUTRON_SEGMENT_TYPE,\n }\n )\n\n self.fuel_web.update_nodes(\n cluster_id_1,\n {\n 'slave-01': ['compute'],\n 'slave-02': ['controller']\n })\n\n self.fuel_web.update_nodes(\n cluster_id_2,\n {\n 'slave-03': ['compute'],\n 'slave-04': ['controller']\n })\n\n networks_1 = self.fuel_web.client.get_networks(\n cluster_id_1)[\"networks\"]\n self.change_default_range(networks_1,\n number_excluded_ips=30,\n cut_from_start=True)\n helpers.wait(lambda: not self.is_update_dnsmasq_running(\n self.fuel_web.client.get_tasks()), timeout=60,\n timeout_msg=\"Timeout exceeded while waiting for task \"\n \"'update_dnsmasq' is finished!\")\n floating_list = [self.fuel_web.get_floating_ranges()[0][0]]\n networking_parameters = {\n \"floating_ranges\": floating_list}\n self.fuel_web.client.update_network(\n cluster_id_1,\n networks=networks_1,\n networking_parameters=networking_parameters\n )\n\n networks_2 = self.fuel_web.client.get_networks(\n cluster_id_2)[\"networks\"]\n self.change_default_range(networks_2,\n number_excluded_ips=30,\n cut_from_start=False)\n helpers.wait(lambda: not self.is_update_dnsmasq_running(\n self.fuel_web.client.get_tasks()), timeout=60,\n timeout_msg=\"Timeout exceeded while waiting for task \"\n \"'update_dnsmasq' is finished!\")\n floating_list = [self.fuel_web.get_floating_ranges()[0][1]]\n\n vlan_range_1 = self.fuel_web.client.get_networks(\n cluster_id_1)[\"networking_parameters\"][\"vlan_range\"]\n vlan_range_2 = [vlan_range_1[-1] + 1, vlan_range_1[-1] + 31]\n\n networking_parameters = {\n \"floating_ranges\": floating_list,\n \"vlan_range\": vlan_range_2}\n self.fuel_web.client.update_network(\n cluster_id_2,\n networks=networks_2,\n networking_parameters=networking_parameters\n )\n self.show_step(3)\n self.fuel_web.verify_network(cluster_id_1)\n self.show_step(4)\n self.fuel_web.verify_network(cluster_id_2)\n self.show_step(5)\n self.fuel_web.deploy_cluster_wait(cluster_id_1, check_services=False)\n self.show_step(6)\n self.fuel_web.run_ostf(cluster_id=cluster_id_1)\n self.show_step(7)\n self.fuel_web.deploy_cluster_wait(cluster_id_2, check_services=False)\n self.show_step(8)\n self.fuel_web.run_ostf(cluster_id=cluster_id_2)\n\n self.show_step(9)\n config_new = utils.get_config_template('nova_cpu')\n structured_config = get_structured_config_dict(config_new)\n self.fuel_web.client.upload_configuration(config_new,\n cluster_id_1,\n role=\"controller\")\n\n service_name = \"nova-scheduler\"\n\n controller_env_1 = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id_1, ['controller'])\n controller_env_2 = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id_2, ['controller'])\n uptimes = self.get_service_uptime(controller_env_1, service_name)\n task = self.fuel_web.client.apply_configuration(cluster_id_1,\n role=\"controller\")\n\n self.show_step(10)\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(11)\n self.check_service_was_restarted(controller_env_1,\n uptimes,\n service_name)\n\n self.show_step(12)\n 
self.check_config_on_remote(controller_env_1, structured_config)\n\n self.show_step(13)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id_1))\n\n self.check_overcommit_ratio(os_conn, cluster_id_1)\n\n self.show_step(14)\n config_revert = utils.get_config_template('nova_cpu_old')\n structured_config_revert = get_structured_config_dict(config_revert)\n self.fuel_web.client.upload_configuration(config_revert,\n cluster_id_2,\n role=\"controller\")\n uptimes = self.get_service_uptime(controller_env_2, service_name)\n task = self.fuel_web.client.apply_configuration(cluster_id_2,\n role=\"controller\")\n self.show_step(15)\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(16)\n self.check_service_was_restarted(controller_env_2,\n uptimes,\n service_name)\n\n self.show_step(17)\n self.check_config_on_remote(controller_env_2,\n structured_config_revert)\n\n self.env.make_snapshot(\"two_clusters_reconfiguration\")", "def test_update_hyperflex_cluster(self):\n pass", "def reconfiguration_scalability(self):\n\n self.check_run('reconfiguration_scalability')\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfigure_nova_ephemeral_disk\")\n\n self.show_step(2)\n cluster_id = self.fuel_web.get_last_created_cluster()\n config = utils.get_config_template('nova_disk')\n structured_config_nova = get_structured_config_dict(config)\n config = utils.get_config_template('keystone')\n structured_config_keystone = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role='controller')\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n\n self.show_step(3)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role='controller')\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(4)\n self.check_config_on_remote(controllers, structured_config_keystone)\n\n self.show_step(5)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n time_expiration = config[\n 'keystone_config']['token/expiration']['value']\n self.check_token_expiration(os_conn, time_expiration)\n\n self.show_step(6)\n bs_nodes = [x for x in self.env.d_env.get_nodes()\n if x.name == 'slave-05' or x.name == 'slave-06']\n self.env.bootstrap_nodes(bs_nodes)\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-05': ['compute', 'cinder']})\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-06': ['controller']})\n\n self.show_step(7)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(8)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(9)\n self.fuel_web.run_ostf(cluster_id=cluster_id)\n\n self.show_step(10)\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n target_controller = [x for x in controllers\n if 'slave-06' in x['name']]\n target_compute = [x for x in computes\n if 'slave-05' in x['name']]\n self.check_config_on_remote(target_controller,\n structured_config_keystone)\n\n self.show_step(11)\n self.check_config_on_remote(target_compute, structured_config_nova)\n\n self.show_step(12)\n self.show_step(13)\n self.show_step(14)\n self.show_step(15)\n self.show_step(16)\n\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n hypervisor_name = target_compute[0]['fqdn']\n 
self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=hypervisor_name)\n\n self.show_step(17)\n self.check_token_expiration(os_conn, time_expiration)\n\n self.env.make_snapshot(\"reconfiguration_scalability\", is_make=True)", "def apply_maintenance_update(self):\n logger.info(\"Applying maintenance updates on master node\")\n self.env.admin_install_updates()\n\n logger.info(\"Applying maintenance updates on slaves\")\n slaves_mu_script_url = (\n \"https://github.com/Mirantis/tools-sustaining/\"\n \"raw/master/scripts/mos_apply_mu.py\")\n\n path_to_mu_script = \"/tmp/mos_apply_mu.py\"\n\n with self.env.d_env.get_admin_remote() as remote:\n remote.check_call(\"wget {uri} -O {path}\".format(\n uri=slaves_mu_script_url,\n path=path_to_mu_script)\n )\n\n remote.check_call(\n \"python {path} \"\n \"--env-id={identifier} \"\n \"--user={username} \"\n \"--pass={password} \"\n \"--tenant={tenant_name} --update\".format(\n path=path_to_mu_script,\n identifier=self.cluster_id,\n **conf.KEYSTONE_CREDS\n )\n )\n\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n self.cluster_id, roles=['controller', ])\n\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n self.cluster_id, roles=['compute', ])\n\n logger.info(\"Restarting all OpenStack services\")\n\n logger.info(\"Restarting services on controllers\")\n ha_services = (\n \"p_heat-engine\",\n \"p_neutron-plugin-openvswitch-agent\",\n \"p_neutron-dhcp-agent\",\n \"p_neutron-metadata-agent\",\n \"p_neutron-l3-agent\")\n non_ha_services = (\n \"heat-api-cloudwatch\",\n \"heat-api-cfn\",\n \"heat-api\",\n \"cinder-api\",\n \"cinder-scheduler\",\n \"nova-objectstore\",\n \"nova-cert\",\n \"nova-api\",\n \"nova-consoleauth\",\n \"nova-conductor\",\n \"nova-scheduler\",\n \"nova-novncproxy\",\n \"neutron-server\",\n )\n for controller in controllers:\n with self.fuel_web.get_ssh_for_nailgun_node(\n controller) as remote:\n for service in ha_services:\n remote_ops.manage_pacemaker_service(remote, service)\n for service in non_ha_services:\n remote_ops.manage_service(remote, service)\n\n logger.info(\"Restarting services on computes\")\n compute_services = (\n \"neutron-plugin-openvswitch-agent\",\n \"nova-compute\",\n )\n for compute in computes:\n with self.fuel_web.get_ssh_for_nailgun_node(compute) as remote:\n for service in compute_services:\n remote_ops.manage_service(remote, service)", "def test_patch_hyperflex_cluster(self):\n pass", "def test_replace_cluster_network(self):\n pass", "def reconfigure(self, consensus=None):\r\n pass", "def reconfigure_with_new_fields(self):\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n\n self.show_step(2)\n config_controller = utils.get_config_template('new_fields_controller')\n structured_config = get_structured_config_dict(config_controller)\n self.fuel_web.client.upload_configuration(config_controller,\n cluster_id,\n role=\"controller\")\n\n self.show_step(3)\n service_list = ['neutron-server', 'neutron-dhcp-agent',\n 'neutron-l3-agent', 'neutron-metadata-agent',\n 'nova-scheduler', 'nova-novncproxy', 'nova-conductor',\n 'nova-api', 'nova-consoleauth', 'nova-cert']\n services_uptime = {}\n for service_name in service_list:\n services_uptime[service_name] = self.get_service_uptime(\n controllers, service_name)\n\n self.show_step(4)\n task = 
self.fuel_web.client.apply_configuration(cluster_id,\n role=\"controller\")\n\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(5)\n for service_name in service_list:\n self.check_service_was_restarted(\n controllers,\n services_uptime[service_name],\n service_name)\n\n self.show_step(6)\n self.check_config_on_remote(controllers, structured_config)\n\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n\n self.show_step(7)\n config_copmute = utils.get_config_template('new_fields_compute')\n structured_config = get_structured_config_dict(config_copmute)\n self.fuel_web.client.upload_configuration(config_copmute,\n cluster_id,\n role='compute')\n\n self.show_step(8)\n uptimes_nova = self.get_service_uptime(computes, 'nova-compute')\n\n self.show_step(9)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role='compute')\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(10)\n self.check_service_was_restarted(computes,\n uptimes_nova,\n 'nova-compute')\n\n self.show_step(11)\n self.check_config_on_remote(computes, structured_config)\n self.env.make_snapshot(\"reconfigure_with_new_fields\")", "def test_patch_cluster_network(self):\n pass", "def deploy():\n update_treesheets()\n restart_treesheets()", "def preservation_config_after_reset_and_preconfigured_deploy(self):\n\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfigure_ml2_vlan_range\")\n\n self.show_step(2)\n cluster_id = self.fuel_web.get_last_created_cluster()\n self.fuel_web.stop_reset_env_wait(cluster_id)\n\n self.show_step(3)\n config = utils.get_config_template('nova_cpu')\n structured_config_nova = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role='controller')\n config = utils.get_config_template('neutron')\n structured_config_neutron = get_structured_config_dict(config)\n\n self.show_step(4)\n self.fuel_web.wait_nodes_get_online_state(\n self.env.d_env.nodes().slaves[:4], timeout=10 * 60)\n\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(5)\n self.fuel_web.run_ostf(\n cluster_id=cluster_id)\n\n self.show_step(6)\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n structured_config = {}\n structured_config.update(structured_config_neutron)\n structured_config.update(structured_config_nova)\n self.check_config_on_remote(controllers, structured_config)\n\n self.show_step(7)\n self.show_step(8)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n self.check_ml2_vlan_range(os_conn)\n\n self.show_step(9)\n self.show_step(10)\n self.check_overcommit_ratio(os_conn, cluster_id)\n\n snapshot = \"preservation_config_after_reset_and_preconfigured_deploy\"\n self.env.make_snapshot(snapshot, is_make=True)", "def basic_env_for_reconfiguration(self):\n snapshot_name = 'basic_env_for_reconfiguration'\n self.check_run(snapshot_name)\n self.env.revert_snapshot(\"ready_with_5_slaves\")\n\n self.show_step(1, initialize=True)\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=settings.DEPLOYMENT_MODE,\n settings={\n \"net_provider\": 'neutron',\n \"net_segment_type\": settings.NEUTRON_SEGMENT_TYPE,\n }\n )\n self.show_step(2)\n self.show_step(3)\n\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-01': ['compute'],\n 'slave-02': ['controller'],\n 'slave-03': ['controller'],\n 'slave-04': ['controller']\n })\n\n 
self.show_step(4)\n self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)\n\n self.show_step(5)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(6)\n self.fuel_web.run_ostf(cluster_id=cluster_id)\n\n self.env.make_snapshot(\"basic_env_for_reconfiguration\", is_make=True)", "def createcluster(self):\n for hostitem in OTHER_NODES:\n checkhost(hostitem)\n if OTHER_WSREP:\n for wsrepitem in OTHER_WSREP:\n REMAINING_NODES.append(wsrepitem)\n if REMAINING_NODES:\n alive = str(REMAINING_NODES)[1:-1]\n print \"{}\\nThe following nodes are alive in cluster:{}\\n {}\".format(\n RED, WHITE, alive)\n print \"\\n\\nTo boostrap a new cluster you need to switch them off\\n\"\n os.sys.exit(1)\n else:\n if self.mode == \"new\" and not self.force:\n ask('\\nThis operation will destroy the local data')\n clean_dir(self.datadir)\n initialize_mysql(self.datadir)\n bootstrap_mysql(self.mode)\n if self.mode == \"new\":\n create_monitor_table()\n ALL_NODES.append(\"localhost\")\n for creditem in CREDENTIALS:\n create_users(creditem)\n print \"\"\n drop_anonymous()", "def cluster_reboot(cluster):\n map(reboot, cluster)", "def test_replace(self):\n disco = create_disco()\n node1 = create_node(\"somewhere\")\n node2 = create_node(\"somewhere2\")\n node3 = create_node(\"somewhere3\")\n node4 = create_node(\"somewhere4\")\n disco.onMessage(None, NodeActive(node1))\n disco.onMessage(None, NodeActive(node2))\n disco.onMessage(None, ReplaceCluster(\"myservice\",\n SANDBOX_ENV,\n [node3, node4]))\n self.assertEqual(knownNodes(disco, \"myservice\", \"sandbox\"), [node3, node4])", "def setup(self, cluster):\n raise NotImplementedError()", "def multiple_repair_test(self):\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n session = self.patient_cql_connection(node1)\n create_ks(session, 'ks', 3)\n create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})\n\n debug(\"insert data\")\n\n insert_c1c2(session, keys=range(1, 50), consistency=ConsistencyLevel.ALL)\n node1.flush()\n\n debug(\"bringing down node 3\")\n node3.flush()\n node3.stop(gently=False)\n\n debug(\"inserting additional data into node 1 and 2\")\n insert_c1c2(session, keys=range(50, 100), consistency=ConsistencyLevel.TWO)\n node1.flush()\n node2.flush()\n\n debug(\"restarting and repairing node 3\")\n node3.start(wait_for_binary_proto=True)\n\n if cluster.version() >= \"2.2\":\n node3.repair()\n else:\n node3.nodetool(\"repair -par -inc\")\n\n # wait stream handlers to be closed on windows\n # after session is finished (See CASSANDRA-10644)\n if is_win:\n time.sleep(2)\n\n debug(\"stopping node 2\")\n node2.stop(gently=False)\n\n debug(\"inserting data in nodes 1 and 3\")\n insert_c1c2(session, keys=range(100, 150), consistency=ConsistencyLevel.TWO)\n node1.flush()\n node3.flush()\n\n debug(\"start and repair node 2\")\n node2.start(wait_for_binary_proto=True)\n\n if cluster.version() >= \"2.2\":\n node2.repair()\n else:\n node2.nodetool(\"repair -par -inc\")\n\n debug(\"replace node and check data integrity\")\n node3.stop(gently=False)\n node5 = Node('node5', cluster, True, ('127.0.0.5', 9160), ('127.0.0.5', 7000), '7500', '0', None, ('127.0.0.5', 9042))\n cluster.add(node5, False)\n node5.start(replace_address='127.0.0.3', wait_other_notice=True)\n\n assert_one(session, \"SELECT COUNT(*) FROM ks.cf LIMIT 200\", [149])", "def fail_without_replace_test(self):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3)\n node1, 
node2, node3 = cluster.nodelist()\n cluster.seeds.remove(node3)\n NUM_TOKENS = os.environ.get('NUM_TOKENS', '256')\n if DISABLE_VNODES:\n cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': 1})\n else:\n cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': NUM_TOKENS})\n cluster.start()\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n mark = None\n for auto_bootstrap in (True, False):\n debug(\"Stopping node 3.\")\n node3.stop(gently=False)\n\n # completely delete the data, commitlog, and saved caches\n for d in chain([os.path.join(node3.get_path(), \"commitlogs\")],\n [os.path.join(node3.get_path(), \"saved_caches\")],\n node3.data_directories()):\n if os.path.exists(d):\n rmtree(d)\n\n node3.set_configuration_options(values={'auto_bootstrap': auto_bootstrap})\n debug(\"Starting node 3 with auto_bootstrap = {val}\".format(val=auto_bootstrap))\n node3.start(wait_other_notice=False)\n node3.watch_log_for('Use cassandra.replace_address if you want to replace this node', from_mark=mark, timeout=20)\n mark = node3.mark_log()", "def cluster_regenerate(self):\n\n self._client.post(\n \"{}/regenerate\".format(LKECluster.api_endpoint), model=self\n )", "def test_patch_cluster_role(self):\n pass", "def fin():\n result_list = list()\n result_list.append(\n ll_clusters.updateCluster(\n positive=True,\n cluster=conf.CLUSTER_NAME[0],\n scheduling_policy=conf.POLICY_NONE\n )\n )\n result_list.append(\n ll_sch_policies.remove_scheduling_policy(\n policy_name=conf.AFFINITY_POLICY_NAME\n )\n )\n assert all(result_list)", "def test_replace_cluster_role(self):\n pass", "def cluster_shutdown():\n map(shutdown, cluster)", "def update_clusters(examples, cluster_averages):\n # find the closest cluster and move the example to that one\n for example in examples:\n example.type = closest_cluster(cluster_averages, example)", "def update(self, force, verbose):\n\n # Print the job config diffs\n print('Update Peloton cluster \"%s\" to new config: ' % self.name)\n for app in self.apps:\n self.diff_config(app, verbose)\n\n if not force and not yesno(\"Proceed with the update ?\"):\n return\n\n updated_apps = []\n for app in self.apps:\n updated_apps.append(app)\n if not app.update_or_create_job(update_callback):\n # Rollback the updates for all apps that have been updated\n self.rollback(updated_apps)\n return False\n\n return True", "def patch_cluster(self, cluster, *args, **kwargs):\n raise NotImplementedError", "def cleanup(self) -> None:\n try:\n self._cluster_client.delete_cluster(\n request={\n 'project_id': self.cluster_metadata.project_id,\n 'region': self.cluster_metadata.region,\n 'cluster_name': self.cluster_metadata.cluster_name,\n })\n except Exception as e:\n if e.code == 403:\n _LOGGER.error(\n 'Due to insufficient project permissions, '\n 'unable to clean up the default cluster: %s',\n self.cluster_metadata.cluster_name)\n raise ValueError(\n 'You cannot delete a cluster in project: {}'.format(\n self.cluster_metadata.project_id))\n elif e.code == 404:\n _LOGGER.error(\n 'Cluster does not exist: %s', self.cluster_metadata.cluster_name)\n raise ValueError(\n 'Cluster was not found: {}'.format(\n self.cluster_metadata.cluster_name))\n else:\n _LOGGER.error(\n 'Failed to delete cluster: %s', self.cluster_metadata.cluster_name)\n raise e", "def cleanup_cluster(self, cluster):\n self.log.info(\"removing xdcr/nodes settings\")\n rest = RestConnection(cluster.get_master_node())\n 
rest.remove_all_replications()\n rest.remove_all_remote_clusters()\n rest.remove_all_recoveries()\n cluster.cleanup_cluster(\"upgradeXDCR\")", "def update_all(self):\n self.update_head_node_ip()\n self.get_database_info()\n self.update_users()", "def dvs_update_network(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.show_step(2)\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n self.show_step(3)\n os_conn.neutron.update_network(net_1[\"id\"],\n {\"network\": {\"name\": 'net_2'}})\n\n assert_true(os_conn.get_network('net_2')['id'] == net_1['id'])\n\n self.show_step(4)\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n os_conn.neutron.update_network(\n default_net.id, {\"network\": {\"name\": 'spring'}})\n\n assert_true(os_conn.get_network('spring')['id'] == default_net.id)", "def _rename_clusters(self):\n all_clusters = []\n temp_clusters = self._clusters.copy()\n for clu in temp_clusters:\n all_clusters.append(self._clusters.pop(clu))\n idx = 0\n for clu in all_clusters:\n label = 'S' + str(idx)\n clu.rename(label)\n self._clusters[label] = clu\n idx += 1", "def multiple_apply_config(self):\n\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfiguration_scalability\")\n\n self.show_step(2)\n cluster_id = self.fuel_web.get_last_created_cluster()\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n target_compute = computes[0]\n config = utils.get_config_template('nova_disk')\n structured_config_old = get_structured_config_dict(config)\n\n config['nova_config'][\n 'DEFAULT/default_ephemeral_format']['value'] = 'ext3'\n structured_config_new = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n node_id=target_compute['id'])\n\n self.show_step(3)\n service_name = 'nova-compute'\n uptimes = self.get_service_uptime([target_compute], service_name)\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(\n cluster_id,\n node_id=target_compute['id'])\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(5)\n self.check_service_was_restarted([target_compute],\n uptimes, service_name)\n\n self.show_step(6)\n for compute in computes:\n if compute == target_compute:\n self.check_config_on_remote([compute], structured_config_new)\n target_hypervisor_name = compute['fqdn']\n else:\n hypervisor_name = compute['fqdn']\n self.check_config_on_remote([compute], structured_config_old)\n\n self.show_step(7)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n\n self.show_step(8)\n self.show_step(9)\n self.show_step(10)\n self.show_step(11)\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=target_hypervisor_name,\n fs_type='ext3')\n self.show_step(12)\n self.show_step(13)\n self.show_step(14)\n self.show_step(15)\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n 
hypervisor_name=hypervisor_name)\n\n self.env.make_snapshot(\"multiple_apply_config\")", "def __init_cluster(self, cluster):\n self.___init_nodes(cluster)\n self.__clusterop.async_rebalance(\n cluster.get_nodes(),\n cluster.get_nodes()[1:],\n []).result()", "def test_replaceIsEnvironmentSpecific(self):\n node = create_node(\"somewhere\", \"myservice\", \"env1\")\n node2 = create_node(\"somewhere2\", \"myservice\", \"env2\")\n node3 = create_node(\"somewhere3\", \"myservice\", \"env2\")\n disco = create_disco()\n disco.onMessage(None, NodeActive(node))\n disco.onMessage(None, NodeActive(node2))\n disco.onMessage(None, ReplaceCluster(node3.service, node3.environment,\n [node3]))\n self.assertEqual((knownNodes(disco, \"myservice\", \"env1\"),\n knownNodes(disco, \"myservice\", \"env2\")),\n ([node], [node3]))", "def refresh_metadata(self):\n #self.node_index = None\n #self.edge_index = None\n #self._calc_edge_centers = False\n #self._calc_cell_centers = False\n #self._calc_vcenters = False\n self._node_to_edges = None\n self._node_to_cells = None", "def reconfigure_keystone_to_use_ldap(self):\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n devops_pr_controller = self.fuel_web.get_nailgun_primary_node(\n self.env.d_env.nodes().slaves[0])\n\n pr_controller = self.fuel_web.get_nailgun_node_by_devops_node(\n devops_pr_controller)\n\n self.show_step(2)\n config = utils.get_config_template('keystone_ldap')\n structured_config = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(\n config,\n cluster_id,\n role=\"controller\")\n\n self.show_step(3)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role=\"controller\")\n\n self.show_step(4)\n self.fuel_web.task_wait(task, timeout=3600, interval=30)\n\n self.show_step(5)\n self.check_config_on_remote([pr_controller], structured_config)\n logger.info(\"New configuration was applied\")\n\n self.env.make_snapshot(\"reconfigure_keystone_to_use_ldap\")", "def test_update_hyperflex_cluster_profile(self):\n pass", "def add_delete_compute_cinder_ceph(self):\n\n self.env.revert_snapshot('ready_with_9_slaves')\n\n self.show_step(1, initialize=True)\n data = {\n 'volumes_lvm': True,\n 'volumes_ceph': False,\n 'images_ceph': True,\n 'osd_pool_size': '2',\n 'tenant': 'scalegroup5',\n 'user': 'scalegroup5',\n 'password': 'scalegroup5',\n \"net_provider\": 'neutron',\n \"net_segment_type\": settings.NEUTRON_SEGMENT['tun']\n }\n\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n settings=data\n )\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-01': ['controller'],\n 'slave-02': ['controller'],\n 'slave-03': ['controller'],\n 'slave-04': ['compute', 'ceph-osd', 'cinder'],\n 'slave-05': ['compute', 'ceph-osd', 'cinder']\n }\n )\n\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(2)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(3)\n self.fuel_web.run_ostf(cluster_id)\n\n self.show_step(4)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-06': ['compute', 'ceph-osd', 'cinder']\n }\n )\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(5)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(6)\n self.fuel_web.run_ostf(cluster_id)\n\n self.show_step(7)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-07': ['compute', 'ceph-osd', 'cinder']\n }\n )\n\n with self.fuel_web.get_ssh_for_node('slave-04') as remote_ceph:\n 
self.fuel_web.prepare_ceph_to_delete(remote_ceph)\n\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-04': ['compute', 'ceph-osd', 'cinder']\n },\n pending_addition=False,\n pending_deletion=True\n )\n\n self.show_step(8)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(9)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(10)\n self.fuel_web.run_ostf(cluster_id)\n\n self.show_step(11)\n with self.fuel_web.get_ssh_for_node('slave-07') as remote_ceph:\n self.fuel_web.prepare_ceph_to_delete(remote_ceph)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-07': ['compute', 'ceph-osd', 'cinder']\n },\n pending_addition=False,\n pending_deletion=True\n )\n self.show_step(12)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(13)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(14)\n self.fuel_web.run_ostf(cluster_id)\n\n self.env.make_snapshot('add_delete_compute_cinder_ceph')", "def resume_cluster(self):\n self.log.info(\"Loading info from the IaaS\")\n if not isfile(self.save_file):\n self.log.info(\"No existing created cluster\")\n saved_nodes = []\n else:\n saved_cluster = loads(open(self.save_file, 'r').read())\n saved_nodes = saved_cluster['clients']\n\n in_nodes = Node.get_all_nodes(check_active=True)\n for n in in_nodes:\n if n.name not in saved_nodes:\n if \"orchestrator\" in n.name:\n global orchestrator\n orchestrator = n\n self.log.debug('Found orchestrator %s' % n.name)\n continue\n else:\n self.all_nodes.append(n)\n #sort nodes by name\n self.all_nodes.sort(key=lambda x: x.name)", "def updateClusterInfo(self):\n self.nPoints = len(self.labels)\n self.n = len(np.unique(self.labels))\n self.centers = [ [0.0 for j in range(3)] for i in range(self.n)]", "def upgrade(self):\n # The workaround we need in order to fix [1]. 
In few words,\n # when new Docker is installed the containers MUST NOT start\n # again because in this case puppet inside them will install\n # latest packages and breaks dependencies in some soft.\n #\n # [1]: https://bugs.launchpad.net/fuel/+bug/1455419\n self.supervisor.stop_all_services()\n\n self.install_repos()\n self.update_repo()\n self.install_packages()\n self.run_puppet()", "def full_deploy():\n refresh_cts()\n push_mockups()\n deploy()", "def resize_cluster(ctx, project_name, cluster_name, instance_size_name):\n project = ctx.obj.groups.byName[project_name].get().data\n\n new_cluster_config = {\n 'clusterType': 'REPLICASET',\n 'providerSettings': {\n 'providerName': 'AWS',\n 'regionName': 'US_WEST_1',\n 'instanceSizeName': instance_size_name}}\n\n cluster = ctx.obj.groups[project.id].clusters[cluster_name].patch(\n **new_cluster_config)\n pprint(cluster.data)", "def _update_clusters(partial, to_update, grids):\n df, settings = partial\n clusters = settings['clusters']\n primary_key = settings['idCol']\n cluster_id = settings['predCol']\n column = settings['feature']\n init, end, end2 = grids\n\n if len(df) > 0:\n tmp = df.apply(lambda row: _inblock(row, column, init, end2), axis=1)\n df = df.loc[tmp]\n df.drop_duplicates([primary_key], inplace=False, ignore_index=True)\n\n for key in to_update:\n if key in clusters:\n df.loc[df[cluster_id] == key, cluster_id] = to_update[key]\n\n df.loc[df[cluster_id].str.contains(\"-0\", na=False), cluster_id] = -1\n df = df.drop([primary_key], axis=1)\n\n return df", "def package_upgrade():\n\n if (do_action_package_upgrade('nova-common',\n do_openstack_upgrade,\n CONFIGS)):\n # we should restart the container scoped (subordinate) plugins after a\n # managed openstack upgrade see: BUG#1835557\n for rid in relation_ids('neutron-plugin'):\n neutron_plugin_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-ceilometer'):\n nova_ceilometer_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-vgpu'):\n nova_vgpu_joined(rid, remote_restart=True)\n # NOTE(ajkavanagh) - if unit is paused (usually true for managed\n # upgrade) then the config_changed() function is a no-op\n config_changed()", "def reconfigure_nova_ephemeral_disk(self):\n self.check_run('reconfigure_nova_ephemeral_disk')\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfigure_overcommit_ratio\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n\n self.show_step(2)\n existing_configs = self.fuel_web.client.list_configuration(\n cluster_id)\n for existing_config in existing_configs:\n self.fuel_web.client.delete_configuration(existing_config[\"id\"])\n\n self.show_step(3)\n config = utils.get_config_template('nova_disk')\n structured_config = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role='compute')\n\n service_name = \"nova-compute\"\n\n uptimes = self.get_service_uptime(computes, service_name)\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role='compute')\n self.show_step(5)\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(6)\n self.check_service_was_restarted(computes, uptimes, service_name)\n\n self.show_step(7)\n self.check_config_on_remote(computes, structured_config)\n\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n\n self.show_step(8)\n self.show_step(9)\n 
self.show_step(10)\n self.show_step(11)\n self.show_step(12)\n self.check_nova_ephemeral_disk(os_conn, cluster_id)\n\n self.env.make_snapshot(\"reconfigure_nova_ephemeral_disk\",\n is_make=True)", "def test_three_nodes_cluster_teardown(three_nodes_cluster, ssh_key,\n test_config, module_tmpdir, logger):\n node1, node2, node3 = three_nodes_cluster\n nodes_list = [node1, node2, node3]\n logger.info('Asserting cluster status')\n _assert_cluster_status(node1.client, logger)\n\n logger.info('Installing example deployment')\n example = get_example_deployment(node1, ssh_key, logger,\n 'cluster_teardown', test_config)\n example.inputs['server_ip'] = node1.ip_address\n example.upload_and_verify_install()\n\n logger.info('Removing example deployment')\n example.uninstall()\n logger.info('Removing cluster')\n for node in nodes_list:\n for config_name in ['manager', 'rabbit', 'db']:\n node.run_command('cfy_manager remove -v -c /etc/cloudify/'\n '{0}_config.yaml'.format(config_name))\n\n credentials = _get_new_credentials()\n logger.info('New credentials: %s', credentials)\n\n for node in nodes_list:\n node.install_config = copy.deepcopy(node.basic_install_config)\n\n logger.info('Installing Cloudify cluster again')\n run_cluster_bootstrap(nodes_list, nodes_list, nodes_list,\n skip_bootstrap_list=[], pre_cluster_rabbit=True,\n high_security=True, use_hostnames=False,\n tempdir=module_tmpdir, test_config=test_config,\n credentials=credentials)\n node1.download_rest_ca(force=True)\n\n logger.info('Asserting cluster status')\n _assert_cluster_status(node1.client, logger)", "def automerge_clusters(self):\n all_clusters = self.get_clusters().copy()\n\n if not self._single: # if not in single mode mode\n # initialize the variable to check if some change has happened \n changed = False\n for cl_1 in all_clusters: # cycle over clusters\n c_c1 = all_clusters[cl_1]\n for cl_2 in all_clusters: # inner cycle over clusters\n c_c2 = all_clusters[cl_2]\n # if two clusters have the same speaker and have different \n # cluster identifiers\n if cl_1 != cl_2 and c_c1.get_speaker() != 'unknown' and c_c1.get_speaker() == c_c2.get_speaker() and self._clusters.has_key(cl_1) and self._clusters.has_key(cl_2):\n changed = True\n # merge the clusters an record that something changed\n self._merge_clusters(cl_1, cl_2)\n if changed: # if something has changed\n # rename all the clusters starting from S0\n self._rename_clusters()\n # remove also the old waves and seg files of the old clusters\n shutil.rmtree(self.get_file_basename())\n # rebuild all seg files\n self.generate_seg_file(set_speakers=False)\n # resplit the original wave file according to the new clusters\n self._to_trim()", "def main():\n\n # Handling arguments\n args = get_args()\n all_clusters = args.all_clusters\n all_datacenters = args.all_datacenters\n all_hosts = args.all_hosts\n clusters = []\n if args.clusters:\n clusters = args.clusters\n debug = args.debug\n allow_fqdn = args.allow_fqdn\n datacenters = []\n if args.datacenters:\n datacenters = args.datacenters\n hosts = []\n if args.hosts:\n hosts = args.hosts\n host_configure_agent = args.host_configure_agent\n hosts_file = None\n if args.hosts_file:\n hosts_file = args.hosts_file\n hv_username = None\n if args.hv_username:\n hv_username = args.hv_username\n hv_password = None\n if args.hv_password:\n hv_password = args.hv_password\n hv_management_network = None\n if args.hv_management_network:\n hv_management_network = args.hv_management_network\n hv_data_network = None\n if args.hv_data_network:\n 
hv_data_network = args.hv_data_network\n hv_vm_network = None\n if args.hv_vm_network:\n hv_vm_network = args.hv_vm_network\n hv_mc_network = None\n if args.hv_mc_network:\n hv_mc_network = args.hv_mc_network\n log_file = None\n if args.logfile:\n log_file = args.logfile\n nuage_enterprise = args.nuage_enterprise\n nuage_host = args.nuage_host\n nuage_port = args.nuage_port\n nuage_password = None\n if args.nuage_password:\n nuage_password = args.nuage_password\n nuage_username = args.nuage_username\n nuage_vrs_ovf = None\n if args.nuage_vrs_ovf:\n nuage_vrs_ovf = args.nuage_vrs_ovf\n nosslcheck = args.nosslcheck\n verbose = args.verbose\n vcenter_host = args.vcenter_host\n vcenter_name = vcenter_host\n if args.vcenter_name:\n vcenter_name = args.vcenter_name\n vcenter_https_port = args.vcenter_https_port\n vcenter_http_port = args.vcenter_http_port\n vcenter_password = None\n if args.vcenter_password:\n vcenter_password = args.vcenter_password\n vcenter_username = args.vcenter_username\n\n # Logging settings\n if debug:\n log_level = logging.DEBUG\n elif verbose:\n log_level = logging.INFO\n else:\n log_level = logging.WARNING\n\n logging.basicConfig(filename=log_file, format='%(asctime)s %(levelname)s %(message)s', level=log_level)\n logger = logging.getLogger(__name__)\n\n # Input checking\n if not all_datacenters and len(datacenters) < 1:\n logger.critical('Not all datacenters have to be present in the Nuage Deployment tool (--all-datacenters option NOT enabled), but also no datacenters specified (at least one --datacenter)')\n return 1\n if not all_clusters and len(clusters) < 1:\n logger.critical('Not all clusters have to be present in the Nuage Deployment tool (--all-clusters option NOT enabled), but also no clusters specified (at least one --cluster)')\n return 1\n if not all_hosts and len(hosts) < 1 and not hosts_file:\n logger.critical('Not all hosts have to be present in the Nuage Deployment tool (--all-hosts option NOT enabled), but also no hosts specified (at least one --host or specify a file with the host information via --hosts-file)')\n return 1\n if all_datacenters and len(datacenters) > 0:\n logger.warning('You enabled all datacenters and added individual datacenter options, --all-datacenters takes precendence and overwrites the specified datacenters.')\n datacenters = []\n if all_clusters and len(clusters) > 0:\n logger.warning('You enabled all clusters and added individual cluster options, --all-clusters takes precendence and overwrites the specified clusters.')\n clusters = []\n if all_hosts and len(hosts) > 0 and not hosts_file:\n logger.warning('You enabled all hosts and added individual hosts options, --all-hosts takes precendence and overwrites the specified hosts.')\n hosts = []\n elif all_hosts and len(hosts) < 1 and hosts_file:\n logger.warning('You enabled all hosts and provided a hosts file, the hosts file takes precendence over the --all-hosts flag and this flag will be ignored.')\n all_hosts = False\n elif not all_hosts and len(hosts) > 0 and hosts_file:\n logger.warning('You specified host with the --host argument and provided a hosts file, the hosts file takes precendence over the --host paramerters and these will be ignored.')\n hosts = []\n\n # CSV Handling\n hosts_list = None\n if hosts_file:\n hosts_list = {}\n # CSV fields:\n # VM Name, Resource Pool, Folder, MAC Address, Post Script\n logger.debug('Parsing csv %s' % hosts_file)\n\n if not os.path.isfile(hosts_file):\n logger.critical('CSV file %s does not exist, exiting' % hosts_file)\n return 1\n\n 
with open(hosts_file, 'rb') as hostlist:\n hosts_list_raw = csv.reader(hostlist, delimiter=',', quotechar='\"')\n for row in hosts_list_raw:\n logger.debug('Found CSV row: %s' % ','.join(row))\n # Adding IP to the hosts variable so it can also be used in further handling if it's a valid IP\n if allow_fqdn or ip_address_is_valid(row[0]):\n hosts_list[row[0]] = row\n hosts.append(row[0])\n else:\n logger.warning('Found an invalid IP %s in the hosts file and FQDNs are not allowed, skipping line' % row[0])\n\n # Getting user password for Nuage connection\n if nuage_password is None:\n logger.debug('No command line Nuage password received, requesting Nuage password from user')\n nuage_password = getpass.getpass(prompt='Enter password for Nuage host %s for user %s: ' % (nuage_host, nuage_username))\n\n # Getting user password for vCenter connection\n if vcenter_password is None:\n logger.debug('No command line vCenter password received, requesting vCenter password from user')\n vcenter_password = getpass.getpass(prompt='Enter password for vCenter host %s for user %s: ' % (vcenter_host, vcenter_username))\n\n # Getting user password for hosts\n if hv_password is None:\n logger.debug('No command line Host password received, requesting Host password from user')\n hv_password = getpass.getpass(prompt='Enter password for the hosts inside vCenter %s for user %s: ' % (vcenter_host, hv_username))\n\n try:\n vc = None\n nc = None\n\n # Connecting to Nuage\n try:\n logger.info('Connecting to Nuage server %s:%s with username %s' % (nuage_host, nuage_port, nuage_username))\n nc = vsdk.NUVSDSession(username=nuage_username, password=nuage_password, enterprise=nuage_enterprise, api_url=\"https://%s:%s\" % (nuage_host, nuage_port))\n nc.start()\n except IOError:\n pass\n\n if not nc or not nc.is_current_session():\n logger.error('Could not connect to Nuage host %s with user %s and specified password' % (nuage_host, nuage_username))\n return 1\n\n # Connecting to vCenter\n try:\n logger.info('Connecting to vCenter server %s:%s with username %s' % (vcenter_host, vcenter_https_port, vcenter_username))\n if nosslcheck:\n vc = SmartConnectNoSSL(host=vcenter_host, user=vcenter_username, pwd=vcenter_password, port=int(vcenter_https_port))\n else:\n vc = SmartConnect(host=vcenter_host, user=vcenter_username, pwd=vcenter_password, port=int(vcenter_https_port))\n\n except IOError:\n pass\n\n if not vc:\n logger.error('Could not connect to vCenter host %s with user %s and specified password' % (vcenter_host, vcenter_username))\n return 1\n\n logger.debug('Registering vCenter disconnect at exit')\n atexit.register(Disconnect, vc)\n\n logger.info('Connected to both Nuage & vCenter servers')\n\n # Check if the vCenter exists in Nuage vCenter Deployment Tool\n nuage_vcenter = None\n logger.debug('Checking if vCenter %s is already present in Nuage vCenter Deployment Tool' % vcenter_name)\n for nvc in nc.user.vcenters.get():\n if nvc.ip_address == vcenter_host:\n logger.debug('Found vCenter %s, not recreating' % vcenter_name)\n nuage_vcenter = nvc\n break\n\n # If th vCenter does not exist in Nuage vCenter Deployment Tool, create it\n if not nuage_vcenter:\n logger.debug('vCenter %s with IP %s not found in the Nuage vCenter Deployment Tool, creating' % (vcenter_name, vcenter_host))\n nuage_vcenter = vsdk.NUVCenter(name=vcenter_name, ip_address=vcenter_host, user_name=vcenter_username, password=vcenter_password, http_port=vcenter_http_port, https_port=vcenter_https_port, ovf_url=nuage_vrs_ovf)\n 
nc.user.create_child(nuage_vcenter)\n logger.info('Created vCenter %s in the Nuage vCenter Deployment Tool' % vcenter_name)\n\n # Datacenter Handling\n # Gathering all Datacenters inside the vCenter\n logger.debug('Gathering all Datacenters from vCenter')\n content = vc.content\n obj_view = content.viewManager.CreateContainerView(content.rootFolder, [vim.Datacenter], True)\n vc_dc_list = obj_view.view\n obj_view.Destroy()\n\n # Gathering all Datacenters inside the Nuage vCenter\n logger.debug('Gathering all Datacenter from the Nuage vCenter entry')\n nc_dc_list = nuage_vcenter.vcenter_data_centers.get()\n\n # Parsing all datacenters\n for vc_dc in vc_dc_list:\n if all_datacenters or vc_dc.name in datacenters:\n logger.debug('vCenter Datacenter %s is in list that has to be present in the Nuage vCenter Deployment Tool, checking if it already exists.' % vc_dc.name)\n handle_vdt_datacenter(logger=logger, nc=nc, vc=vc, nuage_vcenter=nuage_vcenter, vc_dc=vc_dc, nc_dc_list=nc_dc_list, vcenter_name=vcenter_name, all_clusters=all_clusters, all_hosts=all_hosts, clusters=clusters, hosts=hosts, hosts_list=hosts_list, hv_username=hv_username, hv_password=hv_password, hv_management_network=hv_management_network, hv_data_network=hv_data_network, hv_vm_network=hv_vm_network, hv_mc_network=hv_mc_network, host_configure_agent=host_configure_agent, allow_fqdn=allow_fqdn)\n\n logger.info('Completed all tasks.')\n return 0\n\n except vmodl.MethodFault as e:\n logger.critical('Caught vmodl fault: %s' % e.msg)\n return 1\n except Exception as e:\n logger.critical('Caught exception: %s' % str(e))\n return 1", "def shutdown_cluster(self):\n self.cluster.shutdown()", "def _update_deploy_specs(self):\n for cluster in self.CLUSTERS:\n deployspec_name = PushUtil.get_deployspec_name(cluster)\n QueueClusterConfigUpdates.update_deployspec(\n deployspec_name, cluster, self._release_name)", "def test_cluster_works_fine_after_deleting_CA_folder(self):\n self.x509.generate_multiple_x509_certs(servers=self.servers[:self.nodes_init])\n random_nodes = random.sample(self.servers[1:self.nodes_init], 1)\n self.log.info(\"Uploading root certs from {0}\".format(random_nodes[0]))\n self.x509.upload_root_certs(random_nodes[0])\n self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])\n self.x509.delete_unused_out_of_the_box_CAs(server=self.master)\n self.x509.upload_client_cert_settings(server=self.master)\n shell = RemoteMachineShellConnection(random_nodes[0])\n shell.remove_directory(self.x509.install_path + x509main.CHAINFILEPATH +\n \"/\" + x509main.TRUSTEDCAPATH)\n shell.disconnect()\n\n failover_nodes = random_nodes\n nodes_in_cluster = self.servers[:self.nodes_init]\n for operation in [\"recovery\", \"out\"]:\n shell = RemoteMachineShellConnection(failover_nodes[0])\n shell.stop_server()\n self.cluster.async_failover(self.servers[:self.nodes_init],\n failover_nodes,\n graceful=False)\n self.wait_for_failover_or_assert(1)\n if operation == \"out\":\n https_val = CbServer.use_https # so that add_node uses https\n CbServer.use_https = True\n rest = RestConnection(self.master)\n otp_nodes = []\n ejected_nodes = []\n for node in nodes_in_cluster:\n otp_nodes.append('ns_1@'+node.ip)\n for node in failover_nodes:\n ejected_nodes.append('ns_1@' + node.ip)\n status = rest.rebalance(otpNodes=otp_nodes, ejectedNodes=ejected_nodes)\n if not status:\n shell.start_server(failover_nodes[0])\n self.fail(\"rebalance/failover failed\")\n CbServer.use_https = https_val\n nodes_in_cluster = nodes_in_cluster.remove(failover_nodes[0])\n 
shell.start_server(failover_nodes[0])\n if operation == \"recovery\":\n rest = RestConnection(self.master)\n for node in failover_nodes:\n rest.set_recovery_type(\"ns_1@\" + node.ip, recoveryType=\"delta\")\n https_val = CbServer.use_https # so that add_node uses https\n CbServer.use_https = True\n task = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])\n self.wait_for_rebalance_to_complete(task)\n CbServer.use_https = https_val\n self.auth(servers=nodes_in_cluster)", "def _setup_cluster(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def multiple_full_repairs_lcs_test(self):\n cluster = self.cluster\n cluster.populate(2).start(wait_for_binary_proto=True)\n node1, node2 = cluster.nodelist()\n for x in xrange(0, 10):\n node1.stress(['write', 'n=100k', 'no-warmup', '-rate', 'threads=10', '-schema', 'compaction(strategy=LeveledCompactionStrategy,sstable_size_in_mb=10)', 'replication(factor=2)'])\n cluster.flush()\n cluster.wait_for_compactions()\n node1.nodetool(\"repair -full keyspace1 standard1\")", "def resumable_replace_test(self):\n\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n session = self.patient_cql_connection(node1)\n stress_table = 'keyspace1.standard1'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n initial_data = rows_to_list(session.execute(query))\n\n node3.stop(gently=False)\n\n # kill node1 in the middle of streaming to let it fail\n t = InterruptBootstrap(node1)\n t.start()\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n # keep timeout low so that test won't hang\n node4.set_configuration_options(values={'streaming_socket_timeout_in_ms': 1000})\n cluster.add(node4, False)\n try:\n node4.start(jvm_args=[\"-Dcassandra.replace_address_first_boot=127.0.0.3\"], wait_other_notice=False)\n except NodeError:\n pass # node doesn't start as expected\n t.join()\n\n # bring back node1 and invoke nodetool bootstrap to resume bootstrapping\n node1.start()\n node4.nodetool('bootstrap resume')\n # check if we skipped already retrieved ranges\n node4.watch_log_for(\"already available. 
Skipping streaming.\")\n # wait for node3 ready to query\n node4.watch_log_for(\"Listening for thrift clients...\")\n\n # check if 2nd bootstrap succeeded\n assert_bootstrap_state(self, node4, 'COMPLETED')\n\n # query should work again\n debug(\"Stopping old nodes\")\n node1.stop(gently=False, wait_other_notice=True)\n node2.stop(gently=False, wait_other_notice=True)\n\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node4)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.ONE)", "def config(self, cluster_name, name, username, version, int_netmask, int_ip_low,\n int_ip_high, ext_netmask, ext_ip_low, ext_ip_high, gateway, dns_servers,\n encoding, sc_zonename, smartconnect_ip, join_cluster, compliance, txn_id):\n logger = get_task_logger(txn_id=txn_id, task_id=self.request.id, loglevel=const.VLAB_ONEFS_LOG_LEVEL.upper())\n resp = {'content' : {}, 'error': None, 'params': {}}\n logger.info('Task starting')\n nodes = vmware.show_onefs(username)\n node = nodes.get(name, None)\n if not node:\n error = \"No node named {} found\".format(name)\n resp['error'] = error\n logger.error(error)\n return resp\n elif node['meta']['configured']:\n error = \"Cannot configure a node that's already configured\"\n resp['error'] = error\n logger.error(error)\n else:\n # Lets set it up!\n logger.info('Found node')\n console_url = node['console']\n if join_cluster:\n logger.info('Joining node to cluster {}'.format(cluster_name))\n setup_onefs.join_existing_cluster(console_url, cluster_name, compliance, logger)\n else:\n logger.info('Setting up new cluster named {}'.format(cluster_name))\n setup_onefs.configure_new_cluster(version=version,\n console_url=console_url,\n cluster_name=cluster_name,\n int_netmask=int_netmask,\n int_ip_low=int_ip_low,\n int_ip_high=int_ip_high,\n ext_netmask=ext_netmask,\n ext_ip_low=ext_ip_low,\n ext_ip_high=ext_ip_high,\n gateway=gateway,\n dns_servers=dns_servers,\n encoding=encoding,\n sc_zonename=sc_zonename,\n smartconnect_ip=smartconnect_ip,\n compliance=compliance,\n logger=logger)\n node['meta']['configured'] = True\n vmware.update_meta(username, name, node['meta'])\n logger.info('Task complete')\n return resp", "def test_snat_with_nodes_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n for node in self.inputs.k8s_slave_ips:\n self.inputs.reboot(node)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def multiple_subsequent_repair_test(self):\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n debug(\"Inserting data with stress\")\n node1.stress(['write', 'n=5M', 'no-warmup', '-rate', 'threads=10', '-schema', 'replication(factor=3)'])\n\n debug(\"Flushing nodes\")\n cluster.flush()\n\n debug(\"Waiting compactions to finish\")\n cluster.wait_for_compactions()\n\n if self.cluster.version() >= '2.2':\n debug(\"Repairing node1\")\n node1.nodetool(\"repair\")\n debug(\"Repairing node2\")\n node2.nodetool(\"repair\")\n debug(\"Repairing node3\")\n node3.nodetool(\"repair\")\n else:\n debug(\"Repairing node1\")\n node1.nodetool(\"repair -par -inc\")\n debug(\"Repairing node2\")\n node2.nodetool(\"repair -par 
-inc\")\n debug(\"Repairing node3\")\n node3.nodetool(\"repair -par -inc\")\n\n # Using \"print\" instead of debug() here is on purpose. The compactions\n # take a long time and don't print anything by default, which can result\n # in the test being timed out after 20 minutes. These print statements\n # prevent it from being timed out.\n print \"compacting node1\"\n node1.compact()\n print \"compacting node2\"\n node2.compact()\n print \"compacting node3\"\n node3.compact()\n\n # wait some time to be sure the load size is propagated between nodes\n debug(\"Waiting for load size info to be propagated between nodes\")\n time.sleep(45)\n\n load_size_in_kb = float(sum(map(lambda n: n.data_size(), [node1, node2, node3])))\n load_size = load_size_in_kb / 1024 / 1024\n debug(\"Total Load size: {}GB\".format(load_size))\n\n # There is still some overhead, but it's lot better. We tolerate 25%.\n expected_load_size = 4.5 # In GB\n assert_almost_equal(load_size, expected_load_size, error=0.25)", "def test_patch_hyperflex_cluster_profile(self):\n pass", "def _init_cluster(self):\n self._Init_Cluster()", "def test_redeploy_edges(self):\n pass", "def cluster_reset(\n self, soft: bool = True, target_nodes: Optional[\"TargetNodesT\"] = None\n ) -> ResponseT:\n return self.execute_command(\n \"CLUSTER RESET\", b\"SOFT\" if soft else b\"HARD\", target_nodes=target_nodes\n )", "def reconfigure_nova_quota(self):\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n\n self.show_step(2)\n config = utils.get_config_template('nova_quota')\n structured_config = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role=\"controller\")\n\n self.show_step(3)\n uptimes = self.get_service_uptime(controllers, 'nova-api')\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role=\"controller\")\n\n self.show_step(5)\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(6)\n self.check_service_was_restarted(controllers, uptimes, 'nova-api')\n\n self.show_step(7)\n self.check_config_on_remote(controllers, structured_config)\n\n self.show_step(8)\n self.show_step(9)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n\n self.check_nova_quota(os_conn, cluster_id)\n\n self.env.make_snapshot(\"reconfigure_nova_quota\")", "def reconfigure_ml2_vlan_range_for_suite_of_nodes(self):\n self.show_step(1)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n\n controller_ids = [int(ctrl['id']) for ctrl in controllers]\n\n self.show_step(2)\n config = utils.get_config_template('neutron')\n structured_config = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n node_ids=controller_ids)\n\n self.show_step(3)\n service_name = 'neutron-server'\n uptimes = self.get_service_uptime(controllers, service_name)\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role=\"controller\")\n\n self.show_step(5)\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(6)\n self.check_service_was_restarted(controllers, uptimes, 
service_name)\n\n self.show_step(7)\n self.check_config_on_remote(controllers, structured_config)\n\n self.show_step(8)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n self.check_ml2_vlan_range(os_conn)\n\n snapshotname = \"reconfigure_ml2_vlan_range_for_suite_of_nodes\"\n self.env.make_snapshot(snapshotname)", "def test_replaceDoesNotMutate(self):\n disco = create_disco()\n node = create_node(\"somewhere\")\n disco.onMessage(None, NodeActive(node))\n resolved_node = resolve(disco, \"myservice\", \"1.0\")\n\n node2 = create_node(\"somewhere\")\n node2.version = \"1.3\"\n disco.onMessage(None, ReplaceCluster(\"myservice\",\n SANDBOX_ENV,\n [node2]))\n self.assertEqual(resolved_node.version, \"1.0\")", "def fix_all(self):\n\n altered_tables = {}\n\n for ingestible_db_conf in self.ingestible_db_conf_repo.get_ingestible_dbs():\n target_db= ingestible_db_conf.target_db_name\n db_type = ingestible_db_conf.db_type\n self.logger.info(\"Fixing consistency for DB Type: %s, Target DB: %s\" % (db_type, target_db))\n self.prepare_database(target_db)\n consistency_checker = HiveConsistencyChecker(target_db, db_type)\n\n unused_tables = consistency_checker.get_unused_tables()\n self.remove_unused_tables(unused_tables)\n\n new_tables = consistency_checker.get_new_tables()\n self.create_new_tables(new_tables)\n\n inconsistent_tables = consistency_checker.get_inconsistent_tables()\n self.fix_inconsistent_tables(inconsistent_tables, db_type)\n\n # Combine lists of inconsistent and unused tables\n altered_tables[db_type] = map(lambda qualified_table: qualified_table.split(\".\")[1],\n inconsistent_tables.keys() + unused_tables)\n\n self.logger.debug(\"Altered Tables: %s\" % altered_tables)\n return altered_tables", "def k8scluster(revert_snapshot, request, config,\n hardware, underlay, k8s_actions):\n # If no snapshot was reverted, then try to revert the snapshot\n # that belongs to the fixture.\n # Note: keep fixtures in strict dependences from each other!\n if not revert_snapshot:\n if hardware.has_snapshot(ext.SNAPSHOT.k8s_deployed) and \\\n hardware.has_snapshot_config(ext.SNAPSHOT.k8s_deployed):\n hardware.revert_snapshot(ext.SNAPSHOT.k8s_deployed)\n\n # Create k8s cluster\n if config.k8s.kube_host == '0.0.0.0':\n kube_settings = getattr(request.instance, 'kube_settings',\n settings.DEFAULT_CUSTOM_YAML)\n LOG.info('Kube settings are {}'.format(kube_settings))\n\n k8s_actions.install_k8s(\n custom_yaml=kube_settings,\n lvm_config=underlay.config_lvm)\n hardware.create_snapshot(ext.SNAPSHOT.k8s_deployed)\n\n else:\n # 1. hardware environment created and powered on\n # 2. config.underlay.ssh contains SSH access to provisioned nodes\n # (can be passed from external config with TESTS_CONFIGS variable)\n # 3. 
config.k8s.* options contain access credentials to the already\n # installed k8s API endpoint\n pass\n\n return k8s_actions", "def deploy_create_delete_ip_n_times_nova_flat(self):\n self.env.revert_snapshot(\"ready_with_3_slaves\")\n\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=hlp_date.DEPLOYMENT_MODE\n )\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-01': ['controller'],\n 'slave-02': ['compute']\n }\n )\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.fuel_web.verify_network(cluster_id)\n self.fuel_web.run_ostf_repeatably(cluster_id)\n\n self.env.make_snapshot(\"create_delete_ip_n_times_nova_flat\")", "def test_snat_with_master_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError", "def main():\n session, cluster = create_database()\n \n drop_tables(session)\n create_tables(session)\n\n session.shutdown()\n cluster.shutdown()", "def reboot(self, node):", "def unsafe_replace_test(self):\n debug('Starting cluster with 3 nodes.')\n cluster = self.cluster\n cluster.populate(3)\n cluster.set_batch_commitlog(enabled=True)\n node1, node2, node3 = cluster.nodelist()\n cluster.seeds.remove(node3)\n NUM_TOKENS = os.environ.get('NUM_TOKENS', '256')\n if DISABLE_VNODES:\n cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': 1})\n else:\n cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': NUM_TOKENS})\n cluster.start()\n\n debug('Inserting Data...')\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=3)'])\n cluster.flush()\n\n session = self.patient_cql_connection(node1)\n stress_table = 'keyspace1.standard1'\n query = SimpleStatement('select * from {} LIMIT 1'.format(stress_table), consistency_level=ConsistencyLevel.THREE)\n initial_data = rows_to_list(session.execute(query))\n\n for set_allow_unsafe_flag in [False, True]:\n debug('Stopping node 3.')\n node3.stop(gently=False)\n\n # completely delete the system keyspace data plus commitlog and saved caches\n for d in node3.data_directories():\n system_data = os.path.join(d, 'system')\n if os.path.exists(system_data):\n rmtree(system_data)\n\n for d in ['commitlogs', 'saved_caches']:\n p = os.path.join(node3.get_path(), d)\n if os.path.exists(p):\n rmtree(p)\n\n node3.set_configuration_options(values={'auto_bootstrap': False})\n mark = node3.mark_log()\n\n if set_allow_unsafe_flag:\n debug('Starting node3 with auto_bootstrap = false and replace_address = 127.0.0.3 and allow_unsafe_replace = true')\n node3.start(replace_address='127.0.0.3', wait_for_binary_proto=True, jvm_args=['-Dcassandra.allow_unsafe_replace=true'])\n # query should work again\n debug(\"Stopping old nodes\")\n node1.stop(gently=False, wait_other_notice=True)\n node2.stop(gently=False, wait_other_notice=True)\n\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node3)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.ONE)\n else:\n debug('Starting node 3 with auto_bootstrap = false and 
replace_address = 127.0.0.3')\n node3.start(replace_address='127.0.0.3', wait_other_notice=False)\n node3.watch_log_for('To perform this operation, please restart with -Dcassandra.allow_unsafe_replace=true',\n from_mark=mark, timeout=20)", "def multi_dc_replace_with_rf1_test(self):\n cluster = self.cluster\n cluster.populate([1, 1])\n cluster.start()\n node1, node2 = cluster.nodelist()\n\n node1 = cluster.nodes['node1']\n yaml_config = \"\"\"\n # Create the keyspace and table\n keyspace: keyspace1\n keyspace_definition: |\n CREATE KEYSPACE keyspace1 WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 1, 'dc2': 1};\n table: users\n table_definition:\n CREATE TABLE users (\n username text,\n first_name text,\n last_name text,\n email text,\n PRIMARY KEY(username)\n ) WITH compaction = {'class':'SizeTieredCompactionStrategy'};\n insert:\n partitions: fixed(1)\n batchtype: UNLOGGED\n queries:\n read:\n cql: select * from users where username = ?\n fields: samerow\n \"\"\"\n with tempfile.NamedTemporaryFile(mode='w+') as stress_config:\n stress_config.write(yaml_config)\n stress_config.flush()\n node1.stress(['user', 'profile=' + stress_config.name, 'n=10k', 'no-warmup',\n 'ops(insert=1)', '-rate', 'threads=50'])\n\n session = self.patient_cql_connection(node1)\n\n # change system_auth keyspace to 2 (default is 1) to avoid\n # \"Unable to find sufficient sources for streaming\" warning\n if cluster.cassandra_version() >= '2.2.0':\n session.execute(\"\"\"\n ALTER KEYSPACE system_auth\n WITH replication = {'class':'SimpleStrategy', 'replication_factor':2};\n \"\"\")\n\n # Save initial data\n stress_table = 'keyspace1.users'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.TWO)\n initial_data = rows_to_list(session.execute(query))\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node2.stop(wait_other_notice=True)\n\n node3 = new_node(cluster, data_center='dc2')\n node3.start(replace_address='127.0.0.2', wait_for_binary_proto=True)\n\n assert_bootstrap_state(self, node3, 'COMPLETED')\n\n # Check that keyspace was replicated from dc1 to dc2\n self.assertFalse(node3.grep_log(\"Unable to find sufficient sources for streaming range\"))\n\n # query should work again with node1 stopped\n node1.stop(wait_other_notice=True)\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node3)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.LOCAL_ONE)", "def test_replaceEmpty(self):\n disco = create_disco()\n node1 = create_node(\"somewhere\")\n node2 = create_node(\"somewhere2\")\n disco.onMessage(None, ReplaceCluster(\"myservice\",\n SANDBOX_ENV,\n [node1, node2]))\n self.assertEqual(knownNodes(disco, \"myservice\", \"sandbox\"), [node1, node2])", "def upgrade_and_downgrade(self, fault_on_pool_upgrade=False):\n # (1)Setup\n self.log.info(\"(1)==Setup and show rpm, dmg and daos versions on all hosts.\")\n hosts_client = self.hostlist_clients\n hosts_server = self.hostlist_servers\n all_hosts = include_local_host(hosts_server)\n self.upgrade_repo = self.params.get(\"upgrade_repo\", '/run/interop/*')\n self.downgrade_repo = self.params.get(\"downgrade_repo\", '/run/interop/*')\n num_attributes = self.params.get(\"num_attributes\", '/run/attrtests/*')\n ior_api = self.params.get(\"api\", '/run/ior/*')\n mount_dir = self.params.get(\"mount_dir\", '/run/dfuse/*')\n self.show_daos_version(all_hosts, hosts_client)\n\n # (2)Create 
pool container and pool attributes\n self.log.info(\"(2)==Create pool attributes.\")\n self.add_pool(connect=False)\n pool_id = self.pool.identifier\n self.add_container(self.pool)\n self.container.open()\n self.daos_cmd = self.get_daos_command()\n pool_attr_dict = self.create_data_set(num_attributes)\n self.pool.pool.set_attr(data=pool_attr_dict)\n self.verify_pool_attrs(pool_attr_dict)\n self.container.close()\n self.pool.disconnect()\n\n # (3)Setup and run IOR\n self.log.info(\"(3)==Setup and run IOR.\")\n result = run_pcmd(hosts_client, \"mkdir -p {}\".format(mount_dir))\n ior_timeout = self.params.get(\"ior_timeout\", '/run/ior/*')\n iorflags_write = self.params.get(\"write_flg\", '/run/ior/iorflags/*')\n iorflags_read = self.params.get(\"read_flg\", '/run/ior/iorflags/*')\n testfile = os.path.join(mount_dir, \"testfile\")\n testfile_sav = os.path.join(mount_dir, \"testfile_sav\")\n testfile_sav2 = os.path.join(mount_dir, \"testfile_sav2\")\n symlink_testfile = os.path.join(mount_dir, \"symlink_testfile\")\n # (3.a)ior dfs\n if ior_api in (\"DFS\", \"POSIX\"):\n self.log.info(\"(3.a)==Run non-HDF5 IOR write and read.\")\n self.ior_cmd.flags.update(iorflags_write)\n self.run_ior_with_pool(\n timeout=ior_timeout, create_pool=True, create_cont=True, stop_dfuse=False)\n self.ior_cmd.flags.update(iorflags_read)\n self.run_ior_with_pool(\n timeout=ior_timeout, create_pool=False, create_cont=False, stop_dfuse=False)\n\n # (3.b)ior hdf5\n elif ior_api == \"HDF5\":\n self.log.info(\"(3.b)==Run IOR HDF5 write and read.\")\n hdf5_plugin_path = self.params.get(\"plugin_path\", '/run/hdf5_vol/')\n self.ior_cmd.flags.update(iorflags_write)\n self.run_ior_with_pool(\n plugin_path=hdf5_plugin_path, mount_dir=mount_dir,\n timeout=ior_timeout, create_pool=True, create_cont=True, stop_dfuse=False)\n self.ior_cmd.flags.update(iorflags_read)\n self.run_ior_with_pool(\n plugin_path=hdf5_plugin_path, mount_dir=mount_dir,\n timeout=ior_timeout, create_pool=False, create_cont=False, stop_dfuse=False)\n else:\n self.fail(\"##(3)Unsupported IOR api {}\".format(ior_api))\n\n # (3.c)ior posix test file with symlink\n if ior_api == \"POSIX\":\n self.log.info(\"(3.c)==Symlink mounted testfile.\")\n result = run_pcmd(hosts_client, \"cd {}\".format(mount_dir))\n result = run_pcmd(hosts_client, \"ls -l {}\".format(testfile))\n result = run_pcmd(hosts_client, \"cp {0} {1}\".format(testfile, testfile_sav))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"cp {0} {1}\".format(testfile, testfile_sav2))\n self.check_result(result)\n result = run_pcmd(\n hosts_client, \"ln -vs {0} {1}\".format(testfile_sav2, symlink_testfile))\n result = run_pcmd(hosts_client, \"diff {0} {1}\".format(testfile, testfile_sav))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"ls -l {}\".format(symlink_testfile))\n self.check_result(result)\n self.container.close()\n self.pool.disconnect()\n result = run_pcmd(hosts_client, \"fusermount3 -u {}\".format(mount_dir))\n self.check_result(result)\n\n # Verify pool attributes before upgrade\n self.log.info(\"(3.2)==verify pool attributes before upgrade.\")\n self.verify_pool_attrs(pool_attr_dict)\n\n # (4)dmg system stop\n self.log.info(\"(4)==Dmg system stop.\")\n self.get_dmg_command().system_stop()\n errors = []\n errors.extend(self._stop_managers(self.server_managers, \"servers\"))\n errors.extend(self._stop_managers(self.agent_managers, \"agents\"))\n\n # (5)Upgrade\n self.log.info(\"(5)==Upgrade RPMs to 2.2.\")\n self.upgrade(hosts_server, hosts_client)\n\n 
self.log.info(\"==sleeping 30 more seconds\")\n time.sleep(30)\n # (6)Restart servers\n self.log.info(\"(6)==Restart servers.\")\n self.restart_servers()\n\n # (7)Verification after upgrade\n # Restart agent\n self.log.info(\"(7.1)====Restarting rel_2.2 agent after upgrade.\")\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n\n self.get_dmg_command().pool_list(verbose=True)\n self.get_dmg_command().pool_query(pool=pool_id)\n self.daos_cmd.pool_query(pool=pool_id)\n\n # Verify pool attributes\n self.log.info(\"(7.2)====Verifying pool attributes after upgrade.\")\n self.verify_pool_attrs(pool_attr_dict)\n self.daos_ver_after_upgraded(hosts_client)\n\n # Verify IOR data and symlink\n self.log.info(\"(7.3)====Verifying container data IOR read.\")\n if ior_api == \"DFS\":\n self.log.info(\"(7.a)==Run IOR DFS read verification.\")\n self.run_ior_with_pool(\n timeout=ior_timeout, create_pool=False, create_cont=False, stop_dfuse=False)\n elif ior_api == \"HDF5\":\n self.log.info(\"(7.b)==Run IOR HDF5 read verification.\")\n self.run_ior_with_pool(\n plugin_path=hdf5_plugin_path, mount_dir=mount_dir,\n timeout=ior_timeout, create_pool=False, create_cont=False, stop_dfuse=False)\n else:\n self.log.info(\"(7.c)==Run Symlink check after upgraded.\")\n result = run_pcmd(\n hosts_client,\n \"dfuse --mountpoint {0} --pool {1} --container {2}\".format(\n mount_dir, pool_id, self.container))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"diff {0} {1}\".format(testfile, testfile_sav))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"diff {0} {1}\".format(symlink_testfile, testfile_sav2))\n self.check_result(result)\n\n # (8)Dmg pool get-prop\n self.log.info(\"(8)==Dmg pool get-prop after RPMs upgraded before Pool upgraded\")\n result = run_pcmd(hosts_client, \"dmg pool get-prop {}\".format(pool_id))\n self.check_result(result)\n\n # (9)Pool property verification after upgraded\n self.log.info(\"(9)==Dmg pool upgrade and get-prop after RPMs upgraded\")\n\n if fault_on_pool_upgrade and self.has_fault_injection(hosts_client):\n self.log.info(\"(9.1a)==Pool upgrade with fault-injection.\")\n self.pool_upgrade_with_fault(hosts_client, pool_id)\n else:\n self.log.info(\"(9.1b)==Pool upgrade.\")\n result = run_pcmd(hosts_client, \"dmg pool upgrade {}\".format(pool_id))\n self.check_result(result)\n result = run_pcmd(hosts_client, \"dmg pool get-prop {}\".format(pool_id))\n self.check_result(result)\n self.log.info(\"(9.2)==verify pool attributes after pool-upgraded.\")\n self.verify_pool_attrs(pool_attr_dict)\n self.pool.destroy()\n\n # (10)Create new pool\n self.log.info(\"(10)==Create new pool after rpms Upgraded\")\n self.add_pool(connect=False)\n pool2_id = self.pool.identifier\n self.get_dmg_command().pool_list(verbose=True)\n self.get_dmg_command().pool_query(pool=pool2_id)\n self.daos_cmd.pool_query(pool=pool2_id)\n result = run_pcmd(hosts_client, \"dmg pool get-prop {}\".format(pool2_id))\n self.check_result(result)\n\n # (11)Downgrade and cleanup\n self.log.info(\"(11)==Downgrade and cleanup.\")\n if ior_api == \"POSIX\":\n result = run_pcmd(hosts_client, \"fusermount3 -u {}\".format(mount_dir))\n self.check_result(result)\n self.container.close()\n self.pool.disconnect()\n self.pool.destroy()\n self.get_dmg_command().system_stop()\n errors = []\n errors.extend(self._stop_managers(self.server_managers, \"servers\"))\n errors.extend(self._stop_managers(self.agent_managers, \"agents\"))\n 
self.log.info(\"(11.1)==Downgrade RPMs to 2.0.3.\")\n self.downgrade(hosts_server, hosts_client)\n self.log.info(\"==sleeping 30 more seconds\")\n time.sleep(30)\n\n # (12)Cleanup restart server and agent\n self.log.info(\"(12)==Restart 2.0 servers and agent.\")\n self.restart_servers()\n self._start_manager_list(\"agent\", self.agent_managers)\n self.show_daos_version(all_hosts, hosts_client)\n if fault_on_pool_upgrade and not self.has_fault_injection(hosts_client):\n self.fail(\"##(12)Upgraded-rpms did not have fault-injection feature.\")\n self.log.info(\"==(12)Test passed\")", "def test_update_cloud(self):\n pass", "def remove_cluster(config, nova, neutron, cinder, conn):\n\n cluster_info = OSClusterInfo(nova, neutron, cinder, config, conn)\n masters = cluster_info.get_instances(\"node\")\n workers = cluster_info.get_instances(\"master\")\n\n tasks = [host.delete(neutron) for host in masters if host]\n tasks += [host.delete(neutron) for host in workers if host]\n if tasks:\n LOGGER.debug(\"Deleting Instances ...\")\n loop = asyncio.get_event_loop()\n loop.run_until_complete(asyncio.wait(tasks))\n loop.close()\n\n LoadBalancer(config, conn).delete()\n\n sg_name = '%s-sec-group' % config['cluster-name']\n secg = conn.list_security_groups({\"name\": sg_name})\n if secg:\n LOGGER.debug(\"Deleting SecurityGroup %s ...\", sg_name)\n for sg in secg:\n for rule in sg.security_group_rules:\n conn.delete_security_group_rule(rule['id'])\n\n for port in conn.list_ports():\n if sg.id in port.security_groups:\n conn.delete_port(port.id)\n conn.delete_security_group(sg_name)\n\n # This needs to be replaced with OpenStackAPI in the future\n for vol in cinder.volumes.list():\n try:\n if config['cluster-name'] in vol.name and vol.status != 'in-use':\n try:\n vol.delete()\n except (BadRequest, NotFound):\n pass\n\n except TypeError:\n continue\n\n # delete the cluster key pair\n conn.delete_keypair(config['cluster-name'])", "def add_delete_controller_cinder_ceph(self):\n\n self.env.revert_snapshot('ready_with_9_slaves')\n\n data = {\n 'volumes_lvm': True,\n 'volumes_ceph': False,\n 'images_ceph': True,\n 'objects_ceph': True,\n 'tenant': 'scalegroup5',\n 'user': 'scalegroup5',\n 'password': 'scalegroup5',\n \"net_provider\": 'neutron',\n \"net_segment_type\": settings.NEUTRON_SEGMENT['tun']\n }\n\n self.show_step(1, initialize=True)\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n settings=data\n )\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-01': ['controller', 'cinder', 'ceph-osd'],\n 'slave-02': ['controller', 'cinder', 'ceph-osd'],\n 'slave-03': ['controller', 'cinder', 'ceph-osd'],\n 'slave-04': ['compute'],\n 'slave-05': ['compute']\n }\n )\n\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(2)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(3)\n self.fuel_web.run_ostf(cluster_id)\n\n self.show_step(4)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-06': ['controller', 'cinder', 'ceph-osd']\n }\n )\n\n self.show_step(5)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(6)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(7)\n self.fuel_web.run_ostf(cluster_id)\n\n self.show_step(8)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-07': ['controller', 'cinder', 'ceph-osd']\n }\n )\n\n with self.fuel_web.get_ssh_for_node('slave-02') as remote_ceph:\n self.fuel_web.prepare_ceph_to_delete(remote_ceph)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-02': ['controller', 'cinder', 
'ceph-osd']\n },\n pending_addition=False,\n pending_deletion=True\n )\n\n self.show_step(9)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(10)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(11)\n self.fuel_web.run_ostf(cluster_id)\n\n self.show_step(12)\n with self.fuel_web.get_ssh_for_node('slave-03') as remote_ceph:\n self.fuel_web.prepare_ceph_to_delete(remote_ceph)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-03': ['controller', 'cinder', 'ceph-osd']\n },\n pending_addition=False,\n pending_deletion=True\n )\n\n self.show_step(13)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(14)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(15)\n self.fuel_web.run_ostf(cluster_id)\n\n self.env.make_snapshot('add_delete_controller_cinder_ceph')", "def reconfigure_overcommit_ratio(self):\n self.check_run('reconfigure_overcommit_ratio')\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.show_step(2)\n config_new = utils.get_config_template('nova_cpu')\n structured_config = get_structured_config_dict(config_new)\n self.fuel_web.client.upload_configuration(config_new,\n cluster_id,\n role=\"controller\")\n\n service_name = \"nova-scheduler\"\n\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n uptimes = self.get_service_uptime(controllers, service_name)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role=\"controller\")\n\n self.show_step(3)\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(4)\n self.check_service_was_restarted(controllers, uptimes, service_name)\n\n self.show_step(5)\n self.check_config_on_remote(controllers, structured_config)\n\n self.show_step(6)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n\n self.check_overcommit_ratio(os_conn, cluster_id)\n\n self.show_step(7)\n config_revert = utils.get_config_template('nova_cpu_old')\n structured_config_revert = get_structured_config_dict(config_revert)\n self.fuel_web.client.upload_configuration(config_revert,\n cluster_id,\n role=\"controller\")\n uptimes = self.get_service_uptime(controllers, service_name)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role=\"controller\")\n self.show_step(8)\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(9)\n self.check_service_was_restarted(controllers, uptimes, service_name)\n\n self.show_step(10)\n self.check_config_on_remote(controllers, structured_config_revert)\n\n self.env.make_snapshot(\"reconfigure_overcommit_ratio\",\n is_make=True)", "def reset_openstack(openstack_cleanup):\n openstack_cleanup.delete_vms()\n\n openstack_cleanup.delete_networks(skip_list=OpenstackCleanup.DEFAULT_NETWORKS)\n openstack_cleanup.delete_ports_on_default_network()\n\n openstack_cleanup.delete_images(skip_list=OpenstackCleanup.DEFAULT_IMAGES)\n openstack_cleanup.delete_flavors(skip_list=OpenstackCleanup.DEFAULT_FLAVORS)", "def reload(self):\n cluster_kubeconfig = self.ocp.cluster_kubeconfig\n self.data = self.get()\n self.__init__(**self.data)\n self.ocp.cluster_kubeconfig = cluster_kubeconfig", "def delete_cluster(self):", "def cluster(self):\n assert False", "def ModifyCluster(self, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n body = kwargs\n\n return self._SendRequest(HTTP_PUT,\n \"/%s/modify\" % 
GANETI_RAPI_VERSION, query, body)", "def version_recluster(self, serial, cluster,\n update_statistics_ancestors_depth=None):\n\n props = self.version_get_properties(serial)\n if not props:\n return\n node = props[NODE]\n size = props[SIZE]\n oldcluster = props[CLUSTER]\n if cluster == oldcluster:\n return\n\n mtime = time()\n self.statistics_update_ancestors(node, -1, -size, mtime, oldcluster,\n update_statistics_ancestors_depth)\n self.statistics_update_ancestors(node, 1, size, mtime, cluster,\n update_statistics_ancestors_depth)\n\n q = \"update versions set cluster = ? where serial = ?\"\n self.execute(q, (cluster, serial))", "def add_delete_compute_cinder_ceph_ephemeral(self):\n\n self.env.revert_snapshot('ready_with_9_slaves')\n\n self.show_step(1, initialize=True)\n data = {\n 'volumes_lvm': True,\n 'volumes_ceph': False,\n 'images_ceph': True,\n 'ephemeral_ceph': True,\n 'osd_pool_size': '2',\n 'tenant': 'scalegroup6',\n 'user': 'scalegroup6',\n 'password': 'scalegroup6'\n }\n\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n settings=data\n )\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-01': ['controller'],\n 'slave-02': ['controller'],\n 'slave-03': ['controller'],\n 'slave-04': ['compute'],\n 'slave-05': ['ceph-osd', 'cinder'],\n 'slave-06': ['ceph-osd', 'cinder']\n }\n )\n\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(2)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(3)\n self.fuel_web.run_ostf(cluster_id)\n\n self.show_step(4)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-07': ['ceph-osd', 'cinder']\n }\n )\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(5)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(6)\n self.fuel_web.run_ostf(cluster_id)\n\n self.show_step(7)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-08': ['ceph-osd', 'cinder']\n }\n )\n with self.fuel_web.get_ssh_for_node('slave-05') as remote_ceph:\n self.fuel_web.prepare_ceph_to_delete(remote_ceph)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-05': ['ceph-osd', 'cinder']\n },\n pending_addition=False,\n pending_deletion=True\n )\n\n self.show_step(8)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(9)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(10)\n self.fuel_web.run_ostf(cluster_id)\n\n self.show_step(11)\n with self.fuel_web.get_ssh_for_node('slave-08') as remote_ceph:\n self.fuel_web.prepare_ceph_to_delete(remote_ceph)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-08': ['ceph-osd', 'cinder']\n },\n pending_addition=False,\n pending_deletion=True\n )\n self.show_step(12)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(13)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(14)\n self.fuel_web.run_ostf(cluster_id)\n self.env.make_snapshot(\"add_delete_compute_cinder_ceph_ephemeral\")", "def upscale_cluster_info(VMname, master=False):\n with open('TemporaryInfo.json', mode='r') as jsonfile:\n TemporaryInfo = json.load(jsonfile)\n privateIP = TemporaryInfo.get(\"privateIpAddress\")\n publicIP = TemporaryInfo.get(\"publicIpAddress\")\n jsonfile.close()\n\n with open('ClusterInfo.json', mode='r') as jsonfile:\n if len(jsonfile.readline()) == 0:\n sys.exit('Error: ClusterInfo.json file appears to be empty.')\n else:\n jsonfile.seek(0,0) # Return the pointer to the beginning of the file\n ClusterInfo = json.load(jsonfile)\n nrSlaves = ClusterInfo[0].get(\"NumberSlaves\")\n jsonfile.close()\n\n with 
open('ClusterInfoUpdated.json', mode='w') as jsonfile:\n if master:\n if ClusterInfo[0][\"ExistMaster\"]:\n sys.exit('Error: Trying to add a master while according to ClusterInfo there already is one.')\n else:\n newmaster = {}\n newmaster['privateIP'] = privateIP\n newmaster['publicIP'] = publicIP\n newmaster['role'] = 'Master_and_Slave'\n newmaster['VMname'] = VMname\n nrSlaves += 1 # Adding a new slave to the count\n ClusterInfo[0][\"ExistMaster\"] = True\n ClusterInfo.append(newmaster)\n\n if not ClusterInfo[0][\"ExistMaster\"]:\n sys.exit('Error: Trying to add a slave while according to ClusterInfo there is no master.')\n if not master:\n nrSlaves += 1 # Adding a new slave to the count\n newslave = {}\n newslave['privateIP'] = privateIP\n newslave['publicIP'] = publicIP\n newslave['VMname'] = VMname\n newslave['SlaveID'] = str(nrSlaves)\n newslave['role'] = 'Slave'\n ClusterInfo.append(newslave)\n\n ClusterInfo[0][\"NumberSlaves\"] = nrSlaves\n json.dump(ClusterInfo, jsonfile)\n jsonfile.close()\n\n return", "def replace_with_reset_resume_state_test(self):\n\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n session = self.patient_cql_connection(node1)\n stress_table = 'keyspace1.standard1'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n initial_data = rows_to_list(session.execute(query))\n\n node3.stop(gently=False)\n\n # kill node1 in the middle of streaming to let it fail\n t = InterruptBootstrap(node1)\n t.start()\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n\n # keep timeout low so that test won't hang\n node4.set_configuration_options(values={'streaming_socket_timeout_in_ms': 1000})\n cluster.add(node4, False)\n try:\n node4.start(jvm_args=[\"-Dcassandra.replace_address_first_boot=127.0.0.3\"], wait_other_notice=False)\n except NodeError:\n pass # node doesn't start as expected\n t.join()\n node1.start()\n\n # restart node4 bootstrap with resetting bootstrap state\n node4.stop()\n mark = node4.mark_log()\n node4.start(jvm_args=[\n \"-Dcassandra.replace_address_first_boot=127.0.0.3\",\n \"-Dcassandra.reset_bootstrap_progress=true\"\n ])\n # check if we reset bootstrap state\n node4.watch_log_for(\"Resetting bootstrap progress to start fresh\", from_mark=mark)\n # wait for node3 ready to query\n node4.watch_log_for(\"Listening for thrift clients...\", from_mark=mark)\n\n # check if 2nd bootstrap succeeded\n assert_bootstrap_state(self, node4, 'COMPLETED')\n\n # query should work again\n debug(\"Stopping old nodes\")\n node1.stop(gently=False, wait_other_notice=True)\n node2.stop(gently=False, wait_other_notice=True)\n\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node4)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.ONE)", "def test_05_node_down_and_resync_hard(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n test_rest.db_simulate(cluster, 240)\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port 
{port} - during load')\n test_rest.docker_stop(cluster, port)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n test_rest.cluster.verify_data()", "def test_crud_cluster(self):\n # create the object\n response = self._create_cluster()\n self.assertEqual(response.status_code, status.HTTP_201_CREATED,\n response.content)\n\n # list the object\n cluster_id = self._list_cluster()\n # Assert that the originally created cluster id is the same as the one\n # returned by list\n self.assertEquals(response.data['id'], cluster_id)\n self.assertEquals(response.data['default_vm_type'], 'm5.24xlarge')\n self.assertEquals(response.data['default_zone']['name'], 'us-east-1b')\n\n # check details\n cluster_id = self._check_cluster_exists(cluster_id)\n\n # update cluster\n response = self._update_cluster(cluster_id)\n self.assertEquals(response['name'], 'new_name')\n\n # delete the object\n response = self._delete_cluster(cluster_id)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data)\n\n # check it no longer exists\n self._check_no_clusters_exist()", "def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass", "def test__in_place_update_hostmgr_restart(self, failure_tester):\n # need extra retry attempts, since in-place update would need more time\n # to process given hostmgr would be restarted\n job1 = failure_tester.stateless_job(\n job_file=\"test_stateless_job_spec.yaml\",\n config=IntegrationTestConfig(max_retry_attempts=300),\n )\n job1.create()\n job1.wait_for_all_pods_running()\n\n update1 = failure_tester.update(\n job=job1,\n updated_job_file=\"test_update_stateless_job_spec.yaml\",\n )\n update1.create(in_place=True)\n\n assert 0 != failure_tester.fw.restart(failure_tester.hostmgr, \"leader\")\n\n update1.wait_for_state(goal_state=\"SUCCEEDED\")", "def test_rollback(self):\n os.system('rm config.txt; touch config.txt')\n test_oplog, primary_conn, mongos, solr = self.get_new_oplog()\n\n if not start_cluster():\n self.fail('Cluster could not be started successfully!')\n\n solr = DocManager()\n test_oplog.doc_manager = solr\n solr._delete() # equivalent to solr.delete(q='*: *')\n\n mongos['test']['test'].remove({})\n mongos['test']['test'].insert( \n {'_id': ObjectId('4ff74db3f646462b38000001'),\n 'name': 'paulie'},\n safe=True\n )\n while (mongos['test']['test'].find().count() != 1):\n time.sleep(1)\n cutoff_ts = test_oplog.get_last_oplog_timestamp()\n\n first_doc = {'name': 'paulie', '_ts': bson_ts_to_long(cutoff_ts),\n 'ns': 'test.test',\n '_id': ObjectId('4ff74db3f646462b38000001')}\n\n #try kill one, try restarting\n kill_mongo_proc(primary_conn.host, PORTS_ONE['PRIMARY'])\n\n new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))\n admin = new_primary_conn['admin']\n while admin.command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n time.sleep(5)\n count = 0\n while True:\n try:\n mongos['test']['test'].insert({\n '_id': ObjectId('4ff74db3f646462b38000002'),\n 'name': 'paul'}, \n 
safe=True)\n break\n except OperationFailure:\n count += 1\n if count > 60:\n self.fail('Call to insert doc failed too many times')\n time.sleep(1)\n continue\n while (mongos['test']['test'].find().count() != 2):\n time.sleep(1)\n kill_mongo_proc(primary_conn.host, PORTS_ONE['SECONDARY'])\n start_mongo_proc(PORTS_ONE['PRIMARY'], \"demo-repl\", \"/replset1a\",\n \"/replset1a.log\", None)\n\n #wait for master to be established\n while primary_conn['admin'].command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n\n start_mongo_proc(PORTS_ONE['SECONDARY'], \"demo-repl\", \"/replset1b\",\n \"/replset1b.log\", None)\n\n #wait for secondary to be established\n admin = new_primary_conn['admin']\n while admin.command(\"replSetGetStatus\")['myState'] != 2:\n time.sleep(1)\n while retry_until_ok(mongos['test']['test'].find().count) != 1:\n time.sleep(1)\n\n self.assertEqual(str(new_primary_conn.port), PORTS_ONE['SECONDARY'])\n self.assertEqual(str(primary_conn.port), PORTS_ONE['PRIMARY'])\n\n last_ts = test_oplog.get_last_oplog_timestamp()\n second_doc = {'name': 'paul', '_ts': bson_ts_to_long(last_ts),\n 'ns': 'test.test', \n '_id': ObjectId('4ff74db3f646462b38000002')}\n\n test_oplog.doc_manager.upsert(first_doc)\n test_oplog.doc_manager.upsert(second_doc)\n\n test_oplog.rollback()\n test_oplog.doc_manager.commit()\n results = solr._search()\n\n assert(len(results) == 1)\n\n self.assertEqual(results[0]['name'], 'paulie')\n self.assertTrue(results[0]['_ts'] <= bson_ts_to_long(cutoff_ts))\n\n #test_oplog.join()", "def fixup(self):\n raise Exception(\"Fixup not implemented yet!\")", "def update_cluster_hosts(self, hosts):\n self._hosts = hosts\n self._collect_hosts_d = True", "def test_redeploy(self):\n pass", "def main():\n\n parser = cli.Parser()\n parser.add_required_arguments(cli.Argument.CLUSTER_NAME)\n parser.add_custom_argument('--key', required=True, action='store',\n help='Name of ESXi Advanced Setting to update')\n parser.add_custom_argument('--value', required=True, action='store',\n help='Value of the ESXi Advanced Setting to update')\n args = parser.get_args()\n try:\n si = service_instance.connect(args)\n\n content = si.RetrieveContent()\n\n cluster = pchelper.get_obj(content, [vim.ClusterComputeResource], args.cluster_name)\n\n hosts = cluster.host\n for host in hosts:\n option_manager = host.configManager.advancedOption\n option = vim.option.OptionValue(key=args.key,\n value=int(args.value))\n print(\"Updating %s on ESXi host %s \"\n \"with value of %s\" % (args.key, host.name, args.value))\n if option_manager.UpdateOptions(changedValue=[option]):\n print(\"Settings updated!\")\n\n except vmodl.MethodFault as ex:\n print(\"Caught vmodl fault : \" + ex.msg)\n return -1\n except Exception as ex:\n print(\"Caught exception : \" + str(ex))\n return -1\n\n return 0", "def reset(self):\n self._clusters = {}\n self._clusters_val = {}\n self._centroids = {}\n self.store()", "def replace_with_insufficient_replicas_test(self):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n if DISABLE_VNODES:\n num_tokens = 1\n else:\n # a little hacky but grep_log returns the whole line...\n num_tokens = int(node3.get_conf_option('num_tokens'))\n\n debug(\"testing with num_tokens: {}\".format(num_tokens))\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)'])\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n 
node3.stop(wait_other_notice=True)\n\n # stop other replica\n debug(\"Stopping node2 (other replica)\")\n node2.stop(wait_other_notice=True)\n\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n cluster.add(node4, False)\n node4.start(replace_address='127.0.0.3', wait_for_binary_proto=False, wait_other_notice=False)\n\n # replace should fail due to insufficient replicas\n node4.watch_log_for(\"Unable to find sufficient sources for streaming range\")\n assert_not_running(node4)" ]
[ "0.656177", "0.6422964", "0.64025843", "0.61629826", "0.61626065", "0.60646015", "0.6063085", "0.6026249", "0.59341043", "0.59069663", "0.5800315", "0.57841843", "0.57784903", "0.5644694", "0.5628729", "0.5616941", "0.56099975", "0.56072444", "0.55292267", "0.5507146", "0.5497578", "0.54975605", "0.54830027", "0.54678273", "0.5451214", "0.54503584", "0.5447989", "0.54419434", "0.54411006", "0.54313254", "0.54288507", "0.541953", "0.541616", "0.5401138", "0.5399389", "0.5394358", "0.5391138", "0.538967", "0.5388284", "0.5381629", "0.53254944", "0.5315606", "0.53127104", "0.5307578", "0.5306005", "0.5301128", "0.528705", "0.5282007", "0.52797157", "0.5273993", "0.5270375", "0.5267086", "0.5262179", "0.52597886", "0.52591515", "0.5255057", "0.52335143", "0.52228844", "0.522138", "0.52207613", "0.5204439", "0.5174629", "0.51715356", "0.51566017", "0.51537466", "0.5153486", "0.51533175", "0.51454633", "0.5145187", "0.51379", "0.5133489", "0.5129872", "0.51292", "0.5125025", "0.51118684", "0.5110749", "0.5110749", "0.5106386", "0.51039493", "0.5102955", "0.5102033", "0.5101082", "0.5091685", "0.5086259", "0.50804734", "0.5076241", "0.5050366", "0.50465775", "0.5039984", "0.5034466", "0.50263095", "0.5025117", "0.50224614", "0.5020213", "0.5016771", "0.5010771", "0.50107133", "0.5009938", "0.50048864", "0.49997574" ]
0.5728941
13
Route for front end to obtain the data for the Location of choice.
async def location_data(location: LocationDataRequest):
    # Make sure the location parameter is a string in the form of "City, State"
    location = str(location)
    location = location.replace('location=', "")
    location = location.replace("'", "")

    # Queries for data response
    #pop_query = """SELECT "2019 Population" FROM CitySpire WHERE "Location" = %s""", [location]
    #rent_query = """SELECT "2019 Rental Rates" FROM CitySpire WHERE "Location" = %s""", [location]
    #walk_query = """SELECT "2019 Walk Score" FROM CitySpire WHERE "Location" = %s""", [location]
    #live_query = """SELECT "2019 Livability Score" FROM CitySpire WHERE "Location" = %s""", [location]

    cursor.execute("""SELECT "2019 Population" FROM cityspire WHERE "Location" = %s;""", [location])
    pop = cursor.fetchone()
    #pop = pop[0][0]  # This slices the tuple value from the list of tuples

    cursor.execute("""SELECT "2019 Rental Rates" FROM cityspire WHERE "Location" = %s;""", [location])
    rent = cursor.fetchone()
    #rent = rent[0][0]  # This slices the tuple value from the list of tuples

    cursor.execute("""SELECT "Walk Score" FROM cityspire WHERE "Location" = %s;""", [location])
    walk = cursor.fetchone()
    #walk = walk[0][0]  # This slices the tuple value from the list of tuples

    cursor.execute("""SELECT "Livability Score" FROM cityspire WHERE "Location" = %s;""", [location])
    live = cursor.fetchone()
    #live = live[0][0]  # This slices the tuple value from the list of tuples

    # Close the cursor and connection (this breaks the API)
    #cursor.close()
    #connection.close()

    # Return the data that was requested and queried
    return {
        "city_name": str(location),
        "population": int(pop[0]),
        "rent_per_month": int(rent[0]),
        "walk_score": int(walk[0]),
        "livability_score": int(live[0])
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_location(self):\n return self.request({\n \"path\": \"/\" + UUID + \"/location\"\n })", "def carslocation():\n # Check if user is loggedin\n if 'loggedin' in session:\n\n response = requests.get(\"http://localhost:8080/api/carslocation\")\n print(response.text)\n locations = json.loads(response.text)\n\n # users is loggedin show them the home page\n return render_template('map.html', location=locations)\n # return render_template('map.html')\n # users is not loggedin redirect to login page\n return redirect(url_for('site.login'))", "def _get_data_for_location(self):\r\n student = self._student('GET')\r\n location = self.get_params.get('location')\r\n\r\n # Do not return data if we're missing the student param\r\n # or the problem has not yet been registered.\r\n if student is None or location not in self.server.problems:\r\n self._error_response()\r\n\r\n else:\r\n self._success_response({\r\n 'student_sub_count': self.server.DUMMY_DATA['student_sub_count'],\r\n 'count_required': student.num_required,\r\n 'count_graded': student.num_graded,\r\n 'count_available': student.num_pending\r\n })", "def route(self):\n pass", "def lookup():\n \"\"\" OR station info for current selection\"\"\"\n\n # check which arguements are present\n if request.args.get(\"city\") and request.args.get(\"state\"):\n # get all stations for a location\n\n city = request.args.get(\"city\")\n state = request.args.get(\"state\")\n\n station_list = Station.query.join(Place).\\\n filter(Place.city == city, Place.state == state).all()\n\n result = geo_stations.dump(station_list)\n\n if request.args.get(\"stream\"):\n # get station for specified url\n\n url = request.args.get(\"stream\")\n\n station_list = Station.query.join(Place).\\\n filter(Station.url_stream == url).all()\n\n result = geo_stations.dump(station_list)\n\n return jsonify(result.data)", "def location():\n countries = locations(path_to_db)\n\n kwargs = dict(\n ascents=None,\n countries=countries,\n difficulties=DIFFICULTIES\n )\n\n country = request.args.get('country')\n diff = request.args.get('difficulty')\n\n if not country and not diff:\n return render_template(\"location.html\", **kwargs)\n\n db = DataBase(path_to_db)\n if diff != \"\":\n ascents = db.execute_selection_by_difficulty(country, diff)\n else:\n ascents = db.execute_selection_by_country(country)\n\n kwargs['ascents'] = ascents\n\n return render_template(\"location.html\", **kwargs)", "def getDataAtLocation(loc: ghidra.program.util.ProgramLocation) -> ghidra.program.model.listing.Data:\n ...", "def home():\n return(\n f\"Available Routes: <br/>\"\n\n f\"For Precipitation: /api/v1.0/precipitation<br/>\"\n f\"Returns Jsonify dictionary of dates and Precepitation<br/><br/>\"\n\n f\"For list of Stations: /api/v1.0/stations<br/>\"\n f\"Returns Jasonify list of stations <br/><br/>\"\n\n f\"For last year temperatures: /api/v1.0/tobs<br/>\"\n f\"Returns Jsonify dictionary of Temperature Observations for last year<br/><br/>\"\n\n f\"Temperature result from the date in format (yyyy-mm-dd): /api/v1.0/yyyy-mm-dd<br/>\"\n f\"Returns an Average, Max, and Min temperatures from given start date of dataset<br/><br/>\"\n\n f\"Temperature result from start date to end date in format (yyyy-mm-dd): /api/v1.0/yyyy-mm-dd/yyyy-mm-dd<br/>\"\n f\"Returns an Average, Max, and Min temperatures for a given date range\"\n\n )", "def get(self):\n response = view_locations()\n return marshal(response, view_locations_model), SUCCESS", "def get(self, id):\n response = view_location(id)\n if type(response) == 
dict:\n return marshal(response, view_location_model), SUCCESS\n else:\n return response", "def location_data(self) -> pulumi.Output[Optional['outputs.LocationDataResponse']]:\n return pulumi.get(self, \"location_data\")", "def get_location(self):\n return self.location", "def back_home():\n column = \"ninja this is a GET request\"\n return render_template('data.html',\n title='Data on a Map!',\n column=column)", "def _get(self):\n return self.request(method=\"get\", path=self.router.fields)", "def get(self, location, data=None, headers={}):\n return self._communicate(vxg.core.request.GetRequest,\n location, data, headers)", "def foodtruckByLocation(request, format=None):\n\tif not 'latitude' in request.GET or not request.GET['latitude']:\n\t\treturn Response(status=status.HTTP_400_BAD_REQUEST)\n\telse:\n\t\tlatitude = request.GET['latitude']\n\n\tif not 'longitude' in request.GET or not request.GET['longitude']:\n\t\treturn Response(status=status.HTTP_400_BAD_REQUEST)\n\telse:\n\t\tlongitude = request.GET['longitude']\n\n\tradius = Decimal(1)\n\tlimit = 15\n\tif 'radius' in request.GET and request.GET['radius']:\n\t\tradius = Decimal(request.GET['radius'])\n\tif 'limit' in request.GET and request.GET['limit']:\n\t\tlimit = request.GET['limit']\n\t\n\tfoodtrucks = findFoodtrucksByLocation(latitude, longitude, radius, limit)\n\tif request.method == 'GET':\n\t\tserializer = FoodTruckSerializer(foodtrucks, many=True)\n\t\treturn Response(serializer.data)", "def locations(request):\n locations = Location.objects.all()\n context = {'locations': locations}\n return render(request, 'std/Locations.html', context)", "def do_GET(self):\n try:\n if self.path.endswith(\"/restaurants\"):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n\n output = render_template('../templates/restaurant/index.html')\n self.wfile.write(output)\n return\n\n if self.path.endswith(\"/restaurant/new\"):\n self.send_response(200)\n self.send_header('Content-Type', 'text/html')\n self.end_headers()\n\n output = render_template('../templates/restaurant/new.html')\n self.wfile.write(output)\n return\n\n if self.path.endswith(\"/edit\"):\n self.send_response(200)\n self.send_header('Content-Type', 'text/html')\n self.end_headers()\n\n output = render_template(\"../templates/restaurant/edit.html\")\n self.wfile.write(output)\n return\n\n except IOError:\n self.send_response(404, \"File Not Found %s\" % self.path)", "def route(self):\n # TODO: wenn keine url, herausfinden, welche ????\n # TODO: wenn url = hostname (fqdn), dann -> google.ch\n if not (self.META.has_key('REMOTE_ADDR') and \n self.GET.has_key('provider')):\n #self.GET.has_key('url')):\n #return HttpResponseRedirect('/index.php')\n # TODO: Auf die Fehlerseite Link zu back.php\n return render_to_response('error.htm', {\n 'error': \"Falsche Parameter auf route.php\",\n })\n src_ip = self.META['REMOTE_ADDR']\n prov = self.GET['provider']\n url = \"http://www.google.ch\"\n if self.GET.has_key('url'):\n url = self.GET['url']\n # Add and save new route\n add_active_route(src_ip = src_ip, prov = prov)\n return HttpResponseRedirect(url)", "def get_location_by_id(self, location_id):", "def get(self, request):\n pass", "def home():\n\n # Provide the date range (from the most distant to the recent date) for\n # filtering in the last two API routes\n session = Session(engine)\n start_limit = session.query(Measurement.date).filter(Measurement.date).\\\n order_by(Measurement.date).first()\n end_limit = 
session.query(Measurement.date).filter(Measurement.date).\\\n order_by(Measurement.date.desc()).first()\n\n return (\n f'Available Routes:<br/>'\n f'<br/>'\n f'/api/v1.0/precipitation<br/>'\n f'/api/v1.0/stations<br/>'\n f'/api/v1.0/tobs<br/>'\n f'<br/>'\n f'/api/v1.0/start<br/>'\n f'/api/v1.0/start/end<br/>'\n f'<br/>'\n f'*Please use \"yyyy-mm-dd\" as the date format to replace the \"start\" and/or \"end\" parameter(s) in the last two API routes in order to filter summarized temperature results based on desired date range:<br/>'\n f'The earliest date available in this dataset is {start_limit[0]}<br/>'\n f'The most recent date available in this dataset is {end_limit[0]}<br/>'\n )", "def home(request):\n lender = request.GET.get('lender', '')\n metro = request.GET.get('metro')\n context = {}\n if lender and len(lender) > 1 and lender[0].isdigit():\n query = Institution.objects.filter(agency_id=int(lender[0]))\n query = query.filter(ffiec_id=lender[1:])\n query = query.select_related('agency', 'zip_code')\n lender = query.first()\n if lender:\n context['lender'] = lender\n if metro:\n query = Geo.objects.filter(geo_type=Geo.METRO_TYPE,\n geoid=metro)\n metro = query.first()\n if metro:\n context['metro'] = metro\n\n return render(request, 'index.html', context)", "def home():\n return(\n f\"Available Routes:<br/>\"\n f\"Precipitation: /api/v1.0/precipitation<br/>\"\n f\"List of Stations: /api/v1.0/stations<br/>\"\n f\"Temperature for one year: /api/v1.0/tobs<br/>\"\n f\"Temperature stat from the start date(yyyy-mm-dd): /api/v1.0/min_max_avg/<start><br/>\"\n f\"Temperature stat from start to end dates(yyyy-mm-dd): /api/v1.0/min_max_avg/<start><br/>\"\n )", "def route( request, c ):", "def get_all_locations(self):", "def location_data(self) -> Optional[pulumi.Input['LocationDataArgs']]:\n return pulumi.get(self, \"location_data\")", "def get_data(location):\n # This is factored out so we can use a different retrieval method if required.\n # Originally used urllib2, but it had SSL issues on my machine\n response = requests.get(location)\n return response.content", "def fetch(*args, **kwargs):\n raise InvalidEndpoint('Not a valid location on this endpoint')", "def fetch(*args, **kwargs):\n raise InvalidEndpoint('Not a valid location on this endpoint')", "def get_locations(self):\n try:\n output_json = {}\n total_locations = list(self.mongo_db_object.find_all(AppConfigurations.MONGO_DATABASE,\n AppConstants.LOCATION.MONGO_LOCATION_COLLECTION_NAME))\n output_json = total_locations\n return AppConstants.result_success_template(output_json)\n\n except Exception as e:\n print(\"Error while fetching the Location Data.\", str(e))", "def on_path(route, query, urgency=1, close_to=None,size=20):\n\n base_url='https://places.cit.api.here.com/places/v1/browse'\n #import ipdb; ipdb.set_trace()\n route_str = '['+str('|'.join([','.join([str(i[0]),str(i[1])]) for i in route]))+']'\n payload = {'app_id':HERE_ID, \n 'app_code':HERE_CODE,\n 'q':query,\n 'route':route_str,\n 'urgency':urgency,\n 'size':size}\n resp = requests.get(base_url, params=payload)\n data = json.loads(resp.content)\n results = data['results']['items']\n ret = []\n for loc in results:\n ret.append({'title' : loc['title'],\n 'latlon' : loc['position'],\n 'address': loc['vicinity']})\n return ret", "def location(self):\r\n return self._get('location', {})", "def get(self, request):\n context = self.getContext(GeoPostForm())\n return render(request, 'geopost/home.html', context)", "def get(self, request):\n return 
Response(services.get_gsa_locations(request.query_params, request.META['HTTP_JWT']))", "def location():\n\n tablename = \"%s_%s\" % (module, resourcename)\n table = db[tablename]\n\n # Allow prep to pass vars back to the controller\n vars = {}\n\n # @ToDo: Clean up what needs to be done only for interactive views,\n # vs. what needs to be done generally. E.g. some tooltips are defined\n # for non-interactive.\n # Pre-processor\n def prep(r, vars):\n\n def get_location_info():\n query = (db.gis_location.id == r.id)\n return db(query).select(db.gis_location.lat,\n db.gis_location.lon,\n db.gis_location.level,\n limitby=(0, 1)).first()\n\n # Restrict access to Polygons to just MapAdmins\n if deployment_settings.get_security_map() and not s3_has_role(\"MapAdmin\"):\n table.code.writable = False\n if r.method == \"create\":\n table.code.readable = False\n table.gis_feature_type.writable = table.gis_feature_type.readable = False\n table.wkt.writable = table.wkt.readable = False\n elif r.interactive:\n table.code.comment = DIV(_class=\"tooltip\",\n _title=\"%s|%s\" % (T(\"Code\"),\n T(\"For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.\")))\n table.wkt.comment = DIV(_class=\"stickytip\",\n _title=\"WKT|%s %s%s %s%s\" % (T(\"The\"),\n \"<a href='http://en.wikipedia.org/wiki/Well-known_text' target=_blank>\",\n T(\"Well-Known Text\"),\n \"</a>\",\n T(\"representation of the Polygon/Line.\")))\n\n if r.method == \"update\" and r.id:\n # We don't allow converting a location group to non-group and\n # vice versa. We also don't allow taking away all the members of\n # a group -- setting \"notnull\" gets the \"required\" * displayed.\n # Groups don't have parents. (This is all checked in onvalidation.)\n # NB r.id is None for update.url\n location = get_location_info()\n if location.level == \"GR\":\n table.level.writable = False\n table.parent.readable = table.parent.writable = False\n table.members.notnull = True\n # Record that this is a group location. Since we're setting\n # level to not writable, it won't be in either form.vars or\n # request.vars. 
Saving it while we have it avoids another\n # db access.\n response.s3.location_is_group = True\n else:\n table.members.writable = table.members.readable = False\n response.s3.location_is_group = False\n\n if r.interactive:\n if not \"group\" in r.request.vars:\n # Hide the Members List (a big download when many records are entered)\n table.members.writable = table.members.readable = False\n # Don't show street address, postcode for hierarchy on read or update.\n if r.method != \"create\" and r.id:\n try:\n location\n except:\n location = get_location_info()\n if location.level:\n table.addr_street.writable = table.addr_street.readable = False\n table.addr_postcode.writable = table.addr_postcode.readable = False\n\n # Options which are only required in interactive HTML views\n table.level.comment = DIV(_class=\"tooltip\",\n _title=\"%s|%s\" % (T(\"Level\"),\n T(\"If the location is a geographic area, then state at what level here.\")))\n parent_comment = DIV(_class=\"tooltip\",\n _title=\"%s|%s\" % (T(\"Parent\"),\n T(\"The Area which this Site is located within.\")))\n if r.representation == \"popup\":\n table.parent.comment = parent_comment\n else:\n # Include 'Add Location' button\n table.parent.comment = DIV(A(ADD_LOCATION,\n _class=\"colorbox\",\n _href=URL(r=request, c=\"gis\", f=\"location\",\n args=\"create\",\n vars=dict(format=\"popup\",\n child=\"parent\")),\n _target=\"top\",\n _title=ADD_LOCATION),\n parent_comment),\n table.osm_id.comment = DIV(_class=\"stickytip\",\n _title=\"OpenStreetMap ID|%s%s%s\" % (T(\"The\"),\n \" <a href='http://openstreetmap.org' target=_blank>OpenStreetMap</a> ID. \",\n T(\"If you know what the OSM ID of this location is then you can enter it here.\")))\n table.geonames_id.comment = DIV(_class=\"stickytip\",\n _title=\"Geonames ID|%s%s%s\" % (T(\"The\"),\n \" <a href='http://geonames.org' target=_blank>Geonames</a> ID. \",\n T(\"If you know what the Geonames ID of this location is then you can enter it here.\")))\n table.comments.comment = DIV(_class=\"tooltip\",\n _title=\"%s|%s\" % (T(\"Comments\"),\n T(\"Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.\")))\n\n if r.representation == \"iframe\":\n # De-duplicator needs to be able to access UUID fields\n table.uuid.readable = table.uuid.writable = True\n table.uuid.label = \"UUID\"\n table.uuid.comment = DIV(_class=\"stickytip\",\n _title=\"UUID|%s%s%s\" % (T(\"The\"),\n \" <a href='http://eden.sahanafoundation.org/wiki/UUID#Mapping' target=_blank>Universally Unique ID</a>. \",\n T(\"Suggest not changing this field unless you know what you are doing.\")))\n\n if r.method in (None, \"list\") and r.record is None:\n # List\n pass\n elif r.method in (\"delete\", \"search\"):\n pass\n else:\n # Add Map to allow locations to be found this way\n config = gis.get_config()\n lat = config.lat\n lon = config.lon\n zoom = config.zoom\n feature_queries = []\n\n if r.method == \"create\":\n add_feature = True\n add_feature_active = True\n else:\n if r.method == \"update\":\n add_feature = True\n add_feature_active = False\n else:\n # Read\n add_feature = False\n add_feature_active = False\n\n try:\n location\n except:\n location = get_location_info()\n if location and location.lat is not None and location.lon is not None:\n lat = location.lat\n lon = location.lon\n # Same as a single zoom on a cluster\n zoom = zoom + 2\n\n # @ToDo: Does map make sense if the user is updating a group?\n # If not, maybe leave it out. 
OTOH, might be nice to select\n # admin regions to include in the group by clicking on them in\n # the map. Would involve boundaries...\n _map = gis.show_map(lat = lat,\n lon = lon,\n zoom = zoom,\n feature_queries = feature_queries,\n add_feature = add_feature,\n add_feature_active = add_feature_active,\n toolbar = True,\n collapsed = True)\n\n # Pass the map back to the main controller\n vars.update(_map=_map)\n return True\n response.s3.prep = lambda r, vars=vars: prep(r, vars)\n\n # Options\n _vars = request.vars\n filters = []\n\n parent = _vars.get(\"parent_\", None)\n # Don't use 'parent' as the var name as otherwise it conflicts with the form's var of the same name & hence this will be triggered during form submission\n if parent:\n # We want to do case-insensitive searches\n # (default anyway on MySQL/SQLite, but not PostgreSQL)\n _parent = parent.lower()\n\n # Can't do this using a JOIN in DAL syntax\n # .belongs() not GAE-compatible!\n query = (db.gis_location.name.lower().like(_parent))\n filters.append((db.gis_location.parent.belongs(db(query).select(db.gis_location.id))))\n # ToDo: Make this recursive - want descendants not just direct children!\n # Use new gis.get_children() function\n\n # ToDo\n # bbox = _vars.get(\"bbox\", None):\n\n if filters:\n from operator import __and__\n response.s3.filter = reduce(__and__, filters)\n\n caller = _vars.get(\"caller\", None)\n if caller:\n # We've been called as a Popup\n if \"gis_location_parent\" in caller:\n # Hide unnecessary rows\n table.addr_street.readable = table.addr_street.writable = False\n else:\n parent = _vars.get(\"parent_\", None)\n # Don't use 'parent' as the var name as otherwise it conflicts with the form's var of the same name & hence this will be triggered during form submission\n if parent:\n table.parent.default = parent\n\n # Hide unnecessary rows\n table.level.readable = table.level.writable = False\n table.geonames_id.readable = table.geonames_id.writable = False\n table.osm_id.readable = table.osm_id.writable = False\n table.source.readable = table.source.writable = False\n table.url.readable = table.url.writable = False\n\n level = _vars.get(\"level\", None)\n if level:\n # We've been called from the Location Selector widget\n table.addr_street.readable = table.addr_street.writable = False\n\n output = s3_rest_controller(module, resourcename)\n\n _map = vars.get(\"_map\", None)\n if _map and isinstance(output, dict):\n output.update(_map=_map)\n\n return output", "def get(self, request):\n source = request.GET.get(\"source\", \"BLR\")\n destination = request.GET.get(\"destination\", \"DEL\")\n dateofdeparture = request.GET.get(\"date_of_departure\", \"20191027\")\n resp = get_flights(source, destination, dateofdeparture)\n return Response(resp)", "def detail(request, location_id):\n location = get_object_or_404(Location, pk=location_id)\n\n return render(request, \"locations/detail.html\", context=fill_context({\"location\": location}))", "def get(self, request):\n LOGGER.info(\"Retrieving career planning data\")\n request_type = request.GET.get(\"request_type\")\n\n if request_type == \"SEARCH\":\n name = request.GET.get(\"menu\")\n result = CareerPlanning.objects.filter(manu__icontains=name)\n else:\n result = CareerPlanning.objects.all()\n career_planning_list = []\n\n for career_planning in result:\n career_planning_dict = model_to_dict(career_planning)\n career_planning_dict.pop(\"content\")\n career_planning_list.append(career_planning_dict)\n return Response({\"status\": \"SUCCESS\", \"data\": 
career_planning_list})", "def index():\n if request.method =='POST':\n session[\"place\"] = request.form[\"place\"] # Stores \"place\" input in session\n return redirect(url_for(\"nearest\"))\n else:\n return render_template(\"index.html\")", "def mainland(request):\n # TODO: read measurement data from shared file share\n return render(request, 'mainland.html')", "def get_location(self):\n\t\treturn self.location", "def get_location(self):\r\n return None", "def location_callback(self,msg):\n self.location = msg.data", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start_date(yyyy-mm-dd)<br/>\"\n f\"/api/v1.0/start_date(yyyy-mm-dd)/end_date(yyyy-mm-dd)<br/>\")", "def restock_locations(request, *args, **kwargs):\n checkout_locations = Location.objects.checkout().order_by('name')\n return render(request, \"admin/restock_locations.html\", {'locations': checkout_locations})", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs\"\n )", "def get_traffic_json_resource(self, location_data: tuple, location_type: str, zoom: int) -> str:\n if location_type == \"latlon\":\n (col, row) = self.get_tile(*location_data, zoom)\n elif location_type == \"colrow\":\n (col, row) = location_data\n\n quadkey = self.get_quadkeys(col, row, zoom)\n total_url = self.json_tile_base_url + 'app_id=' + self.app_id + \\\n '&app_code=' + self.app_code + '&quadkey=' + quadkey + '&responseattributes=sh,fc'\n\n return total_url", "def get(self, request, pk, format=None):\n user = self.get_user(pk=pk)\n \n if 'longitude' in request.GET and 'latitude' in request.GET: #this means we want to see if we are by any of the reminders\n current_lon = float(request.GET['longitude']) #getting user's latitude and longitude\n current_lat = float(request.GET['latitude'])\n\n else: #this means we just want all of the reminders.\n serializer = LocationReminderSerializer(user.location_reminders, many=True)\n return Response(serializer.data)\n\n\n serializer = LocationReminderSerializer(user.location_reminders, many=True)\n length = len(serializer.data)\n nearby = []\n for i in range(length):\n lat = float(serializer.data[i]['latitude']) #get events latitude and longitude\n lon = float(serializer.data[i]['longitude'])\n\n distance_in_meters = calcDistance(current_lat, current_lon, lat, lon)\n radius = serializer.data[i]['radius'] \n if distance_in_meters < radius: #if we are within the distance specified by the user\n nearby.append(serializer.data[i])\n\n return Response(nearby)", "def do_GET(self):\n\n\t\t# Delegate the request to a specialised method appropriate for each route.\n\t\t# Creating a \"switch\" construct for choosing the right delegate method.\n\t\tdelegates = {'/contact.html' : self.processContactRequest,\n\t\t\t\t\t\t'/products.html' : self.processProductsRequest,\n\t\t\t\t\t\t'/report.html' : self.processReportRequest}\n\t\ttry:\n\t\t\tself.render(delegates[self.path]())\n\t\texcept:\n\t\t\tself.render(self.page_not_found())", "def get_location_tree(request):\n atrtibute_tree = Location.populate_dropdown()\n return JsonResponse({'data': atrtibute_tree})", "def get_data():\n pass", "def show_routes(request):\n\n routes = get_route_list_db('sf-muni')\n return HttpResponse(json.dumps(routes), content_type='application/json')", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/preciptation<br/>\"\n 
f\"/api/v1.0/Stations\"\n )", "def weather():\n latlong = request.form.get(\"latlong\")\n latlong = latlong.split(\",\")\n data = lookup_weather(latlong[0],latlong[1])\n return render_template(\"weather.html\", data = data)", "def show_all_locations():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n locations = Location.query.all()\n \n ## this is ALL locations\n \n return render_template(\"locations_display.html\", locations = locations)", "def get_locations(self, location, **kwargs):\n StockLocation = self.env[\"stock.location\"]\n location.ensure_one()\n\n if location.usage == \"view\":\n return StockLocation.browse()\n return location", "def index():\n return (\n f\"Welcome to my Hawaii trip info!<br/>\"\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end>\"\n )", "def browse_location(self, level=0, URL_location=None):", "def __start_petition__(self):\n url = self.__url__.format(lat=self.location.get_lat(), long=self.location.get_long())\n # make the request\n with requests.get(url) as r:\n # parse the data to json object\n data = json.loads(r.text)[0]\n # if server return an eror\n if \"error\" in data.keys():\n raise ValueError(data[\"error\"])\n if \"politics\" in data.keys():\n locations = data[\"politics\"]\n if locations is not None and locations != \"null\": # if have samething in locations:\n for l in locations:\n if \"country\" == l[\"friendly_type\"]:\n self.country = l[\"name\"]\n elif \"state\" == l[\"friendly_type\"]:\n self.state = l[\"name\"]\n elif \"city\" == l[\"friendly_type\"]:\n self.city = l[\"name\"]\n r.close()", "def hospital_viewer():\r\n name = request.args[\"address\"]\r\n hospitals = get_zipcode_hospitals(name)\r\n hospitals['coordinate'] = 'end_point='+hospitals['name'].astype(str)+'&'+'end_lng=' + hospitals['lon'].astype(str)+'&'+'end_lat='+hospitals['lat'].astype(str)\r\n\r\n\r\n if len(hospitals) > 0:\r\n\r\n #genetrate folium map\r\n hospitals_coordinates = hospitals[[\"lat\", \"lon\"]].values.tolist()\r\n\r\n map=make_folium_map(hospitals_coordinates)\r\n\r\n return render_template(\r\n \"page3_2h.html\",\r\n num_hospitals=get_num_hospitals(name),\r\n address=name,\r\n hospitals=hospitals[[\"name\", \"address\", \"contact\", \"coordinate\"]].values,\r\n map=map._repr_html_()\r\n )\r\n else:\r\n\r\n lng=get_address(name)[1]\r\n lat=get_address(name)[0]\r\n near_hospital = find_5near_hospitals(lng, lat)\r\n near_hospital['coordinate'] = 'end_point='+near_hospital['name'].astype(str)+'&'+'end_lng=' + near_hospital['lon'].astype(str)+'&'+'end_lat='+near_hospital['lat'].astype(str)\r\n\r\n return render_template(\r\n \"page3_2h_nohospital.html\",\r\n address=name,\r\n near_hospital_table=near_hospital[[\"name\", \"address\", \"contact\", \"coordinate\", \"distance\"]].values,\r\n )", "def get_location(self):\r\n response = self.connection.make_request('GET', self.name,\r\n query_args='location')\r\n body = response.read()\r\n if response.status == 200:\r\n rs = ResultSet(self)\r\n h = handler.XmlHandler(rs, self)\r\n xml.sax.parseString(body, h)\r\n return rs.LocationConstraint\r\n else:\r\n raise self.connection.provider.storage_response_error(\r\n response.status, response.reason, body)", "def _send_request(self):\n route_chosen = self.comboBox_route_list.currentText()\n route_id = 
route_chosen.split(',')[0] #to get the id of the route\n trip_headsign_chosen = self.comboBox_trip_headsign_list.currentText()\n stop_chosen = self.comboBox_stop_list.currentText()\n self.request(route_id, trip_headsign_chosen, stop_chosen)", "def location_details_gen(self, args):\n lat, longt, city, code, timezone = faker.location_on_land()\n\n if args == 'lattlong':\n return lat, longt\n \n elif args == 'city':\n return city\n \n elif args == 'timezone':\n return timezone\n \n elif args == 'code':\n return code", "def _handle_get(self, request, *args, **kwargs):\n self.URL_VARIABLES = {\n 'vendor_location_id': kwargs.get('vendor_location_id'),\n 'vendor_id': kwargs.get('vendor_id')\n }\n\n results = Meal.objects.prefetch_related('vendor_location__vendor__images', 'images').filter(\n vendor_location__pk=kwargs.get('vendor_location_id'),\n vendor_location__vendor__pk=kwargs.get('vendor_id')).order_by('available_starting')\n\n show_deleted = request.QUERY_PARAMS.get('show_deleted', False)\n\n if show_deleted in ['false', 0, False]:\n results = results.filter(is_deleted=False)\n\n return self.list_results(request, results, MealSerializer, use_cache=True, cache_time=self.CACHE_30_DAYS,\n cache_version=1)", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end><br/>\"\n )", "def cinema_in_location(request, suburb):\n if request.method == 'GET':\n #The following is a JOIN query on the Cinema and Address tables\n cinemas = Cinema.objects.filter(address__suburb__iexact=suburb)\n serializer = MovieSerializer(cinemas, many=True)\n return JSONResponse(serializer.data)\n else:\n return HttpResponse(status=status.HTTP_405_METHOD_NOT_ALLOWED)", "def location_details(self, **kwargs):\n \n self.options.update(kwargs)\n self.options['action'] = 'locator.location.details'\n return self.call(self.options)", "def get(self):\n self.get_or_post(method='GET')", "def get_map_josn(request):\n if request.method == 'GET':\n data = get_json()\n print('Responsed')\n return Response(data,status=status.HTTP_200_OK)", "def retrieval():\n try:\n if request.method == 'GET':\n country = request.args.get('country') # If no key then null\n year = request.args.get('year') # If no key then null\n return spout(country, year)\n except Exception as e:\n # Unfortunately I'm not going to wrap this in indv. 
strings\n r = Response(response=error_msg+str(e),\n status=404,\n mimetype=\"application/xml\")\n r.headers[\"Content-Type\"] = \"text/xml; charset=utf-8\"\n return r", "def get(self, location, authorization_required=True):\n url = 'https://{}/api/v1/{}'.format(self.host, location.strip('/'))\n headers = {\n 'Content-Type': 'application/json',\n }\n if authorization_required:\n if not self.authorized:\n self.login()\n if self.token:\n headers['Authorization'] = 'Bearer {}'.format(self.token)\n request = self.session.get(\n url, headers=headers,\n verify=self.verify)\n return request", "def location_resolve():\n\n # @ToDo: Error gracefully if conditions not satisfied\n locID1 = request.vars.locID1\n locID2 = request.vars.locID2\n\n # Shortcut\n locations = db.gis_location\n\n # Remove the comment and replace it with buttons for each of the fields\n count = 0\n for field in locations:\n id1 = str(count) + \"Right\" # Gives a unique number to each of the arrow keys\n id2 = str(count) + \"Left\"\n count = count + 1\n\n # Comment field filled with buttons\n field.comment = DIV(TABLE(TR(TD(INPUT(_type=\"button\", _id=id1, _class=\"rightArrows\", _value=\"-->\")),\n TD(INPUT(_type=\"button\", _id=id2, _class=\"leftArrows\", _value=\"<--\")))))\n record = locations[locID1]\n myUrl = URL(r=request, c=\"gis\", f=\"location\")\n form1 = SQLFORM(locations, record, _id=\"form1\", _action=(\"%s/%s\" % (myUrl, locID1)))\n\n # For the second location remove all the comments to save space.\n for field in locations:\n field.comment = None\n record = locations[locID2]\n form2 = SQLFORM(locations, record,_id=\"form2\", _action=(\"%s/%s\" % (myUrl, locID2)))\n return dict(form1=form1, form2=form2, locID1=locID1, locID2=locID2)", "def _get_location_details(self, location):\n resp = requests.get(\n self.base_url,\n params = {\n 'address': ''.join(location.split(' ')),\n 'key': GOOGLE_API_KEY,\n }\n )\n return resp.json()", "def show_place(locid):\n t0 = time.time()\n u_context = UserContext(user_session, current_user, request)\n try:\n # Open database connection and start transaction\n # readservice -> Tietokantapalvelu\n # reader ~= Toimialametodit\n\n with PlaceReader(\"read\", u_context) as service:\n # reader = PlaceReader(readservice, u_context)\n res = service.get_places_w_events(locid)\n\n if res[\"status\"] == Status.NOT_FOUND:\n print(f'bp.scene.routes.show_place: {_(\"Place not found\")}')\n # return redirect(url_for('virhesivu', code=1, text=f'Ei löytynyt yhtään'))\n if res[\"status\"] != Status.OK:\n print(\n f'bp.scene.routes.show_place: {_(\"Place not found\")}: {res.get(\"statustext\")}'\n )\n # return redirect(url_for('virhesivu', code=1, text=f'Virhetilanne'))\n\n except KeyError as e:\n traceback.print_exc()\n return redirect(url_for(\"virhesivu\", code=1, text=str(e)))\n\n cnt = len(res.get(\"events\")) if res.get(\"events\", False) else 0\n stk_logger(u_context, f\"-> bp.scene.routes.show_place n={cnt}\")\n return render_template(\n \"/scene/place_events.html\",\n place=res.get(\"place\"),\n pl_hierarchy=res.get(\"hierarchy\"),\n events=res.get(\"events\"),\n user_context=u_context,\n elapsed=time.time() - t0,\n )", "def home_page():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start<br/>\"\n f\"/api/v1.0/start/end<br/>\"\n )", "def routes(self, body):\n pass", "def get(self, request):\n return redirect('start:home')", "def get(self, request):\n return redirect('start:home')", "def 
get(self, request):\n return redirect('start:home')", "def get(self, request, **kwargs):\n player = get_player_from_request(request)\n if player is None or player.room is not None:\n return redirect(\"rooms:redirect\")\n\n rooms = Room.objects.all()\n return render(request, self.template_name, {\"rooms\": rooms})", "def home(request):\n\n context = {\n \"resource_id\": request.GET.get(\"resource_id\"),\n \"aggregation_id\": request.GET.get(\"aggregation_path\"),\n \"geoserver_url\": app.get_custom_setting(\"geoserver_url\"),\n \"hydroserver_url\": app.get_custom_setting(\"hydroserver_url\"),\n \"max_layers\": app.get_custom_setting(\"max_layers\")\n }\n\n return render(request, 'hydroshare_data_viewer/home.html', context)", "async def get(self):\n\n # if the train number is specified through\n # a GET parameter, redirect to the perma link\n self.train_number = self.request.rel_url.query.get('train', None)\n if self.train_number:\n return web.HTTPMovedPermanently(\n '/%s/train/%s' % (self.request.language, self.train_number)\n )\n\n self.train_number = self.request.match_info.get('train')\n return await super(TrainView, self).get()", "def routing_area(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"routing_area\"), kwargs)", "def get(self, request, pk, *args, **kwargs):\n self.restaurant = get_object_or_404(Restaurant, id=pk)\n self.page_title = \"{} Information\".format(self.restaurant.name)\n\n def get_list_or_none(klass, *args, **kwargs):\n queryset = _get_queryset(klass)\n obj_list = list(queryset.filter(*args, **kwargs))\n if obj_list:\n return obj_list\n return []\n\n self.dishes = get_list_or_none(Dish, restaurant=self.restaurant)\n self.orders = get_list_or_none(Order, restaurant=self.restaurant)\n return super().get(request, *args, **kwargs)", "def get(self, slug = None):\n\n # prefill some values if there's already a previous event\n obj = {}\n el = self.barcamp.eventlist\n if len(el):\n e = el[-1]\n #obj['date'] = e['date'] + datetime.timedelta(days=1)\n obj['start_time'] = e['start_time']\n obj['end_time'] = e['end_time']\n obj['size'] = e['size']\n else:\n obj['date'] = self.barcamp.start_date\n obj['start_time'] = \"12:00\"\n obj['end_time'] = \"23:00\"\n\n form = EventForm(self.request.form, config = self.config, **obj)\n\n # get countries and translate them\n try:\n trans = gettext.translation('iso3166', pycountry.LOCALES_DIR,\n languages=[str(self.babel_locale)])\n except IOError:\n # en only has iso3166_2\n trans = gettext.translation('iso3166_2', pycountry.LOCALES_DIR,\n languages=[str(self.babel_locale)])\n \n countries = [(c.alpha_2, trans.ugettext(c.name)) for c in pycountry.countries]\n form.location_country.choices = countries\n\n\n if self.request.method == 'POST' and form.validate():\n f = form.data\n f['location'] = {\n 'name' : f['location_name'],\n 'street' : f['location_street'],\n 'city' : f['location_city'],\n 'zip' : f['location_zip'],\n 'country' : f['location_country'],\n 'email' : f['location_email'],\n 'phone' : f['location_phone'],\n 'url' : f['location_url'],\n 'description' : f['location_description'],\n }\n\n # retrieve geo location (but only when not in test mode as we might be offline)\n # and if user hasn't provided own coors\n if self.request.form.get('own_coords', \"no\") != \"yes\" and f['location_street'] and not self.config.testing and f['own_location']:\n street = f['location']['street']\n city = f['location']['city']\n zip = f['location']['zip']\n country = f['location']['country'] \n try:\n lat, lng = 
self.retrieve_location(street, zip, city, country)\n f['location']['lat'] = lat\n f['location']['lng'] = lng\n except LocationNotFound:\n self.flash(self._(\"the city was not found in the geo database\"), category=\"danger\")\n\n if self.request.form.get('own_coords', 'no') == \"yes\":\n f['location']['lat'] = f['location_lat']\n f['location']['lng'] = f['location_lng']\n\n # create and save the event object inside the barcamp\n eid = f['_id'] = unicode(uuid.uuid4())\n event = db.Event(f)\n self.barcamp.events[eid] = event\n self.barcamp.save()\n\n self.flash(self._(\"The event has been created\"), category=\"info\")\n return redirect(self.url_for(\".events\", slug=slug))\n return self.render(form = form, slug = slug, events = self.barcamp.eventlist)", "def getlocation(location):\n response = requests.get(location)\n return LocationData(size=len(response.content), elapsed=response.elapsed)", "def home():\n return (\n f\"These are the available routes:</br>\"\n f\"/api/v1.0/precipitation</br>\"\n f\"/api/v1.0/stations</br>\"\n f\"/api/v1.0/tobs</br>\"\n f\"/api/v1.0/< start ></br>\"\n f\"/api/v1.0/< start >/< end ></br>\"\n )", "def popData(data, location):\n\n start_time = time.time()\n\n location['street'] = data.get('short').get('route')\n location['location'] = dict(latitude=data.get('location').get('lat'), longitude=data.get('location').get('lng'))\n\n print('--- Tiempo de ejecucion popData: {} segundos ---'.format((time.time() - start_time)))\n return location", "def closest_hospitals(self, request):\n longitude = request.GET.get(\"lon\", None)\n latitude = request.GET.get(\"lat\", None)\n\n if longitude and latitude:\n user_location = Point(float(longitude), float(latitude), srid=4326)\n closest_hospitals = Hospital.objects.filter(\n geom__distance_lte=(user_location, D(km=3))\n )\n serializer = self.get_serializer_class()\n serialized_hospitals = serializer(closest_hospitals, many=True)\n return Response(serialized_hospitals.data, status=status.HTTP_200_OK)\n return Response(status=status.HTTP_400_BAD_REQUEST)", "def fetchGeoData():\n if request.method ==\"POST\":\n result = {}\n if request.get_json():\n post_requests = request.get_json()\n print(post_requests)\n result = db.getmapdata(post_requests['attr']) \n return result", "def get_data(self):", "def tubbo_location():\n shelter_id = request.form.get('shelter_id')\n shelter = petfinder.shelter_data_map(shelter_id)\n shelter = list(shelter.values())\n\n return jsonify(shelter)", "def get_location(self):\n return self.cleaned_data['location']", "def get(self, lid=None):\n if lid:\n loc = Location.query.filter(Location.id == lid).one_or_none()\n if loc is None:\n return None\n else:\n return loc.json()\n else:\n locs = Location.query.all()\n lista = []\n for loc in locs:\n lista.append(loc.json())\n return lista", "def route(self, ori, dest, pois):\n #find one route from ori to dest\n departure_time = int(time.time())\n routes = util.query_routes(origin=ori, \n destination=dest,\n departure_time=departure_time)\n if routes is None or routes['status'] != \"OK\":\n print ',=====',routes\n return None\n\n route = routes[\"routes\"][0] #get the first route\n\n #get the points in the route to search the potential poi\n points = util.extract_points(route)\n\n if points is None or len(points) ==0:\n print \"Error in extracting points\"\n return None\n #get the candiates in the route\n candidates = []\n way_points = pois.split(\"|\")\n for point in points:\n information = {}\n information[\"location\"] = point\n for way_p in way_points:\n 
response = util.get_nearby_points(location=point, keyword=way_p)\n if response is None or response[\"status\"] != \"OK\":\n information[way_p] = []\n continue\n ps = []\n for result in response[\"results\"]:\n poi = {\"geometry\": result[\"geometry\"],\n \"name\": result[\"name\"],\n \"price_level\": result.get(\"price_level\", None),\n \"rating\": result.get(\"rating\", None),\n \"vicinity\": result[\"vicinity\"]}\n ps.append(poi)\n information[way_p] = ps\n candidates.append(information)\n \n cost_matrix = waypoint.find_waypoints([candidates], way_points)\n cost_matrix.sort(key=lambda x:x[1])\n\n top_candidate = cost_matrix[0]\n json.dump(top_candidate, open('./top_candidate.json','w'))\n final_route = self.get_direction(ori, dest, top_candidate)\n json.dump(final_route, open(\"./real_route.json\", \"w\"))\n\n return final_route, top_candidate", "def __init__(self, location):\n self.location = location", "def do_GET(self):\n parsed_path = urlparse.urlparse(self.path)\n if parsed_path.path == '/books':\n return self.books()\n elif parsed_path.path == '/my_loans':\n return self.my_loans()\n return self.send_response(404)", "def listingsQuery():\n try:\n if request.method == 'GET':\n return render_template('queryListing.html')\n if request.method == 'POST':\n city = request.form.get('city')\n stateCode = request.form.get('stateCode')\n return redirect(url_for('main.listingsPage', city=city, stateCode=stateCode))\n except (ValueError, TypeError):\n return render_template('500.html'), 500", "def home(request):\n # Get list of sensors and create sensors MVLayer:\n sensors = get_all_sensors()\n features = []\n lat_list = []\n lng_list = []\n\n if sensors is not None:\n for sensor in sensors:\n lat_list.append(sensor.latitude)\n lng_list.append(sensor.longitude)\n\n sensor_feature = {\n 'type': 'Feature',\n 'geometry': {\n 'type': 'Point',\n 'coordinates': [sensor.longitude, sensor.latitude]\n },\n 'properties': {\n 'id': sensor.id,\n 'latitude': sensor.latitude,\n 'longitude': sensor.longitude\n }\n }\n features.append(sensor_feature)\n\n # Define GeoJSON FeatureCollection\n sensors_feature_collection = {\n 'type': 'FeatureCollection',\n 'crs': {\n 'type': 'name',\n 'properties': {\n 'name': 'EPSG:4326'\n }\n },\n 'features': features\n }\n\n # Create a Map View Layer\n sensors_layer = MVLayer(\n source='GeoJSON',\n options=sensors_feature_collection,\n legend_title='Sensors',\n layer_options={\n 'style': {\n 'image': {\n 'circle': {\n 'radius': 8,\n 'fill': {'color': '#d84e1f'},\n 'stroke': {'color': '#ffffff', 'width': 1},\n }\n }\n }\n },\n feature_selection=True\n )\n\n\n # Define view centered on sensor locations\n try:\n view_center = [sum(lng_list) / float(len(lng_list)), sum(lat_list) / float(len(lat_list))]\n except ZeroDivisionError:\n view_center = [-98.6, 39.8]\n\n view_options = MVView(\n projection='EPSG:4326',\n center=view_center,\n zoom=4.5,\n maxZoom=18,\n minZoom=2\n )\n\n sensor_map = MapView(\n height='100%',\n width='100%',\n layers=[sensors_layer],\n basemap='OpenStreetMap',\n view=view_options\n )\n\n context = {\n 'sensor_map': sensor_map,\n }\n\n return render(request, 'open_air/home.html', context)" ]
[ "0.6643356", "0.6501124", "0.62959623", "0.6232448", "0.62138563", "0.6093613", "0.60217726", "0.59801704", "0.59584683", "0.58436835", "0.5829568", "0.57859164", "0.577833", "0.5771708", "0.56856674", "0.56406236", "0.5606987", "0.5575436", "0.55724984", "0.5565517", "0.55654144", "0.55651635", "0.5529574", "0.55286825", "0.55071896", "0.54828256", "0.5467841", "0.54419607", "0.5438582", "0.5438582", "0.54375", "0.543182", "0.5421169", "0.5413087", "0.54129386", "0.5405069", "0.5402469", "0.53934157", "0.53903174", "0.5390078", "0.5387827", "0.5382393", "0.5381415", "0.5373556", "0.53665155", "0.5362233", "0.5351719", "0.53383964", "0.5332393", "0.53199816", "0.53197956", "0.53181934", "0.5312883", "0.53103584", "0.52924436", "0.52816623", "0.5275857", "0.52666223", "0.52655953", "0.5265565", "0.5265564", "0.5259396", "0.5251116", "0.5248281", "0.5236019", "0.522967", "0.52230525", "0.52229905", "0.5218453", "0.5218191", "0.5216843", "0.52147055", "0.52143395", "0.5210061", "0.5209411", "0.5207158", "0.5204978", "0.5204469", "0.5204469", "0.5204469", "0.5200334", "0.5199128", "0.51875895", "0.51832575", "0.5178971", "0.5178611", "0.51774305", "0.517171", "0.51691806", "0.5162161", "0.5159448", "0.51580536", "0.51572317", "0.51541567", "0.5150782", "0.51423424", "0.5140422", "0.5133973", "0.51335526", "0.5131197" ]
0.52397954
64
Computes the forward pass for a fully-connected layer. The input x has shape (N, Din) and contains a minibatch of N examples, where each example x[i] has shape (Din,).
def fc_forward(x, w, b):
    out = None
    ###########################################################################
    # TODO: Implement the forward pass. Store the result in out.              #
    ###########################################################################
    N = x.shape[0]
    x2d = x.reshape(N, -1)
    out = x2d.dot(w) + b
    ###########################################################################
    #                             END OF YOUR CODE                            #
    ###########################################################################
    cache = (x, w, b)
    return out, cache
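A minimal usage sketch for the forward pass above, assuming NumPy and arbitrary illustrative sizes (N=2, Din=4, M=3 are assumptions, not values from the dataset):

import numpy as np

# Illustrative shapes only: N examples, each with Din input features,
# mapped to M outputs by weights w (Din, M) and bias b (M,).
N, Din, M = 2, 4, 3
x = np.random.randn(N, Din)
w = np.random.randn(Din, M)
b = np.random.randn(M)

out, cache = fc_forward(x, w, b)
assert out.shape == (N, M)                                   # one row of M scores per example
assert np.allclose(out, x.reshape(N, -1).dot(w) + b)         # matches the affine map out = xw + b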
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, x): \n # Layer 1\n x = F.elu(self.conv1(x)) # bsize x l1_channels x 1 x Nsamples\n x = self.batchnorm1(x)\n x = F.dropout(x, 0.25)\n x = x.permute(0, 2, 1, 3) # bsize x 1 x l1_channels x Nsamples\n\n # Layer 2\n x = self.padding1(x)\n x = F.elu(self.conv2(x)) # bsize x l2_channels x l1_channels x Nsamples\n x = self.batchnorm2(x) \n x = F.dropout(x, 0.25)\n x = self.pooling2(x) # bsize x l2_channels x floor(l1_channels/2) x floor(Nsamples/4)\n\n # Layer 3\n x = self.padding2(x)\n x = F.elu(self.conv3(x)) # bsize x l3_channels x floor(l1_channels/2) x floor(Nsamples/4)\n x = self.batchnorm3(x)\n x = F.dropout(x, 0.25)\n x = self.pooling3(x) # bsize x l3_channels x floor(l1_channels/4) x floor(Nsamples/16)\n\n # Fully-connected Layer\n x = x.view(-1, self.fc1.in_features) # bsize x (l3_channels*floor(l1_channels/4)*floor(Nsamples/16))\n x = F.sigmoid(self.fc1(x)) # bisze x self.fc1.out_features \n \n if self.fc1.out_features == 1:\n x = x.view(-1) # bsize (1D if 1 output unit)\n \n return x", "def forward(self, x):\n x = self._activation(self.fully_connected_1(x))\n x = self._activation(self.fully_connected_2(x))\n x = self.dropout(x)\n x = self._activation(self.fully_connected_3(x))\n x = self._activation(self.fully_connected_4(x))\n x = self.dropout(x)\n x = self._activation(self.fully_connected_5(x))\n return self.fully_connected_out(x)", "def forward(self, x):\n x = self.pool(x)\n x = self.conv(x)\n x = x.reshape(x.shape[0], -1)\n x = self.relu(self.fc1(x))\n x = self.dropout1(x)\n x = self.fc2(x)\n x = self.dropout2(x)\n x = self.fc3(x)\n x = self.dropout3(x)\n x = self.fc4(x)\n\n return x", "def forward(self, x):\n x = self.first_deconv(x)\n x = self.first_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.second_deconv(x)\n x = self.second_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.third_deconv(x)\n x = self.third_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.fourth_deconv(x)\n x = self.fourth_batch_norm(x)\n\n x = self.fifth_deconv(x)\n x = self.fifth_batch_norm(x)\n\n x = self.sixth_deconv(x)\n x = self.sixth_batch_norm(x)\n\n x = self.seventh_deconv(x)\n\n # sigmoid_out = nn.functional.sigmoid(x)\n tanh_out = nn.functional.tanh(x)\n\n out = (tanh_out + 1) * 255 / 2\n\n # print 'out.shape =', out.shape\n\n return out", "def forward(self, x):\n previous_batch, current_batch = x\n previous_batch_pc, previous_batch_f = previous_batch[0], previous_batch[1]\n current_batch_pc, current_batch_f = current_batch[0], current_batch[1]\n\n f1 = previous_batch_pc[:, :, 3:]\n pc1 = previous_batch_pc[:, :, :3]\n\n f2 = current_batch_pc[:, :, 3:]\n pc2 = current_batch_pc[:, :, :3]\n\n batch_size, n_points_prev, _ = previous_batch_pc.shape\n batch_size, n_points_cur, _ = current_batch_pc.shape\n\n # All outputs of the following layers are tuples of (pos, features)\n # --- Point Feature Part ---\n pf_prev_1, pf_prev_2, pf_prev_3 = self._point_feature_net(pc1.float(), f1.float())\n pf_curr_1, pf_curr_2, pf_curr_3 = self._point_feature_net(pc2.float(), f2.float())\n\n # --- Flow Embedding / Point Mixture Part ---\n _, fe_2, fe_3 = self._point_mixture(x1=pf_prev_3, x2=pf_curr_3)\n\n # --- Flow Refinement Part ---\n x = self._flow_refinement(pf_curr_1=pf_curr_1, pf_curr_2=pf_curr_2, pf_curr_3=pf_curr_3, fe_2=fe_2, fe_3=fe_3)\n\n # --- Final fully connected layer ---\n pos, features = x\n features = features.transpose(1, 2)\n x = self._fc(features)\n return x", "def forward(self, x):\n\n x = F.max_pool2d(F.relu(self.batch_norm1(self.conv1(x))), 3, stride=2, padding=1)\n x = 
F.max_pool2d(F.relu(self.batch_norm2(self.conv2(x))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm3_b(self.conv3_b(F.relu(self.batch_norm3_a(self.conv3_a(x)))))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm4_b(self.conv4_b(F.relu(self.batch_norm4_a(self.conv4_a(x)))))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm5_b(self.conv5_b(F.relu(self.batch_norm5_a(self.conv5_a(x)))))), 3, stride=2, padding=1)\n x = self.avg_pool(x).view(-1,512)\n out = self.linear(x)\n\n return out", "def forward_pass(self, x):\n # Forward pass on the convolutions\n conv_output, x = self.forward_pass_on_convolutions(x)\n x = x.view(x.size(0), -1) # Flatten\n # Forward pass on the classifier\n x = self.model.fc(x)\n return conv_output, x", "def forward(self, x):\n\n x = self.first_conv_layer(x)\n x = self.second_conv_layer(x)\n x = self.third_conv_layer(x)\n x = self.fourth_conv_layer(x)\n\n #print 'x.shape=', x.shape\n x = x.view(-1, 5 * 5 * 64)\n x = F.relu(self.fc1(x))\n\n sigmoid_out = nn.functional.sigmoid(x)\n\n return sigmoid_out", "def setup_fully_connected(in_features: int, out_features: int, width: int = 16, depth: int = 4) -> hessQuik.networks.NN:\n f = net.fullyConnectedNN([in_features] + depth * [width] + [out_features], act=act.tanhActivation())\n return f", "def forward(self, x):\n c_out = self.conv_net.forward(x)\n\n c_out_flat = c_out.flatten(start_dim=1)\n \n \n return self.linear.forward(c_out_flat)", "def forward(self, x):\n #print('output of fetures.children() : %s'%str([i for i in self.features.children()]))\n #print(\"shape of input is %s\" % str(x.size()))\n for layer_no, layer in enumerate(self.features.children()):\n\n if layer_no is 23:\n y = layer(x)\n if layer_no is 33:\n z = layer(x)\n x = layer(x)\n\n #print('debug')\n #print('layer info: %s'%str(layer))\n #print(\"shape of x is %s\" % str(x.size()))\n\n x = self.conv1D_downstream1(x)\n x = self.conv1D_downstream2(x)\n x = self.upsample_1(x)\n\n z = self.conv1D_pool4(z)\n y = self.conv1D_pool3(y)\n #print('debug')\n #print(\"shape of x is %s\"%str(x.size()))\n #print(\"shape of z is %s\" % str(z.size()))\n\n if x.size() is not z.size():\n x = nn.functional.interpolate(x,size = (z.size()[2],z.size()[3]), mode = 'nearest')\n x = x+ z\n x = self.upsample_2(x)\n x = x+y\n x = self.upsample_3(x)\n\n return x", "def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.maxpool(out)\n out = self.avgpool(out)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.avgpool(out)\n out = torch.flatten(out, 1)\n out = self.fc(out)\n return out", "def forward(self, x):\n \n x = F.relu(self.conv1_bn(self.conv1(self.conv0_bn(x))))\n x = F.relu(self.conv2_bn(self.conv2(x)))\n x = F.relu(self.conv3_bn(self.conv3( self.maxpool2(x))))\n x = F.relu(self.conv4_bn(self.conv4( self.maxpool3(x))))\n x = self.maxpool4(x) \n x = x.view(-1, 1184)\n x = F.relu(self.fc1(x))\n x = self.dense1_bn(x)\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x)", "def _full_fourier_graph_conv_step(\n X,\n G,\n scope,\n nodelist,\n receptive_field_size = 10,\n num_filters_out = 32,\n activation = leaky_relu,\n batch_normalization = None,\n training = True,\n weights_init = tf.truncated_normal_initializer(mean=0.0, stddev=0.05),\n bias_init = tf.constant_initializer(0.0),\n):\n num_samples, num_features, num_filters_in = X.shape.as_list()\n\n L = 
networkx.normalized_laplacian_matrix(G, nodelist=nodelist)\n U = tf.constant(np.linalg.eigh(L.toarray())[1], dtype=tf.float32)\n\n # TODO(mmd): Get the below to work.\n #_, U = scipy.sparse.linalg.eigsh(L, k=k, which='SM')\n\n x = tf.transpose(X, [0, 2, 1]) # num_samples x num_filters_in x num_features\n x = tf.reshape(x, [num_samples * num_filters_in, num_features])\n xf = tf.expand_dims(tf.matmul(x, U), 1)\n xf = tf.reshape(xf, [num_samples, num_filters_in, num_features])\n xf = tf.transpose(xf, [2, 1, 0]) # num_features x num_filters_in x num_samples\n\n with tf.variable_scope(scope):\n # TODO(mmd): Shapes probably wrong.\n W = tf.get_variable(\n 'graph_convolution',\n [num_features * num_filters_in, num_filters_out, 1],\n tf.float32,\n initializer = weights_init,\n )\n b = tf.get_variable(\n 'graph_bias',\n [1, num_filters_out, 1],\n tf.float32,\n initializer = bias_init,\n )\n\n yf = tf.matmul(W, xf)\n yf = tf.reshape(tf.transpose(yf, [2, 1, 0]), [num_samples * num_filters_out, num_features])\n y = tf.matmul(yf, tf.transpose(U))\n\n return activation(tf.reshape(y, [num_samples, num_filters_out, num_features]) + b)", "def forward(self, x):\n # Convolutional Layers\n ## add pooling layers\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))\n x = x.view(-1, 256) # flatten to pass to fully connected layers\n\n # fully connected layers\n ## and dropout layers\n x = F.relu(self.dropout(self.fc1(x)))\n x = F.relu(self.dropout(self.fc2(x)))\n x = self.fc3(x)\n\n return x", "def forward(self, x):\n\n x = self.first_conv_layer(x)\n x = self.second_conv_layer(x)\n x = self.third_conv_layer(x)\n x = self.fourth_conv_layer(x)\n x = self.fifth_conv_layer(x)\n\n '''\n x = x.view(-1, 4 * 4 * 512)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n '''\n\n sigmoid_out = nn.functional.sigmoid(x)\n\n return sigmoid_out", "def forward(self, x):\n #batch_size = x.shape[0]\n out = self.model(x)\n return out", "def _forward(self, x):\n global global_epoch\n global_epoch += 1\n bias = -np.ones((x.shape[0], 1))\n tail = np.zeros((x.shape[0], self.dim_hid+self.dim_out))\n nodes = np.concatenate((bias, x, tail), axis=1)\n weight = self.weight * self.connectivity\n for i in range(self.dim_in, self.dim_in+self.dim_hid+self.dim_out):\n net = nodes.dot(weight[i])\n nodes[:,i] = self.__sigmoid(net)\n nodes[:,self.dim_in:self.dim_in+self.dim_hid] *= self.hidden\n return nodes", "def forward(self,x):\n x = x.transpose(1,2).contiguous()\n x = F.leaky_relu(self.fc1(x), 0.2)\n x = F.leaky_relu(self.bn2(self.fc2(x)), 0.2)\n x = F.leaky_relu(self.bn3(self.fc3(x)), 0.2)\n x = torch.sigmoid(self.fc4(x))\n return x.transpose(1,2)", "def forward(self, x):\n if x.dim() == 1:\n x = torch.unsqueeze(x, 0)\n return self.net(x)", "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n x = self.pool1(F.relu(self.batch1(self.conv1(x))))\n x = self.pool2(F.relu(self.batch2(self.conv2(x))))\n x = F.relu(self.batch3a(self.conv3a(x)))\n x = self.pool3(F.relu(self.batch3b(self.conv3b(x))))\n x = F.relu(self.batch4a(self.conv4a(x)))\n x = self.pool4(F.relu(self.batch4b(self.conv4b(x))))\n x = F.relu(self.batch5a(self.conv5a(x)))\n x = self.pool5(F.relu(self.batch5b(self.conv5b(x))))\n x = self.avgpool(x)\n x = x.reshape(x.shape[0], -1)\n out = self.fc1(x)\n\n# raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def forward(self, x: torch.Tensor) -> torch.Tensor:\n 
model_output = None\n #######################################################################\n # Student code begins\n #######################################################################\n\n (N,C,H,W) = x.shape\n\n conv_features = self.conv_layers(x)\n \n flat_features = conv_features.reshape(-1, 500)\n model_output = self.fc_layers(flat_features)\n\n\n #######################################################################\n # Student code ends\n #######################################################################\n return model_output", "def forward(self, x: torch.Tensor) -> torch.Tensor:\r\n assert x.dim() == 4, \\\r\n \"Input should have 4 dimensions. Was {}\".format(x.dim())\r\n\r\n return self.net(x)", "def forward(self, x):\n cnn_out = self.hidden_layers(x) # apply hidden layers (N, n_in_channels, X, Y) -> (N, n_kernels, X, Y)\n pred = self.output_layer(cnn_out) # apply output layer (N, n_kernels, X, Y) -> (N, 1, X, Y)\n return pred", "def forward(self, x):\n x = self.conv1(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x)\n x = self.conv2(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x) \n x = self.maxpool(x) \n return x", "def forward(self, x):\n x1 = x[:, 0, :, :].reshape((-1, 1, obs_size * 2 + 1, obs_size * 2 + 1))\n x2 = x[:, 1, :, :].reshape((-1, (obs_size * 2 + 1) ** 2))\n if x2.shape[0] == 1:\n x2 = np.tile(x2, (minibatch_size, 1))\n h = F.relu(self.bn1(self.conv1(x)))\n h = F.relu(self.bn2(self.conv2(x)))\n h = F.relu(self.bn3(self.conv3(x)))\n h = self.l(h)\n return DiscreteActionValue(h)", "def forward(self, x):\n\n\t\t## Conv layers\n\t\tx = self.avgpool(F.tanh(self.conv1(x)))\n\t\tx = self.avgpool(F.tanh(self.conv2(x)))\n\t\tx = F.tanh(self.conv3(x))\n\n\t\t## Flatten\n\t\tx = x.view(x.size(0), -1)\n\n\t\t## Fully connected layers\n\t\tx = F.tanh(self.fc1(x))\n\t\tx = self.fc2(x)\n\n\t\tx = F.softmax(x, dim=1)\n\n\t\treturn x", "def forward(self, x):\n\n assert(len(x.shape) == 4)\n\n x_out = self.layers(x) + x\n\n return x_out", "def forward(self, x):\n x = self.fc0(x.view(-1, x.size(-1))).view(x.size(0), x.size(1), -1)\n x = self.pe(x)\n\n x = self.inner_layers(x) # FF, FF, FF, finalFF\n\n state_value = self.fc_s(x) # double-dqn : state\n\n advantage_values = self.fc_a(x) # double-dqn : advantage\n advantage_values = advantage_values.view(\n advantage_values.size()[:-1] + (self.action_size, self.n_atoms))\n\n dist_weights = state_value.unsqueeze(\n dim=-2) + advantage_values - advantage_values.mean(dim=-2, keepdim=True)\n\n return dist_weights", "def forward(self, x):\n batch_size = x.shape[0]\n expected_shape = (batch_size, self.num_classes)\n\n out = self.feature_extractor(x)\n out = torch.flatten(out, 1)\n out = self.classifier(out)\n\n assert out.shape == (batch_size, self.num_classes),\\\n f\"Expected output of forward pass to be: {expected_shape}, but got: {out.shape}\"\n return out", "def forward(self, x):\n batch_size = x.shape[0]\n expected_shape = (batch_size, self.num_classes)\n\n out = self.feature_extractor(x)\n out = torch.flatten(out, 1)\n out = self.classifier(out)\n\n assert out.shape == (batch_size, self.num_classes),\\\n f\"Expected output of forward pass to be: {expected_shape}, but got: {out.shape}\"\n return out", "def feed_forward(self, x_input: np.ndarray) -> np.ndarray:\n output = x_input.copy()\n if np.ndim(x_input) == 1:\n output = output.reshape(-1, 1)\n for layer in self.layers:\n output = 
layer.feed(output)\n return output", "def forward(self, x):\n if x.size()[0] != 1 or x.size()[1] != 200 or x.size()[2] != 96:\n return torch.zeros(1,1)\n x = x.view(1,1,x.size()[1],x.size()[2]) #1,1,200,96\n x = nn.MaxPool2d(2)(self.conv1(x))\n x = self.dropout(F.relu(x)) #1,3,96,46\n x = nn.MaxPool2d(2)(self.conv2(x))\n x = self.dropout(F.relu(x)) #1,6,47,21\n x = nn.MaxPool2d(2)(self.conv3(x))\n x = self.dropout(F.relu(x)) #1,12,21,8\n x = nn.MaxPool2d(2)(self.conv4(x))#1,24,8,2\n x = x.view(1,-1)#1,384\n x = self.fc1(F.relu(x))\n x = self.fc2(F.relu(x))\n x = self.fc3(F.relu(x))\n return F.sigmoid(x)", "def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.conv1_BN(x)\r\n x = F.relu(x)\r\n x = self.conv1_dp(x)\r\n x = self.Block2_1(x)\r\n x = self.Block2_2(x)\r\n x = self.Block3_1(x)\r\n x = self.Block3_2(x)\r\n x = self.Block3_3(x)\r\n x = self.Block3_4(x)\r\n x = self.Block4_1(x)\r\n x = self.Block4_2(x)\r\n x = self.Block4_3(x)\r\n x = self.Block4_4(x)\r\n x = self.Block5_1(x)\r\n x = self.Block5_2(x)\r\n x = self.MP(x)\r\n x = x.view(x.size(0),-1)\r\n x = self.fc(x)\r\n \r\n return x", "def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x.squeeze(0)", "def forward(self, x):\r\n x = x.reshape(x.shape[0], x.shape[1], 1 , 1)\r\n x = self.input(x)\r\n x = self.bn(x)\r\n x = F.relu(x)\r\n for i in range(len(self.DV)-1, -1, -1):\r\n x = self.DV[i](x)\r\n if i != 0:\r\n x = self.BN[i](x)\r\n x = F.relu(x)\r\n for col, t in enumerate(self.col_type):\r\n i = int(col/self.shape)\r\n j = col % self.shape\r\n if t == \"binary\":\r\n x[:,:,i,j] = torch.sigmoid(x[:,:,i,j])\r\n elif t == \"normalize\":\r\n x[:,:,i,j] = torch.tanh(x[:,:,i,j])\r\n else:\r\n x[:,:,i,j] = torch.relu(x[:,:,i,j])\r\n return x", "def forward(self, input):\n input, _ = input\n bs = input.shape[0]\n d1 = self.relu1(self.fc1(input))\n d2 = self.relu2(self.fc2(d1))\n d3 = self.fc3(d2)\n out = self.sigmoid(d3)\n\n out = out.view(bs, 17, 3)\n return out", "def fully_connected_forward(self, X, W, b):\n \n #############################################################################\n # TODO: Implement the forward pass of a fully connected layer and store #\n # the variables needed for the backward pass (gradient computation) #\n # as a tuple inside cache. 
#\n #############################################################################\n out = np.matmul(X, W) + b\n cache = (X, W, b)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n \n return out, cache", "def forward(self, x): \n out = self.layer1(x)\n out = self.layer2(out)\n\n out = out.reshape(out.size(0), -1)\n \n out = self.dropout(out)\n out = self.fc1(out)\n out = self.fc2(out)\n \n return out", "def forward(self, x: torch.tensor) -> torch.tensor:\n # flatten image input\n x = x.flatten(start_dim=1)\n # add hidden layer, with relu activation function\n x = self.relu(self.fc1(x))\n x = self.drop(x)\n \n x = self.relu(self.fc2(x))\n x = self.drop(x)\n \n x = self.relu(self.fc3(x))\n x = self.drop(x)\n \n x = self.fc4(x)\n x = self.sigmoid(self.classifier(x))\n \n return x.squeeze(), None", "def __feedforward(self, X):\n A = X\n for layer in self.layers:\n layer._Dense__forward(A)\n A = layer.A\n return A", "def forward(self, input_x) -> Tensor:\n conv_out = self.conv(input_x).view(input_x.size()[0], -1)\n return self.head(conv_out)", "def forward(self, input_x) -> Tensor:\n conv_out = self.conv(input_x).view(input_x.size()[0], -1)\n return self.head(conv_out)", "def forward(self, inp: torch.Tensor) -> torch.Tensor:\n x = self.conv1(inp)\n x = self.maxpool(x)\n\n for i in range(self._num_layers):\n x = getattr(self, \"C%d\" % (i + 1))(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n return x", "def forward(self, x):\n residues = []\n # Downward Pass\n x = self.layers[0](x.unsqueeze(1))\n for layer in self.layers[1:self.half]:\n x = layer(x)\n residues.insert(0, x)\n\n # Upward Pass\n for idx, layer in enumerate(self.layers[self.half:(len(self.layers)-1)]):\n x = layer(x, residues[idx])\n x = self.layers[-1](x)\n\n return(x)", "def makeFastFeedForwardFunction(self):\n\n\t\toutWeightMatrix = []\n\t\tfor unit in self.outputLayer:\n\n\t\t\trow = []\n\t\t\tfor b in unit.branchesIn:\n\t\t\t\tprint b.weight\n\t\t\t\trow.append(b.weight)\n\t\t\t\n\t\t\toutWeightMatrix.append(row)\n\t\toutWeightMatrix = np.array(outWeightMatrix).squeeze()\n\n\t\thiddenMatrices = []\n\t\tfor layer in self.hiddenLayers:\n\t\t\tmatrix = []\n\t\t\t#ignore the bias unit, since it has no branches in\n\t\t\tfor unit in layer[1:]:\n\t\t\t\trow = []\n\t\t\t\tfor b in unit.branchesIn:\n\t\t\t\t\trow.append(b.weight)\n\n\t\t\t\tmatrix.append(row)\n\t\t\tmatrix = np.array(matrix)\n\n\t\t\thiddenMatrices.append(matrix)\n\n\t\thidActFunc = (self.hiddenLayers[0])[1].activationFunction\n\t\toutActFunc = self.outputLayer[0].activationFunction\n\n\t\tdef ffFunc(inp):\n\t\n\t\t\tforward = np.insert(inp.T,0,1.0,axis=0)\n\t\t\tfor matrix in hiddenMatrices:\n\t\t\t\tnext = np.dot(matrix,forward)\n\t\t\t\tnext = hidActFunc(next)\n\t\t\t\tforward = np.insert(next,0,1.0,axis=0)\n\n\t\t\tout = np.dot(outWeightMatrix,forward)\n\n\t\t\treturn outActFunc(out)\n\n\t\treturn ffFunc", "def forward(self, input):\n return Conv1dfftFunctionCuda.apply(\n input, self.filter, self.bias, self.padding, self.compress_rate)", "def forward(self, x_in):\r\n return torch.sigmoid(self.fc1(x_in)).squeeze()", "def forward(self, x: Tuple[Tensor]) -> Tensor:\n inputs = x[self.start_level:self.end_level + 1]\n assert len(inputs) == (self.end_level - self.start_level + 1)\n feature_add_all_level = self.convs_all_levels[0](inputs[0])\n target_h, target_w = feature_add_all_level.size()[2:]\n for i in 
range(1, len(inputs)):\n input_p = inputs[i]\n x_p = self.convs_all_levels[i](input_p)\n h, w = x_p.size()[2:]\n factor_h = target_h // h\n factor_w = target_w // w\n assert factor_h == factor_w\n feature_per_level = aligned_bilinear(x_p, factor_h)\n feature_add_all_level = feature_add_all_level + \\\n feature_per_level\n\n feature_add_all_level = self.conv_branch(feature_add_all_level)\n feature_pred = self.conv_pred(feature_add_all_level)\n return feature_pred", "def forward(self, x_in):\r\n # x_out = torch.zeros_like(x_in)\r\n\r\n for layer in self.layers: #Call forward function of each layer in order\r\n x_out = layer.forward(x_in)\r\n # print(\"Forward pass Seq: \", layer, x_in, x_out)\r\n x_in = x_out # output of the layer is passed as input to the next layer\r\n self.temp = x_in\r\n return x_out", "def forward(self, x):\n h = x\n\n # Get features\n local_feat = self.local_feat_blocks(h) # (N, C, H, W)\n global_feat = self.global_feat_blocks(local_feat)\n global_feat = self.activation(global_feat)\n global_feat = torch.sum(global_feat, dim=(2, 3))\n\n # GAN task output\n output = self.linear(global_feat)\n\n return output, local_feat, global_feat", "def forward(self, x):\n for l in self.layers:\n w = l.weights\n b = l.biases\n x = self.sigmoid(np.dot(x, w) + b)\n return x", "def forward_once(self, x):\n output = self.cnn1(x)\n output = output.view(output.size()[0], -1)\n output = self.fc1(output)\n return output", "def forward_once(self, x):\n x = self.features(x)\n x = x.view(x.size(0), 512) # reshpe it into (batch_size, feature_dimention)\n return x", "def forward(self, x):\n\n # x = [batch size, seq len, hid dim]\n\n x = self.dropout(torch.relu(self.fc_1(x)))\n\n # x = [batch size, seq len, pf dim]\n\n x = self.fc_2(x)\n\n # x = [batch size, seq len, hid dim]\n\n return x", "def fully_connected(input_x, size, is_training, BN=False):\n weights = tf.get_variable('weights',\n shape=[input_x.get_shape()[1], size],\n initializer=tf.contrib.layers.xavier_initializer()\n )\n biases = tf.get_variable('biases',\n shape=[size],\n initializer=tf.constant_initializer(0.0)\n )\n out = tf.matmul(input_x, weights) + biases\n if BN:\n out = tf.contrib.layers.batch_norm(out, center=True, scale=True, is_training=is_training, scope='bn')\n return out", "def forward(self, x: torch.tensor) -> torch.tensor:\n input1 = x[:, 0, :, :].view(-1, 1, 14, 14) # size Bx1x14x14\n input2 = x[:, 1, :, :].view(-1, 1, 14, 14)\n \n x1 = self.forward_once(input1) # size Bx1x10\n x2 = self.forward_once(input2)\n \n auxiliary = torch.stack((x1, x2), 1) # size Bx2x10\n \n output = torch.cat((x1, x2), 1) # size Bx1x20\n output = self.relu(self.fc3(output)) # size Bx1x10\n output = self.sigmoid(self.fc4(output)) # size Bx1x1\n \n return output.squeeze(), auxiliary", "def forward(self, x):\n length, batch, dim = x.shape\n res = []\n res.append(self.one_step(x[0], torch.zeros((batch, self.latent_size), dtype=torch.float)))\n\n for i in range(1,length):\n res.append(self.one_step(x[i], res[i-1]))\n\n return torch.stack(res)", "def forward(self, x):\n length, batch, dim = x.shape\n res = []\n res.append(self.one_step(x[0], torch.zeros((batch, self.latent_size), dtype=torch.float)))\n\n for i in range(1,length):\n res.append(self.one_step(x[i], res[i-1]))\n\n return torch.stack(res)", "def forward(self, x):\n length, batch, dim = x.shape\n res = []\n res.append(self.one_step(x[0], torch.zeros((batch, self.latent_size), dtype=torch.float)))\n\n for i in range(1,length):\n res.append(self.one_step(x[i], res[i-1]))\n\n return 
torch.stack(res)", "def forward(self, inputs):\n\n # Convolution layers\n x = self.extract_features(inputs)\n\n # Pooling and final linear layer\n x = F.adaptive_avg_pool2d(x, 1).squeeze(-1).squeeze(-1)\n if self._dropout:\n x = F.dropout(x, p=self._dropout, training=self.training)\n x = self._fc(x)\n return x", "def forward(self, x):\n #delete all cts\n #self.cts = [self.cts[-1]]\n \n #forward\n length, batch, dim = x.shape\n res = []\n res.append(self.one_step(x[0], torch.zeros((batch, self.latent_size), dtype=torch.float)))\n\n for i in range(1,length):\n res.append(self.one_step(x[i], res[i-1]))\n\n return torch.stack(res)", "def forward(self, x):\n self.save_net()\n self.perturb_tensors()\n out = self.net.forward(x)\n return out", "def forward(self, x):\n x = self.efficient_net(x)\n return x", "def forward(self, X):\n if self.padding:\n X = zero_pad(X, padding_width=self.padding, dims=(2, 3))\n\n self.cache['X'] = X\n\n N, C, H, W = X.shape\n \n # To get the kernel dimension\n KH, KW = self.kernel_size \n \n #int((in_dim - f_s)/stride)+1\n out_shape = (N, self.out_channels, 1 + int((H - KH)/self.stride), 1 + int((W - KW)/self.stride))\n \n # Getting th output to have the same shape as the input\n Y = np.zeros(out_shape)\n \n for n in range(N):\n for c_w in range(self.out_channels):\n for h, w in product(range(out_shape[2]), range(out_shape[3])):\n h_offset, w_offset = h*self.stride, w*self.stride\n \n rec_field = X[n, :, h_offset:h_offset + KH, w_offset:w_offset + KW]\n # print(rec_field.shape)\n # print((self.weight['W'][c_w]).shape)\n Y[n, c_w, h, w] = np.sum(self.weight['W'][c_w]*rec_field) + self.weight['b'][c_w]\n assert(not isnan(np.max(Y)))\n\n return Y", "def forward(self, X):\n N = X.size()[0]\n assert X.size() == (N, 3, 448, 448)\n X = self.features(X)\n assert X.size() == (N, 512, 28, 28)\n X = X.view(N, 512, 28**2)\n X = torch.bmm(X, torch.transpose(X, 1, 2)) / (28**2) # Bilinear\n assert X.size() == (N, 512, 512)\n X = X.view(N, 512**2)\n X = torch.sqrt(X + 1e-5)\n X = torch.nn.functional.normalize(X)\n X = self.fc(X)\n assert X.size() == (N, 36)\n return X", "def forward(self, x):\n x1, x2 = x\n y1 = self.conv_net.forward(x1)\n y2 = self.sparse_net.forward(x2)\n return y1, y2", "def forward(self, X):\r\n N = X.size()[0]\r\n assert X.size() == (N, 3, 448, 448)\r\n X = self.features(X)\r\n assert X.size() == (N, 512, 28, 28)\r\n X = X.view(N, 512, 28**2)\r\n X = torch.bmm(X, torch.transpose(X, 1, 2)) / (28**2) # Bilinear\r\n assert X.size() == (N, 512, 512)\r\n X = X.view(N, 512**2)\r\n X = torch.sign(X)*torch.sqrt(torch.abs(X)+1e-12)\r\n # X = torch.sqrt(X + 1e-5)\r\n X = torch.nn.functional.normalize(X)\r\n X = self.fc(X)\r\n assert X.size() == (N, 11)\r\n return X", "def forward(self, x):\n\n # 2.2 BUG: Did Bob do anything wrong in the forward method?\n # HINT: Usually a CNN would expect correctly normalized data.\n # Roughly make input to be within -1 to 1 range\n x = (x - 127.5) / 127.5\n\n # Apply conv layers\n x = self.convs(x)\n\n # Global average pooling\n x = x.mean(-1).mean(-1)\n\n # Output layer\n x = self.output(x)\n\n return x", "def forward(self, x):\n sources = list()\n new_sources = list()\n\n # apply lds to the initial image\n x_pool = self.lds(x)\n\n # apply vgg up to conv4_3\n for k in range(22):\n x = self.features[k](x)\n conv4_3_bn = self.ibn1(x)\n x_pool1_skip, x_pool1_icn = self.icn1(x_pool)\n s = self.Norm1(conv4_3_bn * x_pool1_icn)\n\n # apply vgg up to fc7\n for k in range(22, 34):\n x = self.features[k](x)\n conv7_bn = self.ibn2(x)\n 
x_pool2_skip, x_pool2_icn = self.icn2(x_pool1_skip)\n p = self.Norm2(self.dsc1(s) + conv7_bn * x_pool2_icn)\n\n x = self.features[34](x)\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extra):\n x = v(x)\n if k == 0:\n x_pool3_skip, x_pool3_icn = self.icn3(x_pool2_skip)\n w = self.Norm3(self.dsc2(p) + x * x_pool3_icn)\n elif k == 2:\n x_pool4_skip, x_pool4_icn = self.icn4(x_pool3_skip)\n q = self.Norm4(self.dsc3(w) + x * x_pool4_icn)\n elif k == 4:\n o = self.Norm5(self.dsc4(q) + x)\n sources.append(o)\n elif k == 7 or k == 9:\n sources.append(x)\n else:\n pass\n\n # project the forward features into lower dimension.\n tmp1 = self.proj1(p)\n tmp2 = self.proj2(w)\n tmp3 = self.proj3(q)\n tmp4 = self.proj4(o)\n\n # The conv4_3 level\n proj1 = F.upsample(tmp1, scale_factor=2, mode='bilinear')\n proj2 = F.upsample(tmp2, scale_factor=4, mode='bilinear')\n proj3 = F.upsample(tmp3, scale_factor=8, mode='bilinear')\n proj4 = F.upsample(tmp4, scale_factor=16, mode='bilinear')\n proj = torch.cat([proj1, proj2, proj3, proj4], dim=1)\n\n agent1 = self.agent1(s)\n\n convert1 = self.convert1(proj)\n pred1 = torch.cat([agent1, convert1], dim=1)\n pred1 = self.merge1(pred1)\n new_sources.append(pred1)\n\n # The fc_7 level\n proj2 = F.upsample(tmp2, scale_factor=2, mode='bilinear')\n proj3 = F.upsample(tmp3, scale_factor=4, mode='bilinear')\n proj4 = F.upsample(tmp4, scale_factor=8, mode='bilinear')\n proj = torch.cat([proj2, proj3, proj4], dim=1)\n\n agent2 = self.agent2(p)\n convert2 = self.convert2(proj)\n pred2 = torch.cat([agent2, convert2], dim=1)\n pred2 = self.merge2(pred2)\n new_sources.append(pred2)\n\n # The conv8 level\n proj3 = F.upsample(tmp3, scale_factor=2, mode='bilinear')\n proj4 = F.upsample(tmp4, scale_factor=4, mode='bilinear')\n proj = torch.cat([proj3, proj4], dim=1)\n\n agent3 = self.agent3(w)\n convert3 = self.convert3(proj)\n pred3 = torch.cat([agent3, convert3], dim=1)\n pred3 = self.merge3(pred3)\n new_sources.append(pred3)\n\n # The conv9 level\n proj4 = F.upsample(tmp4, scale_factor=2, mode='bilinear')\n proj = proj4\n\n agent4 = self.agent4(q)\n convert4 = self.convert4(proj)\n pred4 = torch.cat([agent4, convert4], dim=1)\n pred4 = self.merge4(pred4)\n new_sources.append(pred4)\n\n for prediction in sources:\n new_sources.append(prediction)\n\n return new_sources", "def forward(self, x):\n x = self.pad_tensor(x)\n if self.network_controller.is_float_coefficient:\n return self.bn(self.conv(x))\n else:\n res = F.conv2d(x, self.q(self.conv.weight), self.conv.bias, self.stride,\n self.padding_conv, self.dilation, self.group)\n return self.bn(res)", "def forward(self, x):\n conv_output = self.conv1(x)\n\n # The window size of max pooling layer of CNN depends on the dimension of conv1d output.\n # Since padding size is 1 and kernal size is 5, so the output of conv1d is with dimension\n # length_of_input_sequence - 2 + 5 - 1 = length_of_input_sequence - 2\n x_conv = F.max_pool1d(F.relu(conv_output), x.size()[-1] - 2)\n return x_conv", "def forward(self, x):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\n\t\toutput = self._layers[0].forward(x)\n\t\tfor i in range(1, len(self._layers)):\n\t\t\toutput = self._layers[i].forward(output)\n\t\treturn output\n\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE 
**\n\t\t#######################################################################", "def forward(self, state):\n x = state.unsqueeze(1)\n x = F.relu(self.cnl1(x))\n x = F.relu(self.cnl2(x))\n x = F.relu(self.cnl3(x))\n x = x.view(x.shape[0], -1) # flatten\n x = F.relu(self.dense1(x))\n x = self.out(x)\n return x", "def forward(self, input_x):\n return self.net(input_x.float())", "def forward_pass(X,architecture):\n \n architecture['layer1'][0] = X\n kernel_shape1 = architecture['layer1'][7]\n stride1 = architecture['layer1'][8]\n if kernel_shape1 is not None and not isinstance(kernel_shape1,int):\n X_input_1_im2col,imX = im2col(X,kernel_shape1,stride1,im_needed = False, shape_specified = True)\n architecture['layer1'][4] = X_input_1_im2col\n else:\n architecture['layer1'][4] = None\n\n for layer in range(len(architecture)): # Feedforward from the first till the second last layer\n X_input,X_output,weightsi,biasi,X_input_1_im2col,imi,output_shapei,kernel_shapei,stridei,operationi,imx = architecture['layer{}'.format(layer+1)]\n\n if operationi == 'conv_bn_relu':\n conv_output = relu(BatchNorm(torch.t(X_input_1_im2col).mm(weightsi) + biasi))\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'conv_relu':\n conv_output = relu(torch.t(X_input_1_im2col).mm(weightsi) + biasi)\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'conv_bn_sigmoid':\n conv_output = sigmoid(BatchNorm(torch.t(X_input_1_im2col).mm(weightsi) + biasi))\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n 
architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'conv_sigmoid':\n conv_output = sigmoid(torch.t(X_input_1_im2col).mm(weightsi) + biasi)\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'maxpool':\n maxpool_output = maxpool(X_input,kernel_shapei,stridei)\n\n maxpool_output = torch.reshape(maxpool_output,output_shapei)\n\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = maxpool_output\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n architecture['layer{}'.format(layer+2)][4],imX = im2col(maxpool_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'flatten_dense_relu':\n # kernel_shapei in this case refers to the output channels: stride for dense layer will be None\n output = flatten_and_dense(X_input,kernel_shapei,weightsi,biasi,activation = 'relu',initialise_weights = False)\n architecture['layer{}'.format(layer+1)][1] = output\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = output\n elif operationi == 'flatten_dense_none':\n # kernel_shapei in this case refers to the output channels: stride for dense layer will be None\n output = flatten_and_dense(X_input,kernel_shapei,weightsi,biasi,activation = 'none',initialise_weights = False)\n architecture['layer{}'.format(layer+1)][1] = output\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = output\n elif operationi == 'flatten_dense_sigmoid':\n # kernel_shapei in this case refers to the output channels: stride for dense layer will be None\n output = 
flatten_and_dense(X_input,kernel_shapei,weightsi,biasi,activation = 'sigmoid',initialise_weights = False)\n architecture['layer{}'.format(layer+1)][1] = output\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = output\n elif operationi == 'softmax':\n Xin = architecture['layer{}'.format(layer+1)][0]\n output = softmax(Xin).squeeze()\n architecture['layer{}'.format(layer+1)][1] = output\n if layer == len(architecture) - 1:\n y_pred = architecture['layer{}'.format(len(architecture))][1]\n \n return y_pred", "def forward(self, x):\n x = x.float()\n n, c, t, v, m = x.size()\n x = x.permute(0, 4, 3, 1, 2).contiguous()\n x = x.view(n * m, v * c, t)\n x = self.data_bn(x)\n x = x.view(n, m, v, c, t)\n x = x.permute(0, 1, 3, 4, 2).contiguous()\n x = x.view(n * m, c, t, v)\n for gcn in self.agcn_networks:\n x = gcn(x)\n return x", "def forward(self, X):\r\n # input layer\r\n self.ff[0] = X\r\n # hidden layer\r\n for x in range(1, np.shape(self.ff)[0]-1):\r\n self.ff[x] = self.hid_transfer(self.weights[x-1].dot(self.ff[x-1]) + self.bias[x-1])\r\n # output layer\r\n self.ff[-1] = self.out_transfer(self.weights[-1].dot(self.ff[-2]) + self.bias[-1])", "def fully_connected_relu(input, size):\n return tf.nn.relu(fully_connected(input, size))", "def fully_connected(input, size):\n weights = tf.get_variable('weights',\n shape=[input.get_shape()[1], size],\n initializer=tf.contrib.layers.xavier_initializer()\n )\n biases = tf.get_variable('biases',\n shape=[size],\n initializer=tf.constant_initializer(0.0)\n )\n return tf.matmul(input, weights) + biases", "def forward_features(self, x):\n x_size = (x.shape[2], x.shape[3])\n x = self.patch_embed(x)\n if self.ape:\n x = x + self.absolute_pos_embed\n x = self.pos_drop(x)\n\n for layer in self.layers:\n x = layer(x, x_size)\n\n x = self.norm(x) # B L C\n x = self.patch_unembed(x, x_size)\n\n return x", "def forward(self, x):\n batch_size, channels, width, height = x.size()\n\n # Input Layer: (batch_size, 1, 28, 28) -> (batch_size, 1*28*28)\n x = x.view(batch_size, -1)\n\n # Layer 1: (batch_size, 1*28*28) -> (batch_size, 128)\n x = self.layer_1(x)\n x = torch.relu(x)\n\n # Layer 2: (batch_size, 128) -> (batch_size, 256)\n x = self.layer_2(x)\n x = torch.relu(x)\n\n # Layer 3: (batch_size, 256) -> (batch_size, 10)\n x = self.layer_3(x)\n x = torch.log_softmax(x, dim=1)\n\n return x", "def forward(self, x):\n # define feedforward behavior, applying activations as necessary\n out = self.leaky_relu(self.conv1(x))\n out = self.leaky_relu(self.conv2(out))\n out = self.leaky_relu(self.conv3(out))\n out = self.leaky_relu(self.conv4(out))\n\n out = self.res_blocks(out)\n\n out = self.leaky_relu(self.deconv1(out))\n out = self.leaky_relu(self.deconv2(out))\n out = self.leaky_relu(self.deconv3(out))\n\n # tanh applied to last layer\n out = F.tanh(self.out_layer(out))\n out = torch.clamp(out, min=-0.5, max=0.5)\n\n return out", "def forward(self, input):\n\n # Work on each channel separately\n all_features = []\n\n for channel in range(0, self.n_channels):\n input_channel = input[:, :, channel]\n\n # Add a dummy (spatial) dimension for the time convolutions\n # Conv1D format : (batch_size, n_feature_maps, duration)\n input_channel = input_channel.unsqueeze(1)\n\n high = self.all_conv_high[channel](input_channel)\n low = self.all_conv_low[channel](input_channel)\n ap_residual = self.all_residual[channel](input_channel)\n\n # Time convolutions are concatenated along the feature maps axis\n output_channel = torch.cat([\n high,\n low,\n 
ap_residual\n ], dim=1)\n all_features.append(output_channel)\n\n # Concatenate along the feature maps axis\n all_features = torch.cat(all_features, dim=1)\n # Flatten for the Linear layers\n all_features = all_features.view(-1,\n 9 * self.n_channels * 12) # <-- 12: depends of the initial sequence length (100).\n # If you have shorter/longer sequences, you probably do NOT even need to modify the modify the network architecture:\n # resampling your input gesture from T timesteps to 100 timesteps will (surprisingly) probably actually work as well!\n\n # Fully-Connected Layers\n output = self.fc(all_features)\n\n return output", "def forward(self, x):\r\n h_0 = torch.zeros(\r\n self.num_layers, x.size(0), self.hidden_size).to(device)\r\n\r\n c_0 = torch.zeros(\r\n self.num_layers, x.size(0), self.hidden_size).to(device)\r\n\r\n # Propagate input through LSTM\r\n ula, (h_out, _) = self.lstm(x, (h_0, c_0))\r\n\r\n h_out = h_out.view(-1, self.hidden_size)\r\n\r\n out = self.fc(h_out)\r\n\r\n return out", "def feedforward(self,x):\n \n a = [] #list of activation vectors\n for i,weight in enumerate(self.weights):\n if i == 0:\n a[i] = sigmoid(np.dot(weight,x))\n else:\n a[i] = sigmoid(np.dot(weight,a[i-1]))\n \n self.layer1 = sigmoid(np.dot(self.input, self.weights1))\n self.output = sigmoid(np.dot(self.layer1, self.weights2))", "def forward(self, x):\n x = self.feature_extractor(x)\n batch_size, hidden = x.size()\n\n x = self.layer_1(x)\n x = torch.relu(x)\n x = self.layer_2(x)\n x = torch.relu(x)\n x = self.layer_3(x)\n\n x = torch.log_softmax(x, dim=1)\n return x", "def forward(self, input):\n\n # Work on each channel separately\n all_features = []\n\n for channel in range(0, self.n_channels):\n input_channel = input[:, :, channel]\n\n # Add a dummy (spatial) dimension for the time convolutions\n # Conv1D format : (batch_size, n_feature_maps, duration)\n input_channel = input_channel.unsqueeze(1)\n\n high = self.all_conv_high[channel](input_channel)\n low = self.all_conv_low[channel](input_channel)\n ap_residual = self.all_residual[channel](input_channel)\n\n # Time convolutions are concatenated along the feature maps axis\n output_channel = torch.cat([\n high,\n low,\n ap_residual\n ], dim=1)\n all_features.append(output_channel)\n\n # Concatenate along the feature maps axis\n all_features = torch.cat(all_features, dim=1)\n\n # Flatten for the Linear layers\n all_features = all_features.view(-1,\n 9 * self.n_channels * 12) # <-- 12: depends of the initial sequence length (100).\n # If you have shorter/longer sequences, you probably do NOT even need to modify the modify the network architecture:\n # resampling your input gesture from T timesteps to 100 timesteps will (surprisingly) probably actually work as well!\n\n # Fully-Connected Layers\n output = self.fc(all_features)\n\n return output", "def forward(self, x):\n self.activations[0] = np.dot(x,self.weights[0]) + self.biases[0]\n self.zetas[0] = self.activation_f(self.activations[0])\n for i in range(1, self.n_layers-1):\n self.activations[i] = np.dot(self.zetas[i-1],self.weights[i]) \\\n + self.biases[i]\n self.zetas[i] = self.activation_f(self.activations[i])\n self.activations[-1] = np.dot(self.zetas[-2],self.weights[-1]) \\\n + self.biases[-1]\n self.zetas[-1] = self.activation_out_f(self.activations[-1])\n if self.activation_out_function == 'softmax':\n z = np.sum(self.zetas[-1], axis=1)\n z = np.reshape(z,(-1,1))\n self.zetas[-1] = np.divide(self.zetas[-1],z)\n return self.zetas[-1]", "def forward(self, x):\n flows_forward, 
flows_backward = self.get_flow(x)\n b, n, _, h, w = x.size()\n\n # backward branch\n out_l = []\n feat_prop = x.new_zeros(b, self.num_feat, h, w)\n for i in range(n - 1, -1, -1):\n x_i = x[:, i, :, :, :]\n if i < n - 1:\n flow = flows_backward[:, i, :, :, :]\n feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))\n feat_prop = torch.cat([x_i, feat_prop], dim=1)\n feat_prop = self.backward_trunk(feat_prop)\n out_l.insert(0, feat_prop)\n\n # forward branch\n feat_prop = torch.zeros_like(feat_prop)\n for i in range(0, n):\n x_i = x[:, i, :, :, :]\n if i > 0:\n flow = flows_forward[:, i - 1, :, :, :]\n feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))\n\n feat_prop = torch.cat([x_i, feat_prop], dim=1)\n feat_prop = self.forward_trunk(feat_prop)\n\n # upsample\n out = torch.cat([out_l[i], feat_prop], dim=1)\n out = self.lrelu(self.fusion(out))\n out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))\n out = self.lrelu(self.conv_hr(out))\n out = self.conv_last(out)\n base = F.interpolate(x_i, scale_factor=4, mode='bilinear', align_corners=False)\n out += base\n out_l[i] = out\n\n return torch.stack(out_l, dim=1)", "def forward(self, X):\n X = np.asarray(X)\n \n if (len(X.shape) == 1):\n X = np.reshape(X, (1, len(X)))\n \n if (self.inputBias == True):\n # add a bias unit to each row\n rows = []\n \n for i in range(0, X.shape[0]):\n rows.append(np.append(X[i],1))\n \n X = np.asarray(rows)\n \n \n if (len(self.hlayers) == 0):\n print(\"No hidden layers yet! Please add hidden layers.\")\n return 0\n \n z = np.matmul(X, self.weights[0]) # result of inputlayer x weights\n a = self.hlayers[0].activate(z) # apply activation function at first hidden layer\n \n if (len(self.hlayers) > 1):\n for i in range(1, len(self.hlayers)):\n z = np.matmul(a, self.weights[i])\n a = self.hlayers[i].activate(z)\n return a", "def forward(self, x):\n #print(\"full frwd x shape:\",x.shape)\n y=np.zeros((x.shape[0],self.W.shape[0]))\n y=np.dot(x,np.transpose(self.W))+self.b\n self.x=np.copy(x)\n return y", "def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x", "def forward(self , x):\r\n x = self.fc1(x) #(n_samples , n_patches + 1,hidden_dim)\r\n x = self.act1(x) #(n_samples , n_patches + 1, hidden_dim)\r\n x = self.fc2(x) # (n_samples , n_patches + 1 , output_dim)\r\n x = self.drop(x) #(n_samples , n_patches + 1, output_dim)\r\n return x", "def forward(self, x):\n batch_size = x.shape[0]\n x = x.mean(dim=-1).mean(dim=-1)\n init_pose = self.init_pose.expand(batch_size, -1)\n init_shape = self.init_shape.expand(batch_size, -1)\n init_cam = self.init_cam.expand(batch_size, -1)\n pred_pose = init_pose\n pred_shape = init_shape\n pred_cam = init_cam\n for _ in range(self.n_iter):\n xc = torch.cat([x, pred_pose, pred_shape, pred_cam], 1)\n xc = self.fc1(xc)\n xc = self.drop1(xc)\n xc = self.fc2(xc)\n xc = self.drop2(xc)\n pred_pose = self.decpose(xc) + pred_pose\n pred_shape = self.decshape(xc) + pred_shape\n pred_cam = self.deccam(xc) + pred_cam\n pred_rotmat = rot6d_to_rotmat(pred_pose).view(batch_size, 24, 3, 3)\n out = pred_rotmat, pred_shape, pred_cam\n return out", "def __feed_forward(self, X):\n # go over all layers\n for layer in self.__layers:\n X = layer.compute_act(X)\n\n return X", "def feedforward(self, _input):\r\n self._input = _input\r\n self.hidden_layers[0] = sigmoid(np.dot(self._input, self[0]))\r\n for i in range(1, len(self.hidden_layers)):\r\n matrix = np.dot(self.hidden_layers[i - 1], 
self[i])\r\n self.hidden_layers[i] = sigmoid(matrix)\r\n self._output = sigmoid(np.dot(self.hidden_layers[-1], self[-1]))", "def feed_forward(self, X):\n Z = self._activation(dot(c_[X, ones((X.shape[0], 1))], self.W_hidden))\n return self._activation(dot(c_[Z, ones((X.shape[0], 1))], self.W_output)), Z", "def forward_pass(self):\n # Have to use one_hot labels since sparse softmax doesn't allow\n # second derivatives.\n one_hot_train_labels = tf.one_hot(self.data.train_labels, self.way)\n train_embeddings_ = self.embedding_fn(\n self.data.train_images,\n depth_multiplier=self.depth_multiplier,\n reuse=tf.AUTO_REUSE)\n train_embeddings = train_embeddings_['embeddings']\n embedding_vars_dict = train_embeddings_['params']\n\n with tf.variable_scope('linear_classifier', reuse=tf.AUTO_REUSE):\n embedding_depth = train_embeddings.shape.as_list()[-1]\n fc_weights = weight_variable([embedding_depth, MAX_WAY])\n fc_bias = bias_variable([MAX_WAY])\n\n embedding_vars_keys = []\n embedding_vars = []\n embedding_vars_copy_ops = []\n for name, var in embedding_vars_dict.iteritems():\n embedding_vars_keys.append(name)\n if not self.is_training:\n with tf.variable_scope('weight_copy'):\n shape = var.shape.as_list()\n var_copy = tf.Variable(\n tf.zeros(shape), collections=[tf.GraphKeys.LOCAL_VARIABLES])\n var_copy_op = tf.assign(var_copy, var)\n embedding_vars_copy_ops.append(var_copy_op)\n embedding_vars.append(var_copy)\n else:\n embedding_vars.append(var)\n\n fc_vars_copy_ops = []\n if not self.is_training:\n with tf.variable_scope('weight_copy'):\n # fc_weights copy\n fc_weights_copy = tf.Variable(\n tf.zeros(fc_weights.shape.as_list()),\n collections=[tf.GraphKeys.LOCAL_VARIABLES])\n fc_weights_copy_op = tf.assign(fc_weights_copy, fc_weights)\n fc_vars_copy_ops.append(fc_weights_copy_op)\n\n # fc_bias copy\n fc_bias_copy = tf.Variable(\n tf.zeros(fc_bias.shape.as_list()),\n collections=[tf.GraphKeys.LOCAL_VARIABLES])\n fc_bias_copy_op = tf.assign(fc_bias_copy, fc_bias)\n fc_vars_copy_ops.append(fc_bias_copy_op)\n\n fc_weights = fc_weights_copy\n fc_bias = fc_bias_copy\n\n fc_vars = [fc_weights, fc_bias]\n num_embedding_vars = len(embedding_vars)\n num_fc_vars = len(fc_vars)\n\n def _cond(step, *args):\n del args\n num_steps = self.num_update_steps\n if not self.is_training:\n num_steps += self.additional_test_update_steps\n return step < num_steps\n\n def _body(step, *args):\n \"\"\"The inner update loop body.\"\"\"\n updated_embedding_vars = args[0:num_embedding_vars]\n updated_fc_vars = args[num_embedding_vars:num_embedding_vars +\n num_fc_vars]\n train_embeddings = self.embedding_fn(\n self.data.train_images,\n params=collections.OrderedDict(\n zip(embedding_vars_keys, updated_embedding_vars)),\n depth_multiplier=self.depth_multiplier,\n reuse=True)['embeddings']\n\n if self.proto_maml_fc_layer_on_support_set:\n # Set fc layer weights with prototypical equivalent values.\n prototypes = self.proto_maml_prototypes(train_embeddings)\n pmaml_fc_weights = self.proto_maml_fc_weights(\n prototypes, zero_pad_to_max_way=True)\n pmaml_fc_bias = self.proto_maml_fc_bias(\n prototypes, zero_pad_to_max_way=True)\n train_logits = tf.matmul(train_embeddings,\n pmaml_fc_weights) + pmaml_fc_bias\n else:\n updated_fc_weights, updated_fc_bias = updated_fc_vars\n train_logits = tf.matmul(train_embeddings,\n updated_fc_weights) + updated_fc_bias\n\n train_logits = train_logits[:, 0:self.way]\n loss = tf.losses.softmax_cross_entropy(one_hot_train_labels, train_logits)\n\n if self.debug_log:\n print_op = tf.print(['step: 
', step, updated_fc_bias[0], 'loss:', loss])\n else:\n print_op = tf.no_op()\n\n embedding_grads = tf.gradients(loss, updated_embedding_vars)\n # Only computes fc grad when it's not created from prototypes.\n if not self.proto_maml_fc_layer_on_support_set:\n fc_grads = tf.gradients(loss, updated_fc_vars)\n\n if self.first_order:\n\n def _stop_grads(grads):\n return [tf.stop_gradient(dv) for dv in grads]\n\n embedding_grads = _stop_grads(embedding_grads)\n if not self.proto_maml_fc_layer_on_support_set:\n fc_grads = _stop_grads(fc_grads)\n\n # Apply gradients\n def _apply_grads(variables, grads):\n \"\"\"Applies gradients using SGD on a list of variables.\"\"\"\n v_new = []\n for (v, dv) in zip(variables, grads):\n if (not self.train_batch_norm and\n ('offset' in v.name or 'scale' in v.name)):\n v_new.append(v)\n else:\n v_new.append(v - self.alpha * dv)\n return v_new\n\n with tf.control_dependencies([print_op]):\n updated_embedding_vars = _apply_grads(updated_embedding_vars,\n embedding_grads)\n # Only apply fc grad when it's not created from prototypes.\n if not self.proto_maml_fc_layer_on_support_set:\n updated_fc_vars = _apply_grads(updated_fc_vars, fc_grads)\n step = step + 1\n return tuple([step] + list(updated_embedding_vars) +\n list(updated_fc_vars))\n\n # MAML meta updates using query set examples from an episode.\n if self.zero_fc_layer:\n # To account for variable class sizes, we initialize the output\n # weights to zero. See if truncated normal initialization will help.\n zero_weights_op = tf.assign(fc_weights, tf.zeros_like(fc_weights))\n zero_bias_op = tf.assign(fc_bias, tf.zeros_like(fc_bias))\n fc_vars_init_ops = [zero_weights_op, zero_bias_op]\n else:\n fc_vars_init_ops = fc_vars_copy_ops\n\n if self.proto_maml_fc_layer_init:\n train_embeddings = self.embedding_fn(\n self.data.train_images,\n params=collections.OrderedDict(\n zip(embedding_vars_keys, embedding_vars)),\n depth_multiplier=self.depth_multiplier,\n reuse=True)['embeddings']\n prototypes = self.proto_maml_prototypes(train_embeddings)\n pmaml_fc_weights = self.proto_maml_fc_weights(\n prototypes, zero_pad_to_max_way=True)\n pmaml_fc_bias = self.proto_maml_fc_bias(\n prototypes, zero_pad_to_max_way=True)\n fc_vars = [pmaml_fc_weights, pmaml_fc_bias]\n\n with tf.control_dependencies(fc_vars_init_ops + embedding_vars_copy_ops):\n # We will first compute gradients using the initial weights\n # Don't want to restore it during eval.\n step = tf.Variable(\n 0,\n trainable=False,\n name='inner_step_counter',\n collections=[tf.GraphKeys.LOCAL_VARIABLES])\n loop_vars = [step] + embedding_vars + fc_vars\n step_and_all_updated_vars = tf.while_loop(\n _cond, _body, loop_vars, swap_memory=True)\n step = step_and_all_updated_vars[0]\n all_updated_vars = step_and_all_updated_vars[1:]\n updated_embedding_vars = all_updated_vars[0:num_embedding_vars]\n updated_fc_weights, updated_fc_bias = all_updated_vars[\n num_embedding_vars:num_embedding_vars + num_fc_vars]\n\n # Forward pass the training images with the updated weights in order to\n # compute the means and variances, to use for the query's batch norm.\n support_set_moments = None\n if not self.transductive_batch_norm:\n support_set_moments = self.embedding_fn(\n self.data.train_images,\n params=collections.OrderedDict(\n zip(embedding_vars_keys, updated_embedding_vars)),\n depth_multiplier=self.depth_multiplier,\n reuse=True)['moments']\n\n test_embeddings = self.embedding_fn(\n self.data.test_images,\n params=collections.OrderedDict(\n zip(embedding_vars_keys, 
updated_embedding_vars)),\n moments=support_set_moments, # Use support set stats for batch norm.\n depth_multiplier=self.depth_multiplier,\n reuse=True,\n backprop_through_moments=self.backprop_through_moments)['embeddings']\n\n if not self.proto_maml_fc_layer_on_query_set:\n self.test_logits = (tf.matmul(test_embeddings, updated_fc_weights) +\n updated_fc_bias)[:, 0:self.way]\n else:\n train_embeddings = self.embedding_fn(\n self.data.train_images,\n params=collections.OrderedDict(\n zip(embedding_vars_keys, updated_embedding_vars)),\n depth_multiplier=self.depth_multiplier,\n reuse=True)['embeddings']\n prototypes = self.proto_maml_prototypes(train_embeddings)\n pmaml_fc_weights = self.proto_maml_fc_weights(prototypes)\n pmaml_fc_bias = self.proto_maml_fc_bias(prototypes)\n self.test_logits = (\n tf.matmul(test_embeddings, pmaml_fc_weights) + pmaml_fc_bias)", "def forward(self,x):\n embeds = self.embedding(x)\n \n x = torch.unsqueeze(embeds,1)\n # print('x',x.shape)\n xs = []\n for conv in self.convs:\n x2 = torch.tanh(conv(x))\n # print('after filter',x2.shape)\n x2 = torch.squeeze(x2,-1)\n # print('after squeeze',x2.shape)\n x2 = F.max_pool1d(x2,x2.size(2))\n \n xs.append(x2)\n \n x = torch.cat(xs,2)\n x = x.view(x.size(0),-1)\n logits = self.fc(x)\n return torch.sigmoid(logits)", "def forward(self, x):\r\n y = self.en_fc1(x)\r\n y = F.relu(y)\r\n y = self.en_fc2(y)\r\n y = F.relu(y)\r\n y = self.en_fc3(y)\r\n y = F.relu(y)\r\n\r\n mean = self.en_mu(y)\r\n stddev_p = self.en_log(y)\r\n \r\n n = x.shape[0]\r\n z = torch.randn(n,self.latent_dim)\r\n std = torch.exp(stddev_p/2.0)\r\n z = z.mul(std) + mean\r\n \r\n xhat = self.de_fc1(z)\r\n xhat = F.relu(xhat)\r\n xhat = self.de_fc2(xhat)\r\n xhat = F.relu(xhat)\r\n xhat = self.de_fc3(xhat)\r\n xhat = F.sigmoid(xhat)\r\n \r\n return y,mean,stddev_p,z,xhat" ]
[ "0.7153318", "0.6853486", "0.682324", "0.67808783", "0.6726576", "0.67157876", "0.66740847", "0.66715527", "0.6665411", "0.6663272", "0.6636702", "0.66286105", "0.6605179", "0.65966856", "0.6590063", "0.65451944", "0.6540172", "0.6539711", "0.6511885", "0.64758414", "0.64528996", "0.6444381", "0.6423562", "0.64163613", "0.6391713", "0.63840425", "0.6376284", "0.6372927", "0.63381517", "0.63167536", "0.63167536", "0.63134646", "0.63093585", "0.63034546", "0.62881094", "0.62815404", "0.6273332", "0.62608737", "0.6253739", "0.62499267", "0.62385386", "0.623767", "0.623767", "0.62337184", "0.62299496", "0.6225511", "0.6212683", "0.6212048", "0.6212013", "0.6202469", "0.61976403", "0.6173155", "0.6171528", "0.6166246", "0.6146942", "0.6142432", "0.61407465", "0.6134516", "0.6134516", "0.6134516", "0.6129964", "0.6111522", "0.6101114", "0.6096838", "0.6096203", "0.6094912", "0.6092404", "0.60897684", "0.6088939", "0.60806316", "0.6072783", "0.60557", "0.60501766", "0.604974", "0.6046282", "0.6041999", "0.6040178", "0.6040101", "0.60396767", "0.6036304", "0.6033625", "0.6033447", "0.6033205", "0.6030767", "0.6025275", "0.60235846", "0.6022737", "0.60222197", "0.60211504", "0.6018906", "0.6016851", "0.6004627", "0.60017747", "0.60014474", "0.6000238", "0.59966433", "0.5995386", "0.59909296", "0.5970096", "0.59688795", "0.5964879" ]
0.0
-1
Computes the backward pass for a fully_connected layer.
def fc_backward(dout, cache):
    x, w, b = cache
    dx, dw, db = None, None, None
    ###########################################################################
    # TODO: Implement the affine backward pass.                               #
    ###########################################################################
    N = x.shape[0]
    x2d = x.reshape(N, -1)
    dx = dout.dot(w.T)
    dx = dx.reshape(x.shape)
    dw = x2d.T.dot(dout)
    db = dout.sum(axis=0)  # sum the upstream gradient over the batch dimension
    ###########################################################################
    #                            END OF YOUR CODE                             #
    ###########################################################################
    return dx, dw, db
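A minimal usage sketch, assuming a hypothetical fc_forward counterpart (not defined in this row): it shows how the (x, w, b) cache consumed by fc_backward would typically be produced, followed by a quick check that each returned gradient matches the shape of the input it corresponds to.

import numpy as np

def fc_forward(x, w, b):
    # Hypothetical helper (assumption, not from the source): flatten each sample,
    # apply the affine transform, and keep the inputs as the cache for fc_backward.
    N = x.shape[0]
    out = x.reshape(N, -1).dot(w) + b
    cache = (x, w, b)
    return out, cache

x = np.random.randn(4, 3, 5)        # 4 samples, 3*5 = 15 features each
w = np.random.randn(15, 7)          # maps 15 flattened features to 7 outputs
b = np.random.randn(7)
out, cache = fc_forward(x, w, b)
dout = np.random.randn(*out.shape)  # upstream gradient from the next layer
dx, dw, db = fc_backward(dout, cache)
assert dx.shape == x.shape and dw.shape == w.shape and db.shape == b.shape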
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def backward_pass(self):\r\n # the gradient of cross-entropy on top of softmax is (t-y)\r\n back_output = (self.targets - self.y) / self.y.shape[0]\r\n\r\n for layer in reversed(self.layers):\r\n back_output = layer.backward_pass(back_output)", "def backward_pass(self, loss):\n\n self.optimizer.zero_grad()\n self.optimizer.backward(loss)\n self.optimizer.step()", "def _backward(loss):\n\n loss.backward()", "def backward_pass(architecture,gradient_layerwise,grad_weights,grad_bias):\n \n for layer in range(len(architecture)-1,-1,-1):\n X_input,X_output,weightsi,biasi,X_input_im2col,imi,output_shapei,kernel_shapei,stridei,operationi,imxi = architecture['layer{}'.format(layer+1)]\n# print(\"Operation is:{} and Layer is: {}\".format(operationi,layer+1))\n if operationi == 'softmax': # Last layer -> Dont apply softmax in any layer other than the last layer!\n # not taking gradients here because we need dz_dX(secondlastlayer) which is y_pred - y\n continue\n \n if operationi == 'conv_bn_relu' or operationi == 'conv_relu' or operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if operationi__1 == 'softmax':\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi # .\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # .\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input_im2col)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input_im2col)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi #\n elif operationi__1 == 'maxpool': # need to do something here to fix the problem\n None\n\n elif 'flatten' in operationi__1:\n # we currently have dz_doutput of flatten -> we want dz_doutput of the conv_bn_relu before flatten\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2] # weights2\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput of flatten\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5] # i\n try:\n dz_dXi = torch.t(weightsi__1).mm(dz_dXi__1)\n except:\n dz_dXi = weightsi__1.mm(dz_dXi__1)\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n\n dz_dXi = torch.reshape(dz_dXi,(output_shapei[1]*output_shapei[2],-1))\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n dz_dweightsi = X_input_im2col.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n dz_dbi = dz_dXi\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)# Can also set this to 
layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi) # Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi) # Can also set this to layer like in line ~800\n \n else:\n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dX2 -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n \n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n if 'sigmoid' in operationi__1: # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi__1: # ...\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n dz_dXi = torch.reshape(dz_dXi,(output_shape_current_layer[1]*output_shape_current_layer[2],-1))\n dz_dbi = torch.reshape(dz_dXi,bias_current_layer.shape)\n dz_dweightsi = X_im2col_current_layer.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n \n if operationi == 'maxpool':\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n \n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n try:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n except:\n Y = torch.t(weightsi__1).mm(dz_dXi__1) # Ensuring valid matrix multiplication here\n \n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n \n if operationi__1 == 'conv_sigmoid' or operationi__1 == 'conv_bn_sigmoid': # ...\n 
X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n else:\n dz_dXi[X_output <= 0] = 0\n\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n \n dz_dXinput = torch.zeros((X_input.shape))\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+1)][0] # output = output of maxpool\n\n dz_dXoutput = torch.reshape(dz_dXoutput,(output_shapei[0],X_input_im2col.shape[2]))\n \n for i in range(output_shapei[0]):\n for j in range(X_input_im2col.shape[2]):\n Xi2ci = X_im2col_current_layer[i,:,:]\n idx = torch.argmax(Xi2ci[:,j]).item()\n value = imxi[i][(idx,j)]\n dz_dXinput[value[0],value[1],value[2]] += float(dz_dXoutput[i,j])\n\n# dz_dXinput = torch.reshape(dz_dXinput,output_shapei)\n \n X_prev_im2col = architecture['layer{}'.format(layer)][4]\n X_output_prev = architecture['layer{}'.format(layer)][1]\n X_output_prev = torch.reshape(X_output_prev,dz_dXinput.shape)\n X_input_prev = architecture['layer{}'.format(layer)][0]\n prev_bias = architecture['layer{}'.format(layer)][3]\n output_shape_prev = architecture['layer{}'.format(layer)][6]\n prev_operation = architecture['layer{}'.format(layer)][9]\n \n if prev_operation == 'conv_sigmoid' or prev_operation == 'conv_bn_sigmoid':\n dz_dXinput *= sigmoid(X_output_prev)*(1-sigmoid(X_output_prev)) # Taking the derivative of the sigmoid function\n else:\n dz_dXinput[X_output_prev <= 0] = 0\n \n if len(dz_dXinput.shape) == 3:\n dz_dXinput = torch.reshape(dz_dXinput,(-1,output_shape_prev[0]))\n \n dz_dbi = torch.reshape(dz_dXinput,prev_bias.shape)\n dz_dweightsi = X_prev_im2col.mm(dz_dXinput)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer)][0] = torch.Tensor(dz_dXinput) # ...\n \n if 'flatten_dense' in operationi:\n \n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n \n if operationi__1 == 'softmax':\n \n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n X_output = torch.reshape(X_output,(-1,1))\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if 'sigmoid' in operationi:\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi # Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi # Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # Can also set this to layer like in line ~800\n \n else:\n # Have to modify and test this before implementation -> Specifically\n # the backprop implementation is not consistent with the ones above\n #\n X_output = torch.reshape(X_output,(-1,1))\n 
weights__i = architecture['layer{}'.format(layer+2)][2]\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+2)][0]\n dz_dXoutput = torch.reshape(torch.Tensor(dz_dXoutput),X_output.shape)\n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n\n if 'relu' in operationi:\n dz_dXoutput[X_output<0] = 0\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n if 'sigmoid' in operationi:\n dz_dXoutput*= sigmoid(X_output)*(1-sigmoid(X_output))\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n else:\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n \n unflattened_Xinput = architecture['layer{}'.format(layer+1)][0]\n dz_dXinput = torch.reshape(dz_dXinput,unflattened_Xinput.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXinput)\n \n if gradient_layerwise['layer{}'.format(layer+1)][1] is not None:\n try:\n grad_weights['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][1]\n except:\n grad_weights['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][1])\n if gradient_layerwise['layer{}'.format(layer+1)][2] is not None:\n try:\n grad_bias['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][2]\n except:\n grad_bias['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][2])\n \n gc.collect()\n return", "def backward(self, gradient):\n raise NotImplementedError()", "def backward(self, dout):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n for l in range(len(self.layers)-1,-1,-1):\n act_dout = self.activations[l].backward(dout)\n dout = self.layers[l].backward(act_dout)\n ########################\n # END OF YOUR CODE #\n #######################\n\n return", "def backward_and_step(self, loss):\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()", "def backward(self, loss):\n global_timer.my_timer.start_profile(\"BWD\")\n mgr = PatrickStarManager()\n mgr.set_training_stage(TrainingStage.BWD)\n\n for param_fp16 in self.client.chunk_based_param_fp16:\n param_fp16.ps_attr.bwd_used_cnt = 0\n\n self.optimizer.zero_grad()\n if self.loss_scaler:\n self.loss_scaler.backward(loss)\n else:\n loss.backward()\n mgr.update_margin_mem()\n global_timer.my_timer.finish_profile(\"BWD\")", "def backward(self, input_train, input_train_label):\n batchSize = len(input_train) #liczba obrazow podawanych na wejscie w trakcie jednej iteracji\n weights = self.Weights\n biases = self.Biases\n delta_W = self.delta_W\n delta_B = self.delta_B\n poolParams = self.poolParams\n dW_list = []\n dB_list = []\n dW4 = np.zeros(weights[4].shape)\n dB4 = np.zeros(biases[4].shape)\n dW3 = np.zeros(weights[3].shape)\n dB3 = np.zeros(biases[3].shape)\n dW2 = np.zeros(weights[2].shape)\n dB2 = np.zeros(biases[2].shape)\n dW1 = 
np.zeros(weights[1].shape)\n dB1 = np.zeros(biases[1].shape)\n dW0 = np.zeros(weights[0].shape)\n dB0 = np.zeros(biases[0].shape)\n loss = 0\n for image in range(batchSize):\n\n X_data = input_train[image]\n X_label = input_train_label[image]\n output_forward, cache = self.forward(X_data) \n loss += -1*sum(X_label - np.log(output_forward)) #obliczenie wartosci funkcji straty [cross entropy]\n\n #Propagacja wsteczna gradientu\n dy = -1*(X_label - output_forward)/2\n #print(\"X_label = {} \\t layer7 = {} \\t dy = {}\".format(X_label, output_forward, dy))\n\n [dy, dW, dB ] = fullycon_b(cache[6], np.asarray([dy]).transpose() , weights[4])\n dW4 += dW\n dB4 += dB.flatten() #wektoryzacja macierzy\n dy = act.relu_b(dy.transpose(), cache[6])\n\n [dy, dW, dB ] = fullycon_b(cache[5][:,0], dy, weights[3])\n dW3 += dW\n dB3 += dB.flatten()\n dy = act.relu_b(dy.transpose(), cache[5][:,0]) \n \n [dy, dW, dB ] = convolution_b(cache[4], dy, weights[2])\n dW2 += dW\n dB2 += dB.flatten()\n \n dy = maxpool_b(cache[3], dy)\n dy = act.relu_b(dy, cache[3])\n\n [dy, dW, dB ] = convolution_b(cache[2], dy, weights[1])\n dW1 += dW\n dB1 += dB.flatten()\n \n dy = maxpool_b(cache[1], dy)\n dy = act.relu_b(dy, cache[1]) \n\n [dy, dW, dB ] = convolution_b(np.asarray([cache[0]]), dy, weights[0])\n dW0 += dW\n dB0 += dB.flatten()\n\t\t\t\n dW_list.append(dW4)\n dB_list.append(dB4)\n dW_list.append(dW3)\n dB_list.append(dB3)\n dW_list.append(dW2)\n dB_list.append(dB2)\n dW_list.append(dW1)\n dB_list.append(dB1)\n dW_list.append(dW0)\n dB_list.append(dB0)\n dW_list = dW_list[::-1]\n dB_list = dB_list[::-1]\n \n #Aktualizacja parametrow kazdej z warstw (o ile takie posiada)\n #uczenie z metoda momentum: learning rate = const; alpha = const\n for x in range(len(dW_list)):\n delta_W[x] = alpha*delta_W[x] - eta*dW_list[x]/batchSize\n weights[x] += delta_W[x]\n delta_B[x] = alpha*delta_B[x] - eta*dB_list[x]/batchSize\n biases[x] += delta_B[x]\n #przypisanie nowych wag po aktualiacji wszystkich parametrow\n self.Weights = weights\n self.Biases = biases\n\n #zwrocenie stosunku wartosci f-cji straty do rozmiaru batch'u\n return loss/batchSize", "def backward(self, gradient):\n #TODO\n pass", "def backward(self, gradient):\n #TODO\n pass", "def backward_pass(self, grad):\n pass", "def backward(ctx, grad_output):\n\n # This is a pattern that is very convenient - at the top of backward\n # unpack saved_tensors and initialize all gradients w.r.t. inputs to\n # None. Thanks to the fact that additional trailing Nones are\n # ignored, the return statement is simple even when the function has\n # optional inputs.\n # input, weight, bias = ctx.saved_variables\n\n return grad_output", "def backward_pass(total_loss):\n\n # Get the tensor that keeps track of step in this graph or create one if not there\n global_step = tf.train.get_or_create_global_step()\n\n # Print summary of total loss\n tf.summary.scalar('Total_Loss', total_loss)\n\n # Decay the learning rate\n dk_steps = int((FLAGS.epoch_size / FLAGS.batch_size) * 75)\n lr_decayed = tf.train.cosine_decay_restarts(FLAGS.learning_rate, global_step, dk_steps)\n\n # Compute the gradients. NAdam optimizer came in tensorflow 1.2\n opt = tf.contrib.opt.NadamOptimizer(learning_rate=lr_decayed, beta1=FLAGS.beta1,\n beta2=FLAGS.beta2, epsilon=0.1)\n\n # Compute the gradients\n gradients = opt.compute_gradients(total_loss)\n\n # Apply the gradients\n train_op = opt.apply_gradients(gradients, global_step, name='train')\n\n # Add histograms for the trainable variables. i.e. 
the collection of variables created with Trainable=True\n for var in tf.trainable_variables():\n tf.summary.histogram(var.op.name, var)\n\n # Maintain average weights to smooth out training\n variable_averages = tf.train.ExponentialMovingAverage(FLAGS.moving_avg_decay, global_step)\n\n # Applies the average to the variables in the trainable ops collection\n variable_averages_op = variable_averages.apply(tf.trainable_variables())\n\n with tf.control_dependencies([train_op, variable_averages_op]): # Wait until we apply the gradients\n dummy_op = tf.no_op(name='train') # Does nothing. placeholder to control the execution of the graph\n\n return dummy_op", "def backward_deconvnet_relu(x):\n def grad(dy):\n return tf.nn.relu(dy)\n return tf.nn.relu(x), grad", "def backward(last_layer: str) -> Callable:\n\n def closure() -> Tuple[Optional[torch.Tensor], torch.Tensor]:\n optimizer.zero_grad()\n output = model(data)\n if last_layer == \"output\":\n output.backward(torch.ones_like(target))\n return None, output\n elif last_layer == 'loss':\n loss = compute_loss(output - target)\n loss.backward()\n return loss, output\n else:\n assert False, 'last layer must be \"output\" or \"loss\"'\n\n return closure", "def backward(last_layer: str) -> Callable:\n\n def closure() -> Tuple[Optional[torch.Tensor], torch.Tensor]:\n optimizer.zero_grad()\n output = model(data)\n if last_layer == \"output\":\n output.backward(torch.ones_like(target))\n return None, output\n elif last_layer == 'loss':\n loss = compute_loss(output - target)\n loss.backward()\n return loss, output\n else:\n assert False, 'last layer must be \"output\" or \"loss\"'\n\n return closure", "def backward(self, out_grad, input):\n raise NotImplementedError", "def on_iter_backward(self, runner):\n runner.optimizer.zero_grad()\n runner.loss.backward()\n runner.optimizer.step()", "def backward_D(self):\n self.loss_D.backward()", "def backward(self, gradient: Tensor) -> Tensor:\n self.b_grad = np.sum(gradient, axis=0)\n self.w_grad = self.inputs.T @ gradient\n return gradient @ self.w.T", "def backward(\n self, X: np.ndarray, y: np.ndarray, lr: float, reg: float = 0.0\n ) -> float:\n y_hat = self.forward(X)\n\n y_one_hot = self.one_hot_encode(y)\n loss = CrossEntropy.forward(y_one_hot, y_hat)\n\n d_layer = CrossEntropy.backward(y, y_hat)\n\n w_grads = []\n b_grads = []\n\n for idx, layer in reversed(list(enumerate(self.layers))):\n # Not output layer\n if (idx + 1) < len(self.layers):\n next_layer = self.layers[idx + 1]\n\n d_layer = d_layer.dot(next_layer.w.T)\n d_layer = layer.activation_func.backward(d_layer, layer.activated_out)\n\n d_w = layer.linear_in.T.dot(d_layer) + 2 * reg * layer.w\n d_b = np.sum(d_layer, axis=0)\n\n w_grads.insert(0, d_w)\n b_grads.insert(0, d_b)\n\n self.optimizer.step(self.layers, w_grads, b_grads, lr)\n\n if self.norm_weights:\n w_norm = max(np.linalg.norm(l.w) for l in self.layers) / len(self.layers)\n b_norm = max(np.linalg.norm(l.w) for l in self.layers) / len(self.layers)\n for layer in self.layers:\n layer.w /= w_norm\n layer.b /= b_norm\n\n return loss", "def backward(self, # type: ignore\n closure_loss: torch.Tensor,\n *args,\n **kwargs) -> torch.Tensor:\n closure_loss = closure_loss.to(self.root_device)\n return super().backward(\n closure_loss,\n *args,\n **kwargs,\n )", "def fully_connected_forward(self, X, W, b):\n \n #############################################################################\n # TODO: Implement the forward pass of a fully connected layer and store #\n # the variables needed for the 
backward pass (gradient computation) #\n # as a tuple inside cache. #\n #############################################################################\n out = np.matmul(X, W) + b\n cache = (X, W, b)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n \n return out, cache", "def backward(self):\n #initiate the gradients\n #print('')\n \n #print('node {} grad {}'.format(self.id, self.gradient))\n #print('node {} times visited : {}/{}'.format(self.id, self.times_visited, self.times_used))\n\n if self.gradient is None:\n self.gradient=np.eye(self.output_dim)\n self.times_visited+=1\n\n \n \n if self.childrens==[]:\n return(self.gradient)\n else:\n self.backward()\n \n else: \n if self.childrens!=[]:\n #we can still going deeper in backprop\n #print(len(self.childrens), ' childrens', str([self.childrens[i]['node'].id for i in range(len(self.childrens))]))\n for child in self.childrens:\n node,jacobian=child['node'], child['jacobian']\n \n new_grad = np.dot(self.gradient, jacobian)\n #print(node.gradient)\n #print(new_grad)\n \n if node.gradient is None:\n node.gradient = new_grad\n else: \n node.gradient += new_grad\n \n node.times_visited+=1\n #print('looking at node {} \\ngradient {}'.format(node.id, node.gradient))\n\n \n if node.times_used ==node.times_visited: \n #print(node.gradient)\n node.backward() \n else:\n #still some computations to perform upwards before going deeped\n #print('node {} visits : {}/{}'.format(node.id, node.times_visited, node.times_used))\n pass", "def backward_G(self):\n self.loss_G.backward()", "def propagate_backward(self, h):\n h = h.mm(self.feedbackweights.t())\n if self.feedbackbias is not None:\n h += self.feedbackbias.unsqueeze(0).expand_as(h)\n return self.feedback_activationfunction(h)", "def backward(self):\n gradient = blah\n return gradient", "def backward(self):\n gradient = blah\n return gradient", "def propagate_backward(layer, input_layer, target):\n if numpy.isscalar(target):\n tmp = target\n target = numpy.zeros([1, 1])\n target[0] = tmp\n\n error = target - layer.visible\n gradient = error * layer.derivative_function(layer.visible)\n\n hidden_change = (numpy.dot(gradient, layer.weights.T)\n * input_layer.derivative_function(input_layer.visible))\n estimated_hidden = input_layer.visible + hidden_change\n\n change = numpy.outer(input_layer.visible, gradient)\n\n layer.weights += layer.learning_rate * change\n return estimated_hidden", "def backward(self, lhs: Tensor, rhs: Tensor, acc_grad: np.ndarray):\n raise NotImplementedError", "def backwardpass(self, grad):\n return (self.x>0) * grad", "def backward(self, inputs, grad_loss_input):\n raise NotImplementedError", "def backward(ctx, grad_output):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n # Retrieve saved tensors and constants\n gamma, ivar, mean, input = ctx.saved_tensors\n eps = ctx.saved_tensors\n\n # Check which inputs need gradients\n input_needs_grad, gamma_needs_grad, beta_needs_grad = ctx.needs_input_grad\n\n # Get the batch size (=N)\n N, _ = grad_output.shape\n\n # reconstruct the input_norm\n input_norm = (input - mean) * ivar\n grand_input_norm = grad_output * gamma\n\n ##### Gradient wrt beta #####\n grad_beta = grad_output.sum(dim=0) if beta_needs_grad else None\n\n #### Gradient wrt gamma ####\n grad_gamma = (input_norm*grad_output).sum(dim=0) if gamma_needs_grad else None\n \n #### Gradient wrt input ####\n term1 
= N*grand_input_norm \n term2 = torch.sum(grand_input_norm, dim=0)\n term3 = input_norm*torch.sum(grand_input_norm*input_norm, dim=0)\n grad_input = (1. / N) * ivar * (term1 - term2 - term3) if input_needs_grad else None\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n # return gradients of the three tensor inputs and None for the constant eps\n return grad_input, grad_gamma, grad_beta, None", "def backward(self, loss, update_hp_grads=True, clear_lp_grads=False, **bwd_kwargs):\n self.clear_lp_grads()\n loss.backward(**bwd_kwargs)\n\n if update_hp_grads:\n self.update_hp_grads(clear_lp_grads=clear_lp_grads)", "def forward_backward(self, data_batch):\n self.forward(data_batch, is_train=True)\n self.backward()\n if self.use_l2norm_grad_clip:\n # 2-Norm Grad Clip\n self.l2norm_grad_clip()", "def backward(self, d_out):\n # TODO: Implement backward pass\n # Compute both gradient with respect to input\n # and gradients with respect to W and B\n # Add gradients of W and B to their `grad` attribute\n\n # It should be pretty similar to linear classifier from\n # the previous assignment\n \n dW = np.dot(self.X.T, d_out);\n dB = np.dot(np.ones((1, d_out.shape[0])), d_out);\n \n d_input = np.dot(d_out, self.W.value.T);\n #print(\"self.X = \", self.X);\n #print(\"self.W.grad.T = \", self.W.grad.T);\n #print(\"dW.T = \", dW.T);\n \n self.W.grad += dW;\n self.B.grad += dB;\n \n return d_input;", "def backward(self, inGradient, lr=0.001): # batchSize = 1\n wGradient = np.dot(inGradient.T, self.data)\n bGradient = np.sum(inGradient, axis=0)\n outGradient = np.dot(inGradient, self.weights)\n\n self.weights = self.weights - lr * wGradient\n self.bias = self.bias - lr * bGradient\n self.wGradient = wGradient\n self.bGradient = bGradient\n\n #print \"weight gradient \", wGradient\n #print \"bias gradient \", bGradient\n\n return outGradient", "def backprop(nn, y):\n LAST = len(nn) - 1\n\n # last layer\n nn[LAST].dCdz = np.multiply(2.0 * (nn[LAST].a - y), AF_PRIME(nn[LAST].z))\n nn[LAST].dCdw = (np.dot(nn[LAST].dCdz, nn[LAST].input_value.T))\n nn[LAST].dCdw_sum = \\\n np.add(nn[LAST].dCdw, nn[LAST].dCdw_sum)\n nn[LAST].w -= nn[LAST].dCdw * LEARNING_RATE\n\n # other layer\n for n in range(1, len(nn)):\n dz1dz2 = \\\n np.dot(nn[LAST - n + 1].w.T, nn[LAST - n + 1].dCdz)\n nn[LAST - n].dCdz = \\\n np.multiply(AF_PRIME(nn[LAST - n].z), dz1dz2)\n nn[LAST - n].dCdw = \\\n (np.dot(nn[LAST - n].dCdz, nn[LAST - n].input_value.T))\n nn[LAST - n].dCdw_sum = \\\n np.add(nn[LAST - n].dCdw, nn[LAST - n].dCdw_sum)\n nn[LAST - n].w -= nn[LAST - n].dCdw * LEARNING_RATE", "def linear_activation_backward(dA, cache, activation):\n pass", "def backpropagation(self):\n\n print \"backpropagation in Convlayer\"\n\n if self.__nextLayer.__class__.__name__ is 'FCLayer':\n WF = self.__nextLayer.numberOfNeuronsInLayer\n dNext = np.reshape(self.__nextLayer.getDeltas(), (1, 1, 1, WF))\n else:\n dNext = self.__nextLayer.getDeltas()\n\n self.deltas = np.zeros(self.outputValues.shape)\n\n # Compute Deltas\n if self.__nextLayer.__class__.__name__ is 'FCLayer':\n for n in range(self.outputValues.shape[0]):\n for nf in range(self.numberOfFilters):\n for h in range(self.outputValues.shape[2]):\n for w in range(self.outputValues.shape[3]):\n deltas_i = self.activationFunctionDerivative(self.outputValues)[n, nf, h, w] * dNext[\n :, :, :, nf]\n self.deltas[n, nf, h, w] += deltas_i\n\n elif self.__previousLayer is None:\n for n in range(self.outputValues.shape[0]):\n deltas_i = 
self.activationFunctionDerivative(self.outputValues)[n] * dNext\n self.deltas[n] += deltas_i[0]\n\n else:\n for n in range(self.outputValues.shape[0]):\n deltas_i = self.activationFunctionDerivative(self.outputValues)[n] * dNext\n self.deltas[n] += deltas_i[n]\n\n # print \"shape of delta is \" + str(self.deltas.shape)\n\n if self.spaceConv is True:\n self.deltas = np.transpose(self.deltas, (3, 1, 2, 0))\n else:\n pass\n\n # Compute delta Biases\n deltaBiases = (np.sum(self.deltas, axis=(0, 2, 3)))\n assert deltaBiases.shape == self.bias.shape\n\n # Compute delta Kernels\n\n deltaKernel = np.zeros(self.weights.shape)\n\n for ninp in range(self.inputShape[0]):\n for nf in range(self.numberOfFilters):\n flippedDelta = self.flipArray(self.deltas[ninp, nf, :, :]) # Flips Kernel for the convolution\n for cin in range(self.inputShape[1]):\n nh = 0\n for h in np.arange(0, self.inputs.shape[2] - flippedDelta.shape[0] + 1, self.stride[0]):\n nw = 0\n for w in np.arange(0, self.inputs.shape[3] - flippedDelta.shape[1] + 1, self.stride[1]):\n activationMap = self.inputs[ninp, cin,\n h:h + flippedDelta.shape[0],\n w:w + flippedDelta.shape[1]] # Input Map used for the convolution\n deltaKernel[nf, nh, nw] = np.sum(activationMap * flippedDelta) # Convolution\n nw += 1\n nh += 1\n\n if self.spaceConv is True:\n self.deltas = np.transpose(self.deltas, (3, 1, 2, 0))\n else:\n pass\n\n self.deltaWeights = deltaKernel\n self.deltaBiases = deltaBiases\n\n if self.__previousLayer is None:\n return self.deltas, self.deltaWeights, self.deltaBiases\n else:\n return self.__previousLayer.backpropagation()", "def backward(ctx, dy):\n y = ctx.y\n if ctx.eagerly_discard_variables:\n del ctx.y\n for i in range(len(ctx.reversible_blocks) - 1, -1, -1):\n y, dy = ctx.reversible_blocks[i].backward_pass(y, dy, not ctx.eagerly_discard_variables)\n if ctx.eagerly_discard_variables:\n del ctx.reversible_blocks\n return dy, None, None", "def backward(self, grad_output):\n raise NotImplementedError", "def backward(self, d_out):\n # TODO: Implement backward pass\n # Compute both gradient with respect to input\n # and gradients with respect to W and B\n # Add gradients of W and B to their `grad` attribute\n\n # It should be pretty similar to linear classifier from\n # the previous assignment\n\n d_input = np.dot(d_out, self.W.value.T)\n self.W.grad = np.dot(self.X.T, d_out)\n self.B.grad = np.sum(d_out, axis=0, keepdims=True)\n\n return d_input", "def __backprop(self, X, Y_hat, Y):\n m = Y.shape[1]\n Y = Y.reshape(Y_hat.shape.eval())\n\n # Compute the gradient of the output layer\n dA = - ((Y / Y_hat) - ((1 - Y) / (1 - Y_hat)))\n i = len(self.layers) - 1\n while i >= 0:\n\n # Get activation of the previous layer\n A_prev = None\n if i == 0:\n A_prev = X\n else:\n A_prev = self.layers[i-1].A\n \n # Compute gradient for the next layer down\n dA = self.layers[i]._Dense__backward(dA, A_prev)\n i -= 1", "def conv_relu_backward(dout, cache):\n conv_cache, relu_cache = cache\n da = layers.relu_backward(dout, relu_cache)\n dx, dw, db = layers.conv_backward_fast(da, conv_cache)\n return dx, dw, db", "def layer_backward(d_output, cache):\n\n # Unpack cache values\n x, w, z, output = cache\n\n # Compute derivatives (gradients)\n d_x, d_w = None, None\n\n return d_x, d_w", "def forward_backward_prop(data, labels, params, dimensions):\n\n ### Unpack network parameters (do not modify)\n ofs = 0\n Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])\n\n activation = []\n\n W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H))\n ofs += Dx * H\n 
b1 = np.reshape(params[ofs:ofs + H], (1, H))\n ofs += H\n W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))\n ofs += H * Dy\n b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))\n\n ### Forward propagation\n activation.append(data)\n\n # Hidden layer inputs: (N, Dx) * (Dx, H) -> N x H\n z = np.dot(activation[-1], W1) + b1 \n # Activations, inputs to the final layer. \n activation.append(sigmoid(z)) # output of the hidden layer, activation\n # Final layer outputs: ( N x H ) * ( H, Dy) -> (N, Dy)\n z = np.dot(activation[-1], W2) + b2\n activation.append( softmax(z) )\n\n # Cross-entropy cost\n\n y_p = activation[-1]\n activation = activation[:-1] # remove activation data (output)\n\n cost = -np.sum(labels * np.log(y_p))\n \n error = []\n \n ### backward propagation\n sigma = (y_p - labels)\n error.append(sigma)\n\n gradb2 = np.sum(error[-1], axis=0)\n gradW2 = np.dot(activation[-1].T, error[-1])\n\n #\n sigma = np.dot(W2, error[-1].T)\n sigma = sigma.T * sigmoid_grad(activation[-1])\n activation = activation[:-1] # remove activation data ( hidden layer )\n\n error.append(sigma)\n\n gradb1 = np.sum(error[-1], axis=0)\n gradW1 = np.dot(activation[-1].T, error[-1])\n\n\n ### Stack gradients (do not modify)\n grad = np.concatenate((gradW1.flatten(), gradb1.flatten(), \n gradW2.flatten(), gradb2.flatten()))\n \n return cost, grad", "def propagate_backward_irpropm(layer, input_layer, target, batch_size=20):\n if input_layer.bias:\n input_layer.visible[0] = 1.\n\n error = target - layer.visible\n gradient = error * layer.derivative_function(layer.visible)\n\n change = numpy.outer(input_layer.visible, gradient)\n\n # adds an attribute to this layer to keep track of how many\n # iterations of backprop have happened since the last update\n # to the weights\n\n # initialize the layer with backprop count if necessary.\n try:\n layer.backprop_count\n except AttributeError:\n layer.backprop_count = 1\n layer.previous_gradient = layer.gradient\n layer.gradient = numpy.zeros(layer.weights.shape)\n\n if layer.backprop_count <= batch_size - 1:\n layer.backprop_count += 1\n layer.gradient += change\n\n if layer.backprop_count >= batch_size - 1:\n signs = numpy.sign(layer.gradient * layer.previous_gradient)\n for ci in range(0, signs.shape[1]):\n for ri in range(0, signs.shape[0]):\n if signs[ri, ci] < 0.:\n layer.step_size[ri, ci] = max(\n MIN_SS, ETA_MINUS * layer.step_size[ri, ci])\n layer.gradient[ri, ci] = 0.\n elif signs[ri, ci] > 0.:\n layer.step_size[ri, ci] = min(\n MAX_SS, ETA_PLUS * layer.step_size[ri, ci])\n\n layer.weights += numpy.sign(layer.gradient) * layer.step_size\n layer.previous_gradient = layer.gradient\n layer.gradient = numpy.zeros(layer.gradient.shape)\n layer.backprop_count = 0\n\n # hidden deltas\n hidden_change = (numpy.dot(gradient, layer.weights.T)\n * input_layer.derivative_function(input_layer.visible))\n estimated_hidden = input_layer.visible + hidden_change\n\n return estimated_hidden", "def backward(self, d_out):\n # TODO: Implement backward pass\n # Compute both gradient with respect to input\n # and gradients with respect to W and B\n # Add gradients of W and B to their `grad` attribute\n\n # It should be pretty similar to linear classifier from\n # the previous assignment\n self.W.grad += np.dot(self.X.T, d_out)\n self.B.grad += np.sum(d_out, axis=0)[np.newaxis, :]\n return np.dot(d_out, self.W.value.T)", "def backward(self, accum_grad):\n\n W = self.W\n\n grad_w = self.layer_input.T.dot(accum_grad)\n grad_b = np.sum(accum_grad, axis=0, keepdims=True)\n\n # Update the layer 
weights\n self.W = self.W_optimizer.update(self.W, grad_w)\n self.b = self.b_optimizer.update(self.b, grad_b)\n\n accum_grad = accum_grad.dot(W.T)\n return accum_grad", "def backward_D(self):\n base_function._unfreeze(self.net_D)\n #print(self.input_P2.shape, self.img_gen.shape)\n self.loss_dis_img_gen = self.backward_D_basic(self.net_D, self.input_P2, self.img_gen)", "def backward(self, inputs, gradients, **kwargs):\n grad_relu = inputs > 0\n return gradients * grad_relu", "def backward_gradient(\n self, input: np.ndarray, head_gradients: Dict[str, np.ndarray]\n ) -> np.ndarray:\n raise NotImplementedError", "def backward(ctx, grad_output):\n diff, = ctx.saved_tensors\n grad_input = grad_output.clone()\n grad_input = grad_input + diff\n return grad_input", "def backward(self,y_out, y_truth):\n gradient = None\n ###########################################################################\n # TODO: #\n # Implement the backward pass. Return the gradient wrt y_out #\n ###########################################################################\n\n gradient = np.multiply(2, np.subtract(y_out, y_truth))\n\n ###########################################################################\n # END OF YOUR CODE #\n ########################################################################### \n return gradient", "def backward(self, residuals):\n in_channel, out_channel, kernel_size, a = self.weights.shape\n dw = np.zeros_like(self.weights) \n \n for i in range(in_channel):\n for o in range(out_channel):\n dw[i, o] += inv_conv2(self.in_val[:,:,i], \n residuals[:,:,o], \n self.stride)\n\n self.db += residuals.sum(axis=1).sum(axis=0)\n self.dw += dw \n gradient_x = np.zeros_like(self.in_val)\n \n for i in range(in_channel):\n for o in range(out_channel):\n gradient_x[:,:,i] += conv_delta(residuals[:,:,o] \n , self.weights[i][o]\n , self.stride\n , self.in_val.shape[0])\n \n return gradient_x", "def backward(ctx, grad_output):\n inds, wgts = ctx.saved_tensors\n grad_inputs = trilinear_devoxelize_backward(grad_output.contiguous(),\n inds, wgts, ctx.r)\n return grad_inputs.view(grad_output.size(0), grad_output.size(1), ctx.r,\n ctx.r, ctx.r), None, None, None", "def conv_backward_naive(dout, cache):\n dx, dw, db = None, None, None\n #############################################################################\n # TODO: Implement the convolutional backward pass. 
#\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dx, dw, db", "def backward_pass(self):\n self.K_matrix, self.k_vector = self.backward_pass_static(self.m, self.n, self.T, self.C_matrix, self.c_vector, self.F_matrix)\n return self.K_matrix, self.k_vector", "def backward(cls, grad_out, activated_out):\n raise Exception(\"Unimplemented\")", "def backward(cls, grad_out, activated_out):\n raise Exception(\"Unimplemented\")", "def backward(ctx, grad_from_upstream):\n grad_inputX = grad_weight = grad_bias = None\n\n print('Performing custom backward of MyConv2d')\n nOutCh, nInCh, nKnRows, nKnCols, padding, stride = ctx.parameters\n # inX_nSamp_nL_nB, kn_nB_nOutCh = ctx.saved_tensors\n\n # grad_out = torch.ones(out.shape, dtype=torch.float64) / out.numel()\n\n grad_bias = grad_from_upstream.sum(dim=[0, 2, 3]) # done for grad_bias\n\n grad_out_nSamp_nOutCh_nR_nC = grad_from_upstream\n\n # for: out_nSamp_nOutCh_nR_nC = out_nSamp_nOutCh_nL.reshape(nSamples, outCh, nOutRows, nOutCols)\n grad_out_nSamp_nOutCh_nL = grad_out_nSamp_nOutCh_nR_nC.reshape(ctx.out_nSamp_nOutCh_nL_shape)\n\n # for: out_nSamp_nOutCh_nL = out_nSamp_nL_nOutCh.transpose(1, 2)\n grad_out_nSamp_nL_nOutCh = grad_out_nSamp_nOutCh_nL.transpose(1, 2)\n\n # for: out_nSamp_nL_nOutCh = inX_nSamp_nL_nB.matmul(kn_nB_nOutCh)\n grad_inX_nSamp_nL_nB = grad_out_nSamp_nL_nOutCh.matmul(ctx.kn_nB_nOutCh.t())\n\n # continue to finish calculation of the gradient w.r.t \"weight\", i.e. the convolution kernel\n grad_kn_nB_nOutCh = ctx.inX_nSamp_nL_nB.transpose(1, 2).matmul(grad_out_nSamp_nL_nOutCh)\n grad_kn_nB_nOutCh = grad_kn_nB_nOutCh.sum(dim=0)\n grad_kn_nOutCh_nB = grad_kn_nB_nOutCh.t()\n grad_weight = grad_kn_nOutCh_nB.view(nOutCh, nInCh, nKnRows, nKnCols) # done for grad_weight\n\n # for: inX_nSamp_nL_nB = inX_nSamp_nB_nL.transpose(1, 2)\n grad_inX_nSamp_nB_nL = grad_inX_nSamp_nL_nB.transpose(1, 2)\n\n # for: inX_nSamp_nB_nL = torch.nn.functional.unfold(inputX, (ctx.nKnRows, ctx.nKnCols))\n grad_inputX = torch.nn.functional.fold(grad_inX_nSamp_nB_nL, ctx.InImgSize, (nKnRows, nKnCols),\n padding=padding, stride=stride)\n\n return grad_inputX, grad_weight, grad_bias, None", "def backward(self,y_out, y_truth):\n gradient = None\n\n ###########################################################################\n # TODO: #\n # Implement the backward pass. Return the gradient wrt y_out #\n ###########################################################################\n\n gradient = -1 * (np.multiply(y_truth, np.divide(1, y_out)) - np.multiply((1 - y_truth), np.divide(1, (1 - y_out))))\n \n ###########################################################################\n # END OF YOUR CODE #\n ########################################################################### \n return gradient", "def backward(self,y_out, y_truth):\n gradient = None\n ###########################################################################\n # TODO: #\n # Implement the backward pass. Return the gradient wrt y_out #\n # hint: you may use np.where here. 
#\n ###########################################################################\n\n gradient = np.where(not np.all(y_out==0), 0, 1.e-18)\n\n ###########################################################################\n # END OF YOUR CODE #\n ########################################################################### \n return gradient", "def backward(self, previous_grad, learning_rate):\n conv_grad = np.zeros(self.conv_filter.shape)\n for patch, i, j in self.image_patch(self.current_image):\n for k in range(self.num_filters):\n conv_grad[k] += patch*previous_grad[i,j,k]\n \n self.conv_filter -= learning_rate*conv_grad\n return conv_grad", "def backward(self, grad_output=None):\n # Compute dL/dP_j\n self.loss.backward(grad_output)\n dLdP = self.preds_enc.grad\n\n # Turn batched vector into batched matrix for matmul\n dLdP = dLdP.unsqueeze(-1)\n\n # Compute Jacobians wrt model weights\n if self.rank == self.feature_src:\n jacobians = self._compute_model_jacobians()\n\n # Populate parameter grad fields using Jacobians\n params = torch.nn.utils.parameters_to_vector(self.model.parameters())\n if self.rank == self.feature_src:\n jacobian = torch.cat(\n [jacobians[param] for param in self.model.parameters()], dim=0\n )\n else:\n jacobian_size = (params.numel(), dLdP.size(-2))\n jacobian = torch.empty(jacobian_size)\n\n jacobian = crypten.cryptensor(jacobian, src=self.feature_src)\n\n # Compute gradeints wrt each param\n while jacobian.dim() < dLdP.dim():\n jacobian = jacobian.unsqueeze(0)\n grad = jacobian.matmul(dLdP)\n grad = grad.view(-1, *(params.size()))\n\n # Compute DP noise\n if not self.rr_prob:\n # Determine noise generation function\n generate_noise = (\n self._generate_noise_from_src\n if self.noise_src\n else self._generate_noise_no_src\n )\n noise = generate_noise(params.size())\n grad += noise\n\n # Sum over batch dimension\n while grad.size() != params.size():\n grad = grad.sum(0)\n\n # Decrypt dL/dP_j * dP_j/dW_i with Differential Privacy\n grads = grad.flatten().get_plain_text(dst=self.feature_src)\n\n # Populate grad fields of parameters:\n if self.rank == self.feature_src:\n ind = 0\n for param in self.model.parameters():\n numel = param.numel()\n param.grad = grads[ind : ind + numel].view(param.size())\n ind += numel", "def rnn_backward(dh, cache):\n dx, dh_prev, dWx, dWh, db = None, None, None, None, None\n ##############################################################################\n # TODO: Implement the backward pass for a vanilla RNN running an entire #\n # sequence of data. You should use the rnn_step_backward function that you #\n # defined above. 
#\n ##############################################################################\n \"\"\"\n x, next_h, prev_h, Wx, Wh, b = cache\n dz = (1-next_h*next_h)*dnext_h\n # THIS ERROR IS SPREAD AMONG THE\n # np.dot(x, Wx) + np.dot(prev_h, Wh) + b)\n dx = np.dot(dz,Wx.T)\n dprev_h = np.dot(dz,Wh.T)\n db = np.sum(dz,axis=0)\n dWx = np.dot(x.T,dz)\n dWh = np.dot(prev_h.T,dz)\n #d(tanh) = 1- tanh*tanh\n \"\"\"\n #pdb.set_trace()\n # dh is not result of forward prop\n # but\n N,T,H = dh.shape\n tmp_x, tmp_next_h, tmp_prev_h, tmp_Wx, tmp_Wh, tmp_b = cache[T-1]\n D = tmp_x.shape[1]\n\n\n dx = np.zeros((N,T,D))\n dh_prev = np.zeros((N,H))\n dWx = np.zeros((D,H))\n dWh = np.zeros((H,H))\n db = np.zeros((H))\n\n for i in reversed(list(range(0,T))):\n # current gradient at timestep is the upstream gradient (provided as input)\n # this may be coming from the Y as in the min_char_rnn.py (see line 59)\n # + downstream gradient provided by rnn_step_backward.\n dh_curr = dh[:,i,:] + dh_prev\n dx_, dh_prev, dWx_, dWh_, db_ = rnn_step_backward(dh_curr, cache[i])\n dWx += dWx_\n dWh += dWh_\n db += db_\n dx[:,i,:]=dx_\n\n\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n return dx, dh_prev, dWx, dWh, db", "def conv_backward_naive(dout, cache):\n dx, dw, db = None, None, None\n ###########################################################################\n # TODO: Implement the convolutional backward pass. #\n ###########################################################################\n #Extract variables from cache.\n x,w,b,conv_param = cache\n stride = conv_param['stride']\n pad = conv_param['pad']\n #Extract shapes(lots of dimensions can become buggy)\n N,F,out_height,out_width = dout.shape\n #Save filter dimensions.\n HH,WW = w.shape[2],w.shape[3]\n #Start by computing gradient of the bias.(always the simplest one)\n db = np.sum(np.sum(np.sum(dout,axis = 3),axis = 2),axis = 0)\n dw = np.zeros_like(w)\n dx = np.zeros_like(x)\n #Start computing gradient of w and x.(Naive implementation)\n #Go over each filter in w.\n for i in range(F):\n #Go over each training example.\n for j in range(N):\n curr_x = x[j,:,:,:]\n #Get current gradient of activation map for j filter on i training example.\n curr_dout = dout[j,i,:,:]\n a = 0;b = 0\n #print(\"HERE\",curr_x.shape)\n #print(\"Stride:\",stride)\n for t in range(0,curr_x.shape[1] - WW + 1,stride):\n for k in range(0,curr_x.shape[2] - HH + 1,stride):\n #print(\"t: %d k: %d WW:%d HH:%d \" % (t,k,WW,HH))\n dw[i,:,:,:] += curr_dout[a,b] * curr_x[:,t:(t + WW),k:(k + HH)]\n dx[j,:,t:(t + WW),k:(k + HH)] += curr_dout[a,b] * w[i,:,:,:]\n if(b == dout.shape[3] - 1):\n a += 1\n b = 0\n else:\n b += 1\n #Remove padding.\n dx = dx[:,:,pad : (dx.shape[2] - pad),pad: (dx.shape[3] - pad)] \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx, dw, db", "def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n grad_input = grad_output.clone()\n grad_input[input < 0] = 0\n return grad_input", "def conv_relu_pool_backward(dout, cache):\n conv_cache, relu_cache, pool_cache = cache\n ds = layers.max_pool_backward_naive(dout, pool_cache)\n da = layers.relu_backward(ds, relu_cache)\n dx, dw, db = layers.conv_backward_naive(da, conv_cache)\n return dx, dw, db", "def _backward(self, w=None):\n grad = self.w # Should be 
I * self.w . We keep a vector for simplicity\n\n # Left multiply input `w` with normalizer gradient\n return w * grad if w is not None else grad", "def forward_backward(self, data_batch):\n total_feature, total_label = self.forward(data_batch, is_train=True)\n self.backward_all(total_feature, total_label)", "def backward(self, gradwrtoutput):\n if Dropout.train_flag:\n gradin = torch.empty(self.data.shape).zero_()\n gradin[self.data > 0] = 1.0\n return gradin*gradwrtoutput\n return gradwrtoutput", "def backward(ctx, grad_output):\n \n grad_input = None # set output to None\n\n input, = ctx.saved_tensors\n if ctx.needs_input_grad[0]:\n grad_input = input.clone()\n\n return grad_input", "def backward(ctx, grad_output):\n if PROFILE:\n batch_tic = time.time()\n tic = time.time()\n timings = defaultdict(float)\n\n feats1, feats2, xxyy, batch_grid_u, params, pow = ctx.saved_tensors\n\n \"\"\"We needed to store the integers as part of a tensor, so the\n unpacking code here is a little convoluted.\"\"\"\n B, C, H, W, stride, norm = [x.item() for x in params]\n h, w = H, W\n pow = pow.item()\n\n \"\"\"This is a pattern that is very convenient - at the top of backward\n unpack saved_tensors and initialize all gradients w.r.t. inputs to\n None. Thanks to the fact that additional trailing Nones are\n ignored, the return statement is simple even when the function has\n optional inputs.\"\"\"\n grad_feats1 = grad_feats2 = grad_xxyy = grad_batch_u = None\n grad_stride = grad_norm = grad_pow = None\n\n \"\"\"Returning gradients for inputs that don't require it is\n not an error.\"\"\"\n assert ctx.needs_input_grad[0], \"expected feats1 to need grad\"\n assert ctx.needs_input_grad[1], \"expected feats2 to need grad\"\n assert not ctx.needs_input_grad[2], \"expected xxyy does not need grad\"\n assert not ctx.needs_input_grad[3], \"expected batch_grid_u does not need grad\"\n assert not ctx.needs_input_grad[4], \"expected stride does not need grad\"\n\n if PROFILE:\n timings[\"back-init\"] = time.time() - tic\n tic = time.time()\n\n with torch.no_grad():\n\n if feats1.is_cuda:\n # TODO: clean up types here\n if feats1.dtype == torch.float32:\n grad_feats1 = torch.cuda.FloatTensor(B, C, H, W).fill_(0)\n grad_feats2 = torch.cuda.FloatTensor(B, C, h, w).fill_(0)\n elif feats1.dtype == torch.float16:\n grad_feats1 = torch.cuda.HalfTensor(B, C, H, W).fill_(0)\n grad_feats2 = torch.cuda.HalfTensor(B, C, h, w).fill_(0)\n else:\n grad_feats1 = torch.zeros((B, C, H, W), dtype=feats1.dtype)\n grad_feats2 = torch.zeros((B, C, h, w), dtype=feats2.dtype)\n\n grad_loss = grad_output / (H * W * B)\n\n if PROFILE:\n timings[\"data transfer\"] = time.time() - batch_tic\n\n for b in range(B):\n\n if PROFILE:\n tic = time.time()\n\n with torch.no_grad():\n diff = batch_grid_u[b, :, :, None, None, :] - \\\n xxyy[None, None, ::stride, ::stride, :]\n diff = (diff * diff).sum(4).sqrt()\n diff = diff.pow(pow)\n\n if PROFILE:\n timings[\"diff-grid\"] += time.time() - tic\n tic = time.time()\n\n # loss gradient for the current minibatch element (expand to tensor)\n grad_loss_b = grad_loss\n grad_smcorr2 = grad_loss_b * diff\n\n if LOCAL_CHECKS:\n ones = torch.ones(diff.shape, dtype=diff.dtype)\n grad_loss_b_ = ones * grad_loss\n smcorr_ = torch.randn(\n diff.shape,\n dtype=torch.double,\n requires_grad=True)\n with torch.autograd.enable_grad():\n L_ = diff * smcorr_\n d_smcorr = torch.autograd.grad(\n outputs=L_,\n inputs=smcorr_,\n grad_outputs=grad_loss_b_,\n )\n rel_diff(grad_smcorr2, d_smcorr[0], \"smax\")\n if 
torch.any(torch.isnan(grad_smcorr2[0])):\n import ipdb; ipdb.set_trace()\n\n\n if PROFILE:\n timings[\"scale-feats\"] += time.time() - tic\n tic = time.time()\n\n # Re-compute intermediate values\n grad_smcorr2 = grad_smcorr2.view(H, W, -1)\n f1_ = feats1[b].view(C, H * W)\n f2_ = feats2[b].view(C, h * w)\n fa_ = feats1[(b + 1) % B].reshape(C, h * w) # auxiliary\n\n if norm:\n f1_norm = F.normalize(f1_, p=2, dim=0) * JDT_FACTOR\n f2_norm = F.normalize(f2_, p=2, dim=0) * JDT_FACTOR\n fa_norm = F.normalize(fa_, p=2, dim=0) * JDT_FACTOR\n else:\n f1_norm = f1_.clone()\n f2_norm = f2_.clone()\n fa_norm = fa_.clone()\n\n if PROFILE:\n timings[\"fwd-norm\"] += time.time() - tic\n tic = time.time()\n\n # Match the source features against the auxiliaries\n corr = torch.matmul(f1_norm.t(), fa_norm)\n corr = corr.reshape(H, W, h, w)\n\n if PROFILE:\n timings[\"f1-aux-correlation\"] += time.time() - tic\n tic = time.time()\n\n smcorr = F.softmax(corr.view(H, W, -1), dim=2)\n smcorr = smcorr.view(corr.shape)\n if LOCAL_CHECKS:\n # cache a copy of the mega tensor for numerical checks\n smcorr_fa = smcorr[None, ...] * fa_norm.view(-1, 1, 1, h, w)\n f1_via_fa = smcorr_fa.sum((3, 4))\n else:\n \"\"\"This is one of the largest tensors.....\"\"\"\n f1_via_fa = (smcorr[None, ...] *\n fa_norm.view(-1, 1, 1, h, w)).sum((3, 4))\n\n f1_via_fa = f1_via_fa.view(C, H * W)\n\n # Main correlation computation\n corr2 = torch.matmul(f1_via_fa.t(), f2_norm).view(corr.shape)\n\n # Direct backward pass for second softmax\n smcorr2 = F.softmax(corr2.view(H, W, -1), dim=2)\n sum_term = torch.sum(grad_smcorr2 * smcorr2, dim=2, keepdim=True)\n grad_corr2 = smcorr2 * (grad_smcorr2 - sum_term)\n\n if not LOCAL_CHECKS:\n del smcorr2\n\n if PROFILE:\n timings[\"softmax\"] += time.time() - tic\n tic = time.time()\n\n # safety checks\n if LOCAL_CHECKS:\n with torch.enable_grad():\n corr2_num = corr2.clone().requires_grad_()\n corr2_num = corr2_num.reshape(H, W, -1)\n smcorr2_num = F.softmax(corr2_num, dim=2)\n grad_corr2_num = torch.autograd.grad(\n outputs=smcorr2_num,\n inputs=(corr2_num,),\n grad_outputs=grad_smcorr2,\n )\n rel_diff(grad_corr2, grad_corr2_num[0], \"smax-corr2\")\n\n \"\"\"Derivatives through the main correlation correlation\"\"\"\n grad_corr2 = grad_corr2.view(H * W, H * W)\n grad_f1_via_fa = torch.matmul(grad_corr2, f2_norm.t()).t()\n grad_f2_norm = torch.matmul(f1_via_fa, grad_corr2)\n\n if not LOCAL_CHECKS:\n del grad_corr2\n\n if PROFILE:\n timings[\"corr-back\"] += time.time() - tic\n tic = time.time()\n\n if LOCAL_CHECKS:\n with torch.enable_grad():\n f1_via_fa_num = f1_via_fa.clone().requires_grad_()\n f2_norm_num = f2_norm.clone().requires_grad_()\n corr2_num = torch.matmul(f1_via_fa_num.t(), f2_norm_num)\n grad_f1_via_fa_num, grad_f2_norm_num = torch.autograd.grad(\n outputs=corr2_num,\n inputs=(f1_via_fa_num, f2_norm_num),\n grad_outputs=grad_corr2,\n )\n rel_diff(grad_f1_via_fa, grad_f1_via_fa_num,\n \"corr-f1-via-fa\")\n rel_diff(grad_f2_norm, grad_f2_norm_num,\n \"corr->f2-norm\")\n\n if OLD_METHOD:\n # (may be able to collapse all this later)\n grad_f1_via_fa = grad_f1_via_fa.view(-1, H, W, 1, 1)\n\n # This tensor is crashing the GPU\n grad_smcorr_fa = grad_f1_via_fa.repeat(1, 1, 1, h, w)\n\n # safety checks over the summation\n if LOCAL_CHECKS:\n with torch.enable_grad():\n\n smcorr_fa_num = smcorr_fa.clone().requires_grad_()\n f1_via_fa_num = smcorr_fa_num.sum((3, 4))\n # f1_via_fa_num = f1_via_fa_num.view(C, H * W)\n\n grad_smcorr_fa_num = torch.autograd.grad(\n outputs=f1_via_fa_num,\n 
inputs=(smcorr_fa_num,),\n grad_outputs=grad_f1_via_fa.view(-1, H, w),\n )\n rel_diff(grad_smcorr_fa, grad_smcorr_fa_num[0],\n \"summation of grad_smcorr-fa\")\n\n # smcorr_fa = smcorr[None, ...] * fa_.view(-1, 1, 1, h, w)\n grad_smcorr = (grad_smcorr_fa * fa_norm.view(-1, 1, 1, h, w)).sum(0)\n grad_fa_ = (grad_smcorr_fa * smcorr[None, ...]).sum(1).sum(1)\n grad_fa_ = grad_fa_.reshape(C, h * w)\n\n # safety checks over the weighted sum\n if LOCAL_CHECKS:\n with torch.enable_grad():\n\n smcorr_num = smcorr.clone().requires_grad_()\n fa_norm_num = fa_norm.clone().requires_grad_()\n smcorr_fa_num = smcorr_num[None, ...] \\\n * fa_norm_num.view(-1, 1, 1, h, w)\n\n (grad_smcorr_num, grad_fa_num) = torch.autograd.grad(\n outputs=smcorr_fa_num,\n inputs=(smcorr_num, fa_norm_num),\n grad_outputs=grad_smcorr_fa,\n )\n rel_diff(grad_fa_, grad_fa_num,\n \"product of grad_fa_\")\n rel_diff(grad_smcorr, grad_smcorr_num,\n \"product of grad_smcor\")\n else:\n # -------------------------------------------------------\n # Collapsed summation method\n # -------------------------------------------------------\n # Fwd ops ->\n # smcorr_fa = smcorr[None, ...] * fa.reshape(-1, 1, 1, h, w)\n # f1_via_fa = smcorr_fa.sum((3, 4)).reshape(C, H * w)\n\n # Given gradient ->\n # (grad_f1_via_fa)\n\n # Desired gradients ->\n # (grad_fa_, grad_smcorr)\n\n grad_f1_via_fa = grad_f1_via_fa.view(-1, H, W, 1, 1)\n\n # safety checks over the summation\n if LOCAL_CHECKS:\n # This tensor is crashing the GPU, so should only be\n # used for numerical checks\n grad_smcorr_fa = grad_f1_via_fa.repeat(1, 1, 1, h, w)\n with torch.enable_grad():\n\n smcorr_fa_num = smcorr_fa.clone().requires_grad_()\n f1_via_fa_num = smcorr_fa_num.sum((3, 4))\n # f1_via_fa_num = f1_via_fa_num.view(C, H * W)\n\n grad_smcorr_fa_num = torch.autograd.grad(\n outputs=f1_via_fa_num,\n inputs=(smcorr_fa_num,),\n grad_outputs=grad_f1_via_fa.view(-1, H, w),\n )\n rel_diff(grad_smcorr_fa, grad_smcorr_fa_num[0],\n \"summation of grad_smcorr-fa\")\n\n # Use for-loop over EVC dimension to avoid memory issues\n if feats1.is_cuda:\n if grad_f1_via_fa.dtype == torch.float64:\n grad_smcorr = torch.cuda.DoubleTensor(H, W, h, w).fill_(0)\n grad_fa_ = torch.cuda.DoubleTensor(C, h, w).fill_(0)\n else:\n grad_smcorr = torch.cuda.FloatTensor(H, W, h, w).fill_(0)\n grad_fa_ = torch.cuda.FloatTensor(C, h, w).fill_(0)\n else:\n grad_smcorr = torch.zeros((H, W, h, w), dtype=feats1.dtype)\n grad_fa_ = torch.zeros((C, h, w), dtype=feats1.dtype)\n\n for cc in range(C):\n grad_smcorr += (grad_f1_via_fa[cc] * fa_norm[cc].view(1, 1, h, w))\n grad_fa_[cc] = (grad_f1_via_fa[cc] * smcorr).sum((0, 1))\n grad_fa_ = grad_fa_.reshape(C, h * w)\n\n # safety checks over the weighted sum\n if LOCAL_CHECKS:\n with torch.enable_grad():\n\n smcorr_num = smcorr.clone().requires_grad_()\n fa_norm_num = fa_norm.clone().requires_grad_()\n smcorr_fa_num = smcorr_num[None, ...] 
\\\n * fa_norm_num.view(-1, 1, 1, h, w)\n\n (grad_smcorr_num, grad_fa_num) = torch.autograd.grad(\n outputs=smcorr_fa_num,\n inputs=(smcorr_num, fa_norm_num),\n grad_outputs=grad_smcorr_fa,\n )\n rel_diff(grad_fa_, grad_fa_num,\n \"product of grad_fa_\")\n rel_diff(grad_smcorr, grad_smcorr_num,\n \"product of grad_smcor\")\n\n if PRINT_MEM:\n key = None\n val = None\n shape_mems = {}\n for key, val in locals().items():\n if hasattr(val, \"shape\"):\n shape_mems[key] = estimate_mem(val)\n\n sorted_mems = sorted(shape_mems.items(), key=lambda kv: -kv[1])\n for key, val in sorted_mems:\n print(\"{}: {:.4f} GiB\".format(key, val))\n\n # Direct backward pass for first softmax\n # smcorr = F.softmax(corr.view(H, W, -1), dim=2)\n grad_smcorr = grad_smcorr.view(H, W, -1)\n smcorr = smcorr.view(H, W, -1)\n sum_term = torch.sum(grad_smcorr * smcorr, dim=2, keepdim=True)\n grad_corr = smcorr * (grad_smcorr - sum_term)\n\n if not LOCAL_CHECKS:\n del grad_smcorr\n del grad_smcorr2\n del smcorr\n del corr\n\n if LOCAL_CHECKS:\n with torch.enable_grad():\n corr_num = corr.clone().requires_grad_()\n smcorr_num = F.softmax(corr_num.view(H, W, -1), dim=2)\n smcorr_num = smcorr_num.reshape(corr_num.shape)\n grad_corr_num = torch.autograd.grad(\n outputs=smcorr_num,\n inputs=(corr_num,),\n grad_outputs=grad_smcorr.view(H, W, h, w),\n )\n rel_diff(grad_corr, grad_corr_num[0].view(H, W, -1),\n \"smax-corr\")\n\n # Back through the first correlation\n # [Fwd op] -> `corr = torch.matmul(f1_norm.t(), fa_norm)`\n grad_corr = grad_corr.view(H * W, h * w)\n grad_f1_norm = torch.matmul(grad_corr, fa_norm.t()).t()\n grad_fa_norm = torch.matmul(f1_norm, grad_corr)\n\n if not LOCAL_CHECKS:\n del grad_corr\n\n\n if LOCAL_CHECKS:\n with torch.enable_grad():\n f1_norm_num = f1_norm.clone().requires_grad_()\n fa_norm_num = fa_norm.clone().requires_grad_()\n corr_num = torch.matmul(f1_norm_num.t(), fa_norm_num)\n grad_f1_norm_num, grad_fa_norm_num = torch.autograd.grad(\n outputs=corr_num,\n inputs=(f1_norm_num, fa_norm_num),\n grad_outputs=grad_corr,\n )\n rel_diff(grad_f1_norm, grad_f1_norm_num, \"corr->f1n-orm\")\n rel_diff(grad_fa_norm, grad_fa_norm_num, \"corr->fa-norm\")\n\n # Combine gradients for two ops using aux features\n grad_fa_norm = grad_fa_norm + grad_fa_\n\n # Back through the norms\n # [Fwd op] -> `f1_norm = F.normalize(f1_, p=2, dim=0) * JDT_FACTOR`\n # [Fwd op] -> `f2_norm = F.normalize(f2_, p=2, dim=0) * JDT_FACTOR`\n # [Fwd op] -> `fa_norm = F.normalize(fa_, p=2, dim=0) * JDT_FACTOR`\n # xNorm = sqrt(sum(x.*x, 3) + opts.epsilon) ;\n\n if norm:\n f1_norm_val = torch.norm(f1_, p=2, dim=0).clamp(min=EPS)\n f2_norm_val = torch.norm(f2_, p=2, dim=0).clamp(min=EPS)\n fa_norm_val = torch.norm(fa_, p=2, dim=0).clamp(min=EPS)\n\n max_val_f1 = torch.max(f1_norm_val)\n max_val_f2 = torch.max(f2_norm_val)\n max_val_fa = torch.max(fa_norm_val)\n if max_val_f1 + max_val_f2 + max_val_fa > 1E8:\n import ipdb; ipdb.set_trace()\n\n grad_f1_norm_ = grad_f1_norm / f1_norm_val\n grad_f1 = JDT_FACTOR * (grad_f1_norm_ -\n (grad_f1_norm_ * f1_).sum(0) * (f1_ / (f1_norm_val ** 2)))\n\n grad_f2_norm_ = grad_f2_norm / f2_norm_val\n grad_f2 = JDT_FACTOR * (grad_f2_norm_ -\n (grad_f2_norm_ * f2_).sum(0) * (f2_ / (f2_norm_val ** 2)))\n\n grad_fa_norm_ = grad_fa_norm / fa_norm_val\n grad_fa = JDT_FACTOR * (grad_fa_norm_ -\n (grad_fa_norm_ * fa_).sum(0) * (fa_ / (fa_norm_val ** 2)))\n\n if LOCAL_CHECKS:\n with torch.enable_grad():\n f1_num = f1_.clone().requires_grad_()\n f2_num = f2_.clone().requires_grad_()\n fa_num = 
fa_.clone().requires_grad_()\n\n f1_norm_num = F.normalize(f1_num, p=2, dim=0) * JDT_FACTOR\n f2_norm_num = F.normalize(f2_num, p=2, dim=0) * JDT_FACTOR\n fa_norm_num = F.normalize(fa_num, p=2, dim=0) * JDT_FACTOR\n\n grad_f1_num = torch.autograd.grad(\n outputs=f1_norm_num,\n inputs=(f1_num,),\n grad_outputs=grad_f1_norm,\n )\n grad_f2_num = torch.autograd.grad(\n outputs=f2_norm_num,\n inputs=(f2_num,),\n grad_outputs=grad_f2_norm,\n )\n grad_fa_num = torch.autograd.grad(\n outputs=fa_norm_num,\n inputs=(fa_num,),\n grad_outputs=grad_fa_norm,\n )\n rel_diff(grad_f1, grad_f1_num[0], \"norm-f1\")\n rel_diff(grad_f2, grad_f2_num[0], \"norm-f2\")\n rel_diff(grad_fa, grad_fa_num[0], \"norm-fa\")\n else:\n grad_f1 = grad_f1_norm\n grad_f2 = grad_f2_norm\n grad_fa = grad_fa_norm\n\n\n if PRINT_MEM:\n key = None\n val = None\n shape_mems = {}\n print(\"=======================\")\n for key, val in locals().items():\n if hasattr(val, \"shape\"):\n shape_mems[key] = estimate_mem(val)\n\n sorted_mems = sorted(shape_mems.items(), key=lambda kv: -kv[1])\n for key, val in sorted_mems:\n print(\"{}: {:.4f} GiB\".format(key, val))\n import ipdb; ipdb.set_trace()\n\n\n # safety checks over the whole inner loop\n if LOCAL_CHECKS:\n with torch.enable_grad():\n\n f1_num = feats1[b].clone().detach().requires_grad_().reshape(C, H * W)\n f2_num = feats2[b].clone().detach().requires_grad_().reshape(C, h * w)\n fa_num = feats1[(b + 1) % B].clone().detach().requires_grad_().reshape(C, h * w)\n\n if norm:\n f1_norm_num = F.normalize(f1_num, p=2, dim=0) * JDT_FACTOR\n f2_norm_num = F.normalize(f2_num, p=2, dim=0) * JDT_FACTOR\n fa_norm_num = F.normalize(fa_num, p=2, dim=0) * JDT_FACTOR\n else:\n f1_norm_num = f1_num\n f2_norm_num = f2_num\n fa_norm_num = fa_num\n\n # BLock 1 ------------------------------------------\n corr_num = torch.matmul(f1_norm_num.t(), fa_norm_num)\n corr_num = corr_num.reshape(H, W, H, W)\n smcorr_num = F.softmax(corr_num.reshape(H, W, -1), dim=2)\n smcorr_num = smcorr_num.reshape(corr_num.shape)\n # BLock 1 ------------------------------------------\n\n\n # BLock 2 ------------------------------------------\n smcorr_fa_num = smcorr_num[None, ...] 
* \\\n fa_norm_num.reshape(-1, 1, 1, h, w)\n # BLock 2 ------------------------------------------\n\n\n # BLock 3 ------------------------------------------\n f1_via_fa_num = smcorr_fa_num.sum((3, 4)).reshape(C, H * W)\n # BLock 3 ------------------------------------------\n\n # BLock 4 ------------------------------------------\n corr2_num = torch.matmul(f1_via_fa_num.t(), f2_norm_num)\n corr2_num = corr2_num.reshape(corr_num.shape)\n smcorr2_num = F.softmax(corr2_num.reshape(H, W, -1), dim=2)\n smcorr2_num = smcorr2_num.reshape(corr_num.shape)\n # BLock 4 ------------------------------------------\n\n grad_f1_num, grad_fa_num, grad_f2_num = torch.autograd.grad(\n outputs=(smcorr2_num,),\n inputs=(f1_num, fa_num, f2_num),\n grad_outputs=(grad_smcorr2.view(corr_num.shape)),\n )\n\n rel_diff(grad_f1, grad_f1_num, \"df1_\")\n rel_diff(grad_f2, grad_f2_num, \"df2_\")\n rel_diff(grad_fa, grad_fa_num, \"dfa_\")\n\n \"\"\"Distribute the gradients back among the input tensor\n features that require them.\"\"\"\n grad_feats1[b] += grad_f1.reshape((C, H, W))\n grad_feats1[(b + 1) % B] += grad_fa.reshape((C, h, w))\n grad_feats2[b] += grad_f2.reshape((C, h, w))\n\n if PROFILE:\n timings[\"feat-assign\"] += time.time() - tic\n\n\n if LOCAL_CHECKS_INNER_LOOP:\n with torch.enable_grad():\n loss = 0.\n grad_loss_ = grad_loss * (H * W * B) # unscale\n for b in range(B):\n f1 = feats1[b].reshape(C, H * W) # source\n f2 = feats2[b].reshape(C, h * w) # target\n fa = feats1[(b + 1) % B].reshape(C, h * w) # auxiliary\n\n if norm:\n f1 = F.normalize(f1, p=2, dim=0) * JDT_FACTOR\n f2 = F.normalize(f2, p=2, dim=0) * JDT_FACTOR\n fa = F.normalize(fa, p=2, dim=0) * JDT_FACTOR\n\n corr = torch.matmul(f1.t(), fa)\n corr = corr.reshape(H, W, h, w)\n smcorr = F.softmax(corr.reshape(H, W, -1), dim=2).reshape(corr.shape)\n smcorr_fa = smcorr[None, ...] 
* fa.reshape(-1, 1, 1, h, w)\n # del smcorr\n\n f1_via_fa = smcorr_fa.sum((3, 4)).reshape(C, H * w)\n # del smcorr_fa\n\n corr2 = torch.matmul(f1_via_fa.t(), f2).reshape(corr.shape)\n smcorr2 = F.softmax(corr2.reshape(H, W, -1), dim=2).reshape(corr.shape)\n # del corr2\n\n with torch.no_grad():\n diff = batch_grid_u[b, :, :, None, None, :] - \\\n xxyy[None, None, ::stride, ::stride, :]\n diff = (diff * diff).sum(4).sqrt()\n diff = diff.pow(pow)\n L = diff * smcorr2\n loss += L.float().sum()\n\n loss = loss / (H * W * B)\n grad_f1_num, grad_f2_num = torch.autograd.grad(\n outputs=loss,\n inputs=(feats1, feats2),\n grad_outputs=grad_loss_,\n )\n\n rel_diff(grad_feats1, grad_f1_num, \"full-loop f2\")\n rel_diff(grad_feats2, grad_f2_num, \"full-loop f2\")\n\n if PROFILE:\n tic = time.time()\n\n if PRINT_MEM:\n key = None\n val = None\n shape_mems = {}\n for key, val in locals().items():\n if hasattr(val, \"shape\"):\n shape_mems[key] = estimate_mem(val)\n\n sorted_mems = sorted(shape_mems.items(), key=lambda kv: -kv[1])\n for key, val in sorted_mems:\n print(\"{}: {:.4f} GiB\".format(key, val))\n\n if PROFILE:\n timings[\"cleanup\"] += time.time() - tic\n\n if PROFILE:\n timings[\"minibatch\"] = time.time() - batch_tic\n print(\"==============\")\n total_ratios = 0\n for key in timings:\n ratio = 100 * timings[key] / timings[\"minibatch\"]\n msg = \"{:.3f} ({:.2f}%) >>> {}\"\n print(msg.format(timings[key], ratio, key))\n total_ratios += ratio\n msg = \"{:.3f}s >>> ratio total {}\"\n print(msg.format(timings[\"minibatch\"], total_ratios - 100))\n print(\"==============\")\n\n return (grad_feats1, grad_feats2, grad_xxyy, grad_batch_u,\n grad_stride, grad_norm, grad_pow)", "def backward(self, *output_grads):\n raise NotImplementedError", "def backward(ctx, grad_output):\n loss, reg, u, lbda = ctx.saved_tensors\n\n device = u.device\n\n # do clever computations\n eps = 1e-10\n grad, = torch.autograd.grad(loss, u, only_inputs=True,\n retain_graph=True)\n x = (u - eps * grad).data\n lbda = lbda.data\n\n prox_x = check_tensor(\n np.array([prox_tv.tv1_1d(xx, eps * lbda) for xx in x]),\n device=device,\n )\n grad_u = (u - prox_x) / eps\n grad_lbda = reg.clone()\n return (torch.ones(0), grad_u, grad_lbda)", "def _poputil_remap_deduce_layer_backward(op, grads):\n return grads", "def _AffLayerReluDrop_Backprop(self, dscores, cache):\n grads = {}\n loss = None\n #Last Softmax Layer\n ##Add L2 Regularization loss\n loss = 0.5 * self.reg * np.sum(self.params['W{0}'.format(self.num_layers)]**2)\n ##Calculate grads for last Affine\n dhid, grads['W{0}'.format(self.num_layers)], grads['b{0}'.format(self.num_layers)] =\\\n affine_backward(dscores, cache[-1])\n grads['W{0}'.format(self.num_layers)] += self.reg * self.params['W{0}'.format(self.num_layers)]\n\n for i in range(self.num_layers-1, 0, -1): #hidden layers\n ##L2 Reg. 
loss\n loss += 0.5 * self.reg * np.sum(self.params['W{0}'.format(i)]**2)\n ##Calculate grads for [{affine-Batchnorm-relu} X (L-1)]\n dhid = dropout_backward(dhid, cache[i]['drop'])\n dhid = relu_backward(dhid, cache[i]['relu'])\n dhid, grads['gamma{0}'.format(i)], grads['beta{0}'.format(i)] = \\\n layernorm_backward(dhid, cache[i]['layernorm'])\n dhid, grads['W{0}'.format(i)], grads['b{0}'.format(i)] = \\\n affine_backward(dhid, cache[i]['affine']) \n grads['W{0}'.format(i)] += self.reg * self.params['W{0}'.format(i)]\n\n return grads, loss", "def fully_connected_backward(self, dUpper, cache):\n X, W, b = cache\n #############################################################################\n # TODO: Implement the affine backward pass. #\n #############################################################################\n dX = np.matmul(dUpper, W.T)\n dW = np.matmul(X.T, dUpper)\n db = np.sum(dUpper, axis=0)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dX, dW, db", "def conv_relu_backward_naive(dout, cache):\n\tconv_cache, relu_cache = cache\n\tda = relu_backward(dout, relu_cache)\n\tdx, dw, db = conv_backward_naive(da, conv_cache)\n\treturn dx, dw, db", "def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n grad_input = grad_output.clone()\n grad_input[torch.abs(input) > 1.001] = 0\n return grad_input", "def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n grad_input = grad_output.clone()\n grad_input[torch.abs(input) > 1.001] = 0\n return grad_input", "def linear_activation_backward(dA, AL, cache, activation):\n linear_cache, activation_cache = cache\n \n if activation == \"relu\":\n dZ = relu_backward(dA, activation_cache)\n dA_prev, dW, db = linear_backward(dZ, linear_cache)\n \n elif activation == \"softmax\":\n dZ = softmax_backward(dA, AL, activation_cache)\n dA_prev, dW, db = linear_backward(dZ, linear_cache)\n \n return dA_prev, dW, db", "def backward(self, grad_z):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\n\t\tgradient = self._layers[-1].backward(grad_z)\n\t\tfor i in range(len(self._layers) - 2, -1, -1):\n\t\t\tgradient = self._layers[i].backward(gradient)\n\t\treturn gradient\n\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n scores = None\n ############################################################################\n # Implementing the forward pass for the fully-connected net, computing #\n # the class scores for X and storing them in the scores variable. 
#\n ############################################################################\n\n l_input = X.copy()\n out = []\n cache = []\n for i in range(self.num_layers - 1):\n # layerwise compute the forward pass and store outputs in out list\n key = ['W' + str(i+1), 'b' + str(i+1)]\n lout, lcache = affine_sigmoid_forward(l_input, self.params[key[0]], self.params[key[1]])\n out.append(lout)\n cache.append(lcache)\n l_input = lout\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n scores, lcache = affine_forward(out[self.num_layers - 2], self.params[key[0]], self.params[key[1]])\n cache.append(lcache)\n \n # regularization parameter compute by summing square of all weight vectors\n R = 0\n for i in range(1, self.num_layers + 1):\n key = 'W' + str(i)\n R += np.sum(np.power(self.params[key], 2))\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n loss, grads = 0.0, {}\n\n ########################\n # Backward pass to compute the loss and gradients\n ########################\n\n loss, dscore = softmax_loss(scores, y)\n # Apply regularization of the loss \n loss = loss + 0.5 * self.reg * R\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n dx, grads[key[0]], grads[key[1]] = affine_backward(dscore, cache[self.num_layers - 1])\n grads[key[0]] += self.reg * self.params[key[0]] \n\n for i in range(self.num_layers - 1, 0, -1):\n key = ['W' + str(i), 'b' + str(i)]\n dx, grads[key[0]], grads[key[1]] = affine_sigmoid_backward(dx, cache[i-1])\n # Apply regularization to the gradients\n grads[key[0]] += self.reg * self.params[key[0]]\n\n return loss, grads", "def conv_relu_backward(dout, cache):\n conv_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dx, dw, db = conv_backward_fast(da, conv_cache)\n return dx, dw, db", "def conv_relu_backward(dout, cache):\n conv_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dx, dw, db = conv_backward_fast(da, conv_cache)\n return dx, dw, db", "def backprop(model, loss_function, optimizer, batch, device):\n model.train()\n model.to(device)\n optimizer.zero_grad()\n\n inputs, targets = batch[0], batch[1]\n\n inputs = inputs.to(device)\n targets = targets.to(device)\n\n outputs = model(inputs)\n loss = loss_function(outputs, targets)\n loss.backward()\n optimizer.step()", "def backward(self, input, grad_output):\r\n relu_grad = input > 0\r\n c = grad_output*relu_grad\r\n if c.ndim == 1:\r\n c = vector_conversion(c, 5)\r\n else:\r\n c = array_conversion(c, 5)\r\n return c", "def L_model_backward(AL, Y, caches):\n pass", "def backward_pass(self, delta):\n\n a = config['learning_rate']\n y = config['momentum_gamma']\n m = config['momentum']\n l = config['L2_penalty']\n\n # print(\"shape of delta incoming: \", delta.shape, \"shape of x: \", self.x.shape)\n self.d_x = delta.T @ self.x\n # print(\"SHAPE OF GRADIENT: \", self.d_x.shape)\n\n # gradient momentum\n self.w_inc = (a * self.d_x.T) + (y * self.d_v) - l * self.w\n \n # saving \n if m:\n self.d_v = self.w_inc\n else:\n self.d_v = np.zeros(self.w.shape)\n\n # backprop for bias weights\n x_0 = np.ones([len(delta), 1])\n\n self.d_b = delta.T @ x_0\n\n # print(\"shape of BIAS GRAD: \", self.d_b.shape)\n\n self.d_w = delta @ self.w.T\n # print(\"shape of w.T: \", self.w.T.shape, \"shape of RETURN delta: \", self.d_w.shape)\n #print(self.w.shape)\n return self.d_w", "def optimize(self, loss):\n loss.backward()\n self.optimizer.step()\n self.optimizer.zero_grad()", "def backward_step(activations, targets, layers):\n param_grads = 
collections.deque() # List of parameter gradients for each layer\n output_grad = None # The error gradient at the output of the current layer\n # Propagate the error backwards through all the layers.\n # Use reversed to iterate backwards over the list of layers.\n for i, layer in enumerate(reversed(layers)):\n cur_layer_idx = len(layers) - i - 1\n if cur_layer_idx <= NUM_LAYERS_SKIP:\n # implement short circuit here\n if layer.is_fc_layer:\n grads = [0.0 for _ in range(layer.W.shape[0]*layer.W.shape[1]+layer.W.shape[1])]\n else:\n # normal gradient computation \n Y = activations.pop() # Get the activations of the last layer on the stack\n # Compute the error at the output layer.\n # The output layer error is calculated different then hidden layer error.\n if output_grad is None:\n input_grad = layer.get_input_grad(Y, targets)\n else: # output_grad is not None (layer is not output layer)\n input_grad = layer.get_input_grad(Y, output_grad)\n # Get the input of this layer (activations of the previous layer)\n X = activations[-1]\n # Compute the layer parameter gradients used to update the parameters\n grads = layer.get_params_grad(X, output_grad)\n param_grads.appendleft(grads)\n # Compute gradient at output of previous layer (input of current layer):\n output_grad = input_grad\n return list(param_grads) # Return the parameter gradients", "def backward(cls, grad_out, activated_out):\n new_grad = grad_out.copy()\n new_grad[activated_out == 0] = 0\n return new_grad", "def _layer_backprop(self, dZ, layer, use_relu=True):\n\n b = self.__getattribute__('b_'+layer)\n w = self.__getattribute__('w_' + layer)\n a = self.__getattribute__('a_' + layer)\n z_prev = self.__getattribute__('z_prev_' + layer)\n\n dA = np.array(dZ, copy=True)\n if use_relu:\n dA[a <= 0] = 0\n\n m = z_prev.shape[0]\n\n # correct mult\n dW = np.dot(dA.T, z_prev)/m\n db = np.sum(dA, axis=0, keepdims=True)/m\n dZ_prev = np.dot(dA, w)\n\n return dZ_prev, dW, db", "def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n\n sp = F.softplus(input)\n grad_sp = -torch.expm1(sp)\n\n tsp = F.tanh(sp)\n grad_tsp = (1 - tsp * tsp) * grad_sp\n grad = input * grad_tsp + tsp\n return grad", "def backward(self, upstream_grad):\n # couple upstream gradient with local gradient, the result will be sent back to the Linear layer\n self.dZ = upstream_grad * self.A*(1-self.A)", "def backward(self, bottom, top, propagate_down):\n loss = 0.\n top_diff = top[0].diff()\n bottom_data = bottom[0].data()\n # initialize the sub diff\n if propagate_down:\n bottom_diff = bottom[0].init_diff(setzero=False)\n for i in range(self._group):\n top_sub_diff = self._top_sub[i].init_diff(setzero=False)\n bottom_sub_data = self._bottom_sub[i].data()\n in_start = i * self._blocksize\n in_end = in_start + self._blocksize\n out_start = i * self._num_kernels\n out_end = out_start + self._num_kernels\n # Since the convolutional layers will need the input data,\n # we will need to provide them.\n bottom_sub_data[:] = bottom_data[:, :, :, in_start:in_end]\n top_sub_diff[:] = top_diff[:, :, :, out_start:out_end]\n loss += self._conv_layers[i].backward(\n [self._bottom_sub[i]], [self._top_sub[i]], propagate_down)\n if propagate_down:\n bottom_sub_diff = self._bottom_sub[i].init_diff(setzero=False)\n bottom_diff[:, :, :, in_start:in_end] = bottom_sub_diff\n return loss" ]
[ "0.75506145", "0.74210715", "0.708932", "0.7074831", "0.7008279", "0.69728214", "0.6948082", "0.69344884", "0.69324756", "0.6914606", "0.6914606", "0.687406", "0.68707025", "0.68107206", "0.67995775", "0.67714584", "0.67714584", "0.67669773", "0.67323184", "0.67095137", "0.67062384", "0.66799694", "0.6671418", "0.66630554", "0.66605073", "0.662886", "0.66233695", "0.6623193", "0.6623193", "0.6607942", "0.659628", "0.6555399", "0.6528165", "0.65276915", "0.65259445", "0.6513422", "0.650949", "0.6506214", "0.6505494", "0.65002334", "0.6499686", "0.64935666", "0.64788765", "0.6477079", "0.64746124", "0.64696926", "0.6455877", "0.6450522", "0.6421331", "0.6405127", "0.6405", "0.6390263", "0.6387402", "0.63705707", "0.63612664", "0.6344396", "0.6343048", "0.6332729", "0.63317436", "0.6322869", "0.632104", "0.632104", "0.63204587", "0.63183993", "0.6313032", "0.63061696", "0.63055646", "0.62994814", "0.6299233", "0.62914354", "0.6288193", "0.62852573", "0.6279225", "0.62736017", "0.6273171", "0.6265504", "0.62624645", "0.6261882", "0.6261273", "0.62499", "0.6242067", "0.62417424", "0.624104", "0.624104", "0.62392133", "0.62349963", "0.6215734", "0.6207363", "0.6207363", "0.62050897", "0.6203001", "0.62014604", "0.6196785", "0.61962193", "0.6194496", "0.61863774", "0.6180068", "0.61738145", "0.61711967", "0.6170491" ]
0.6278595
73
Computes the forward pass for a layer of rectified linear units (ReLUs).
def relu_forward(x): out = None ########################################################################### # TODO: Implement the ReLU forward pass. # ########################################################################### #out = np.zeros(x.shape) #np.clip(x, 0, None, out) out = np.empty_like(x) #faster than zeros np.clip(x, 0, None, out) #out = x #out [out < 0] = 0 #print(x) #print(out) ########################################################################### # END OF YOUR CODE # ########################################################################### cache = x return out, cache
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_relus(self):\n\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs", "def update_relus(self):\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs\n for pos, module in self.model._features_extractor._modules.items():\n for layer in module:\n if isinstance(layer, LeakyReLU):\n layer.register_backward_hook(relu_backward_hook_function)\n layer.register_forward_hook(relu_forward_hook_function)", "def update_relus(self):\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs\n for pos, module in self.model.features._modules.items():\n if isinstance(module, ReLU):\n module.register_backward_hook(relu_backward_hook_function)\n module.register_forward_hook(relu_forward_hook_function)", "def update_relus(self):\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs\n for pos, module in self.model.named_modules():\n if isinstance(module, ReLU):\n module.register_backward_hook(relu_backward_hook_function)\n module.register_forward_hook(relu_forward_hook_function)", "def update_relus(self):\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there 
is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs\n for pos, module in self.model.named_modules():\n if isinstance(module, nn.ReLU):\n module.register_backward_hook(relu_backward_hook_function)\n module.register_forward_hook(relu_forward_hook_function)", "def update_relus(self):\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs\n for pos, module in self.model.encoder._modules.items():\n if isinstance(module, ReLU):\n module.register_backward_hook(relu_backward_hook_function)\n module.register_forward_hook(relu_forward_hook_function)", "def forward(self, x):\n res = self.residual(x)\n x = self.gcn(x)\n x = self.tcn(x) + res\n return self.relu(x)", "def L_model_forward(X, parameters):\n pass", "def relu_forward(x):\n ############################################################################\n # TODO: Implement the ReLU forward pass. #\n ############################################################################\n ############################################################################\n # START OF YOUR CODE #\n ############################################################################\n out = x\n out[out<0] = 0\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n return out", "def forward(self, x):\n assert not torch.isnan(x).any(), f\"NaN in input {x}\"\n x = F.relu(self.l1(x))\n x = F.relu(self.l2(x))\n x = self.l3(x)\n return x", "def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x", "def relu_forward(self, x):\n #out = None\n #############################################################################\n # TODO: Implement the ReLU forward pass. #\n #############################################################################\n out = np.array(x, copy=True)\n out[out <= 0] = 0\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = x\n return out, cache", "def relu_forward(x):\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. 
#\n ###########################################################################\n out = x.copy()\n out[x<=0] = 0\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache", "def forward(self, x):\n # Pass the input through all the layers apllying ReLU activation, but the last\n for layer in self.fc_layers[:-1]:\n x = F.relu(layer(x))\n # Pass the result through the output layer apllying hyperbolic tangent function\n x = torch.tanh(self.fc_layers[-1](x))\n # Return the better action for the input state\n return x", "def relu_forward(x):\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. #\n ###########################################################################\n out = x * (x > 0)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache", "def forward(self, state):\n x = state\n for layer in self.linear_layers[:-1]:\n x = F.relu(layer(x))\n x = self.linear_layers[-1](x)\n return x", "def _relu(layer):\n return tf.nn.relu(layer)", "def affine_layernorm_relu_forward(x, w, b, gamma, beta, ln_param):\n fc_out, fc_cache = affine_forward(x, w, b)\n norm_out, norm_cache = layernorm_forward(fc_out, gamma, beta, ln_param)\n out, relu_cache = relu_forward(norm_out)\n cache = (fc_cache, norm_cache, relu_cache)\n return out, cache", "def relu_forward(x):\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. 
#\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n out = np.where(x<=0, 0, x)\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache", "def forward(self, x):\r\n y = self.en_fc1(x)\r\n y = F.relu(y)\r\n y = self.en_fc2(y)\r\n y = F.relu(y)\r\n y = self.en_fc3(y)\r\n y = F.relu(y)\r\n\r\n mean = self.en_mu(y)\r\n stddev_p = self.en_log(y)\r\n \r\n n = x.shape[0]\r\n z = torch.randn(n,self.latent_dim)\r\n std = torch.exp(stddev_p/2.0)\r\n z = z.mul(std) + mean\r\n \r\n xhat = self.de_fc1(z)\r\n xhat = F.relu(xhat)\r\n xhat = self.de_fc2(xhat)\r\n xhat = F.relu(xhat)\r\n xhat = self.de_fc3(xhat)\r\n xhat = F.sigmoid(xhat)\r\n \r\n return y,mean,stddev_p,z,xhat", "def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x.squeeze(0)", "def L_model_forward(X, parameters, use_batchnorm, dropout):\n layer_input = X\n caches = []\n dropout_cache = {} # cache for dropout layer\n batchnorm_cache = {} # cache for batchnorm layer\n num_layers = len([key for key in parameters.keys() if key.startswith('W')])\n use_dropout = dropout != 1\n\n for layer_idx in range(1, num_layers):\n W, b = parameters['W' + str(layer_idx)], parameters['b' + str(layer_idx)]\n layer_input, layer_cache, batchnorm_cache[layer_idx] = linear_activation_forward(layer_input, W, b, 'relu', use_batchnorm)\n caches.append(layer_cache)\n\n if use_dropout:\n layer_input, dropout_cache[layer_idx] = dropout_forward(layer_input, dropout)\n\n # last layer\n W, b = parameters['W' + str(num_layers)], parameters['b' + str(num_layers)]\n last_post_activation, layer_cache, _ = linear_activation_forward(layer_input, W, b, 'softmax', False)\n caches.append(layer_cache)\n\n return last_post_activation, caches, batchnorm_cache, dropout_cache", "def forward(self, x):\n assert not torch.isnan(x).any(), f\"NaN in input {x}\"\n x = F.relu(self.l1(x))\n x = F.relu(self.l2(x))\n x = self.l3(x)\n return torch.clamp(x, -1, 1)", "def forward(self, x: torch.Tensor):\n x = self.linear1(x)\n x = torch.relu(x)\n x = self.linear2(x)\n x = self.dropout(x)\n return x", "def _layer_forward(self, z_prev, layer, use_relu=True):\n\n self.__dict__['z_prev_'+layer] = z_prev\n b = self.__getattribute__('b_'+layer)\n w = self.__getattribute__('w_'+layer)\n\n dim_out = w.shape[0]\n\n # simplification due to np broadcasting\n a = z_prev@w.T + b\n\n z = relu(a) if use_relu else a\n\n return (a, z)", "def test_rnnt_loss_basic_forward_no_grad(self):\n logits, targets, logit_lengths, target_lengths = rnnt_utils.get_basic_data(self.device)\n logits.requires_grad_(False)\n F.rnnt_loss(logits, targets, logit_lengths, target_lengths)", "def forward(self, x):\n x = self.first_deconv(x)\n x = self.first_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.second_deconv(x)\n x = self.second_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.third_deconv(x)\n x = self.third_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.fourth_deconv(x)\n x = self.fourth_batch_norm(x)\n\n x = self.fifth_deconv(x)\n x = self.fifth_batch_norm(x)\n\n x = self.sixth_deconv(x)\n x = self.sixth_batch_norm(x)\n\n x = self.seventh_deconv(x)\n\n # sigmoid_out = nn.functional.sigmoid(x)\n tanh_out = 
nn.functional.tanh(x)\n\n out = (tanh_out + 1) * 255 / 2\n\n # print 'out.shape =', out.shape\n\n return out", "def relu_forward_hook_function(module, ten_in, ten_out):\n self.forward_relu_outputs.append(ten_out)", "def relu_forward_hook_function(module, ten_in, ten_out):\n self.forward_relu_outputs.append(ten_out)", "def relu_forward_hook_function(module, ten_in, ten_out):\n self.forward_relu_outputs.append(ten_out)", "def relu_forward_hook_function(module, ten_in, ten_out):\n self.forward_relu_outputs.append(ten_out)", "def relu_forward_hook_function(module, ten_in, ten_out):\n self.forward_relu_outputs.append(ten_out)", "def forward(self, x):\n return self.relu(self.conv(x))", "def relu_forward_hook_function(module, ten_in, ten_out):\n self.forward_relu_outputs.append(ten_out)", "def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.conv1_BN(x)\r\n x = F.relu(x)\r\n x = self.conv1_dp(x)\r\n x = self.Block2_1(x)\r\n x = self.Block2_2(x)\r\n x = self.Block3_1(x)\r\n x = self.Block3_2(x)\r\n x = self.Block3_3(x)\r\n x = self.Block3_4(x)\r\n x = self.Block4_1(x)\r\n x = self.Block4_2(x)\r\n x = self.Block4_3(x)\r\n x = self.Block4_4(x)\r\n x = self.Block5_1(x)\r\n x = self.Block5_2(x)\r\n x = self.MP(x)\r\n x = x.view(x.size(0),-1)\r\n x = self.fc(x)\r\n \r\n return x", "def lrelu(self):\n return self.add_layer(lrelu)", "def relu_forward(x):\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. #\n ###########################################################################\n out = np.maximum(0, x)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache", "def _fc_relu(prev_layer, layer_name, weights, reuse_scope=False):\n\n with tf.name_scope(layer_name):\n if reuse_scope is False:\n w_np, b_np = _get_weights(layer_name, weights)\n\n with tf.variable_scope(layer_name):\n w = tf.get_variable('W', shape=tuple(w_np.shape),\n dtype=w_np.dtype, trainable=False,\n initializer=tf.constant_initializer(w_np))\n\n b = tf.get_variable('b', shape=tuple(b_np.shape),\n dtype=b_np.dtype, trainable=False,\n initializer=tf.constant_initializer(b_np))\n\n else:\n with tf.variable_scope(layer_name, reuse=True):\n w = tf.get_variable('W')\n b = tf.get_variable('b')\n return tf.nn.relu(tf.nn.bias_add(tf.matmul(prev_layer, w), b))", "def relu_grad(self, X):\n X[X<=0]=0\n X[X>0]=1\n return X", "def conv_relu_forward(x, w, b, conv_param):\n a, conv_cache = conv_forward_fast(x, w, b, conv_param)\n out, relu_cache = relu_forward(a)\n cache = (conv_cache, relu_cache)\n return out, cache", "def conv_relu_forward(x, w, b, conv_param):\n a, conv_cache = conv_forward_fast(x, w, b, conv_param)\n out, relu_cache = relu_forward(a)\n cache = (conv_cache, relu_cache)\n return out, cache", "def L_model_forward(X, parameters):\n L = len(parameters.items())//2\n caches = []\n A_prev = X.copy()\n for l in range(1, L):\n Wl = parameters['W'+str(l)]\n bl = parameters['b'+str(l)]\n A_prev, cache = linear_activation_forward(A_prev, Wl, bl, activation='relu')\n caches.append(cache)\n Al, cache = linear_activation_forward(A_prev, parameters['W'+str(L)], parameters['b'+str(L)], activation='sigmoid')\n caches.append(cache)\n return Al, caches", "def forward_propagation(X, parameters):\n L= len(parameters)//2\n AL = X\n for i in range(1,L):\n A_Prev = AL\n z = tf.add(tf.matmul(parameters[\"W\"+str(i)], 
A_Prev),parameters[\"b\"+str(i)]) \n z= tf.layers.batch_normalization (z,axis =0, center =True, scale = True, training= True)\n #z=BatchNormalization(z, is_training = True)\n AL = tf.nn.relu(z)\n z = tf.add(tf.matmul(parameters[\"W\" +str(L)],AL),parameters[\"b\"+str(L)])\n #z=BatchNormalization(z, is_training = True)\n z= tf.layers.batch_normalization (z, axis=0,center =True, scale = True, training= True)\n return z", "def forward(self, state):\r\n x = F.relu(self.linear1(state))\r\n x = F.relu(self.linear2(x))\r\n #x = torch.tanh(self.linear3(x))\r\n #x = F.relu(self.linear3(x))\r\n #x = nn.LeakyReLU(self.linear3(x), negative_slope=0.1)# .negativ_slope nur für leakyReLU relevant\r\n x = F.leaky_relu(self.linear3(x), 0.1)\r\n #x = F.softmax(self.linear3(x), dim=0)\r\n \r\n return x#.negativ_slope", "def forward(self, input_):\n out = self.fc(input_)\n out = self.bn(out)\n out = self.relu(out)\n return torch.cat([out, input_], dim=1)", "def relu_forward(x):\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. #\n ###########################################################################\n out = np.maximum(x,0)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache", "def forward(self, parameters: List[Tuple[str, nn.Parameter]]) -> torch.Tensor:\n # calculate regularized loss \n reg_loss = regularize(parameters, self.weight_decay, self.norm)\n\n return reg_loss", "def forward(self, x):\n l1 = self.dropout(F.relu(self.linear1(x)))\n l2 = self.linear2(l1)\n return l2", "def forward(self, x): \n # Layer 1\n x = F.elu(self.conv1(x)) # bsize x l1_channels x 1 x Nsamples\n x = self.batchnorm1(x)\n x = F.dropout(x, 0.25)\n x = x.permute(0, 2, 1, 3) # bsize x 1 x l1_channels x Nsamples\n\n # Layer 2\n x = self.padding1(x)\n x = F.elu(self.conv2(x)) # bsize x l2_channels x l1_channels x Nsamples\n x = self.batchnorm2(x) \n x = F.dropout(x, 0.25)\n x = self.pooling2(x) # bsize x l2_channels x floor(l1_channels/2) x floor(Nsamples/4)\n\n # Layer 3\n x = self.padding2(x)\n x = F.elu(self.conv3(x)) # bsize x l3_channels x floor(l1_channels/2) x floor(Nsamples/4)\n x = self.batchnorm3(x)\n x = F.dropout(x, 0.25)\n x = self.pooling3(x) # bsize x l3_channels x floor(l1_channels/4) x floor(Nsamples/16)\n\n # Fully-connected Layer\n x = x.view(-1, self.fc1.in_features) # bsize x (l3_channels*floor(l1_channels/4)*floor(Nsamples/16))\n x = F.sigmoid(self.fc1(x)) # bisze x self.fc1.out_features \n \n if self.fc1.out_features == 1:\n x = x.view(-1) # bsize (1D if 1 output unit)\n \n return x", "def forward(self, \n z_prev : torch.Tensor, \n rnn_input : torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n hidden = self.relu(self.lin_z_to_hidden(z_prev) + self.lin_rnn_to_hidden(rnn_input))\n loc = self.hidden_to_loc(hidden)\n scale = F.softplus(self.hidden_to_scale(hidden))\n return loc, scale", "def forward(self, x):\n\n def run0(x, dummy):\n lout1 = self.lconv1(x)\n out1 = self.conv1(lout1)\n lout2 = self.lconv2(out1 + lout1)\n out2 = self.conv2(lout2)\n lout3 = self.lconv3(out2 + lout2)\n out3 = self.conv3(lout3)\n lout4 = self.lconv4(out3 + lout3)\n out4 = self.conv4(lout4)\n lout5 = self.lconv5(out4 + lout4)\n out5 = self.conv5(lout5)\n lout6 = self.lconv6(out5 + lout5)\n out6 = self.conv6(lout6)\n lout7 = self.lconv7(out6 + lout6)\n out7 = self.conv7(lout7)\n mat = 
out7[:, :, :, None] + out7[:, :, None, :]\n cur = mat\n if self.num_1d:\n output1d = self.final_1d(out7)\n return cur, output1d\n else:\n return cur\n\n dummy = torch.Tensor(1)\n dummy.requires_grad = True\n if self.num_1d:\n cur, output1d = checkpoint(run0, x, dummy)\n else:\n cur = checkpoint(run0, x, dummy)\n\n def run1(cur):\n first = True\n for lm, m in zip(self.lconvtwos[:7], self.convtwos[:7]):\n if first:\n cur = lm(cur)\n\n first = False\n else:\n cur = lm(cur) + cur\n cur = m(cur) + cur\n return cur\n\n def run2(cur):\n for lm, m in zip(self.lconvtwos[7:13], self.convtwos[7:13]):\n cur = lm(cur) + cur\n cur = m(cur) + cur\n return cur\n\n def run3(cur):\n for lm, m in zip(self.lconvtwos[13:], self.convtwos[13:]):\n cur = lm(cur) + cur\n cur = m(cur) + cur\n\n cur = self.final(cur)\n cur = 0.5 * cur + 0.5 * cur.transpose(2, 3)\n return cur\n\n cur = checkpoint(run1, cur)\n cur = checkpoint(run2, cur)\n cur = checkpoint(run3, cur)\n\n if self.num_1d:\n return cur, output1d\n else:\n return cur", "def training_step(self, x):\n self.train()\n rec_error, feat, y = self.forward(x)\n # Reconstruction Loss\n rec_loss = torch.mean(rec_error)\n loss = rec_loss\n\n self.zero_grad()\n loss.backward()\n self.optimizer.step()\n self.eval()\n print('Rec Loss: {}'.format(rec_loss.cpu().data))\n print()\n return loss, feat, y", "def forward(self, x):\n\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out", "def forward(self, state):#forward pass\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n return torch.tanh(self.fc3(x))", "def training_step(self, x):\n self.train() # Sets network to train mode\n rec_error, feat, y = self.forward(x)\n # Reconstruction Loss\n rec_loss = torch.mean(rec_error)\n loss = rec_loss\n\n self.zero_grad()\n loss.backward()\n self.optimizer.step()\n self.eval() # Sets network to evaluation mode\n print('Rec Loss: {}'.format(rec_loss.cpu().data))\n print()\n return loss, feat, y", "def forward(self, x):\n lay1 = self.linear1(x)\n lay1 = nn.functional.relu(lay1)\n\n lay2 = self.linear2(lay1)\n lay2 = nn.functional.relu(lay2)\n \n lay3_1 = self.linear3_1(lay2)\n lay3_1 = nn.functional.relu(lay3_1)\n\n ## CHECK HERE TOO!!!\n out_1 = self.linear4_1(lay3_1)\n out_1 = out_1.view(-1, ) # reshape it to a 1d-array\n \n # taken care by BCEWithLogitsLoss\n # out_1 = nn.functional.softmax(out_1, dim=0) \n \n lay3_2 = self.linear3_2(lay2)\n lay3_2 = nn.functional.relu(lay3_2)\n \n out_2 = self.linear4_2(lay3_2)\n \n return out_1, out_2", "def forward(self, state):\n x = F.relu(self.linear1(state))\n x = F.relu(self.linear2(x))\n x = self.linear3(x)\n return x", "def forward(self, x):\n # x = state\n \n x = F.relu(self.input(x))\n x = self.output(x)\n \n return x", "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n x = self.pool1(F.relu(self.batch1(self.conv1(x))))\n x = self.pool2(F.relu(self.batch2(self.conv2(x))))\n x = F.relu(self.batch3a(self.conv3a(x)))\n x = self.pool3(F.relu(self.batch3b(self.conv3b(x))))\n x = F.relu(self.batch4a(self.conv4a(x)))\n x = self.pool4(F.relu(self.batch4b(self.conv4b(x))))\n x = F.relu(self.batch5a(self.conv5a(x)))\n x = self.pool5(F.relu(self.batch5b(self.conv5b(x))))\n x = self.avgpool(x)\n x = 
x.reshape(x.shape[0], -1)\n out = self.fc1(x)\n\n# raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def forward(self, x):\n # define feedforward behavior, applying activations as necessary\n out = self.leaky_relu(self.conv1(x))\n out = self.leaky_relu(self.conv2(out))\n out = self.leaky_relu(self.conv3(out))\n out = self.leaky_relu(self.conv4(out))\n\n out = self.res_blocks(out)\n\n out = self.leaky_relu(self.deconv1(out))\n out = self.leaky_relu(self.deconv2(out))\n out = self.leaky_relu(self.deconv3(out))\n\n # tanh applied to last layer\n out = F.tanh(self.out_layer(out))\n out = torch.clamp(out, min=-0.5, max=0.5)\n\n return out", "def forward(self, x, h_last=None):\n\n # forward pass of the RNNLM network\n\n embedding = self.emb(x)\n y, h = self.gru(embedding, h_last)\n out = self.linear(y)\n return out, h", "def forward(self,x):\n x = x.transpose(1,2).contiguous()\n x = F.leaky_relu(self.fc1(x), 0.2)\n x = F.leaky_relu(self.bn2(self.fc2(x)), 0.2)\n x = F.leaky_relu(self.bn3(self.fc3(x)), 0.2)\n x = torch.sigmoid(self.fc4(x))\n return x.transpose(1,2)", "def _register_relu_hooks(self):\n\n # Save forward propagation output of the ReLU layer\n def _record_output(module, input_, output):\n self.relu_outputs.append(output)\n\n def _clip_gradients(module, grad_in, grad_out):\n # keep positive forward propagation output\n relu_output = self.relu_outputs.pop()\n relu_output[relu_output > 0] = 1\n\n # keep positive backward propagation gradient\n positive_grad_out = torch.clamp(grad_out[0], min=0.0)\n\n # generate modified guided gradient\n modified_grad_out = positive_grad_out * relu_output\n\n return (modified_grad_out, )\n\n for _, module in self.model.named_modules():\n if isinstance(module, nn.ReLU):\n forward_handle = module.register_forward_hook(_record_output)\n backward_handle = module.register_backward_hook(_clip_gradients)\n self.handle.append(forward_handle)\n self.handle.append(backward_handle)", "def relu(input, inplace=False):\n return FunctionLib.apply(\n 'Relu', input.device, [input],\n outputs=[input if inplace else None], alpha=0.)", "def linear_forward_model(self, shot, m0, m1, frequencies, return_parameters=[]):\n\n # Sanitize the input\n if not np.iterable(frequencies):\n frequencies = [frequencies]\n\n # Local references\n solver = self.solver\n solver.model_parameters = m0 # this updates dt and the number of steps so that is appropriate for the current model\n\n mesh = solver.mesh\n\n d = solver.domain\n dt = solver.dt\n nsteps = solver.nsteps\n source = shot.sources\n\n m1_padded = m1.with_padding()\n\n # Storage for the field\n u1hats = dict()\n for nu in frequencies:\n u1hats[nu] = 0.0\n\n # Setup data storage for the forward modeled data\n if 'simdata' in return_parameters:\n simdata = dict()\n\n # Setup data storage for the forward modeled data (in time, if it is needed, and it frequently is)\n if 'simdata_time' in return_parameters:\n simdata_time = np.zeros((solver.nsteps, shot.receivers.receiver_count))\n\n # Storage for the time derivatives of p\n if 'dWaveOp0' in return_parameters:\n dWaveOp0 = dict()\n u0hats = dict()\n for nu in frequencies:\n dWaveOp0[nu] = 0.0\n u0hats[nu] = 0.0\n\n # Storage for the time derivatives of p\n if 'dWaveOp1' in return_parameters:\n dWaveOp1 = dict()\n for nu in frequencies:\n dWaveOp1[nu] = 0.0\n\n subsample_indices = self._compute_subsample_indices(frequencies)\n\n # Step k = 0\n # p_0 is a zero array because if we assume the input signal is causal\n # 
and we assume that the initial system (i.e., p_(-2) and p_(-1)) is\n # uniformly zero, then the leapfrog scheme would compute that p_0 = 0 as\n # well. ukm1 is needed to compute the temporal derivative.\n solver_data = solver.SolverData()\n\n # (***) Given that these modeling tools are for frequency methods, we do not\n # have the time derivatives / wave operator derivatives (aka dWaveOp) in\n # time available. This saves space, but as a result we have to recompute\n # it.\n # Also, because implicit and some ODE methods require uhat_1 at times k\n # and k+1, we need uhat_0 at k, k+1, and k+2, so all of this rigamaroll\n # is to get that.\n solver_data_u0 = solver.SolverData()\n\n # For u0, set up the right hand sides\n rhs_u0_k = np.zeros(mesh.shape(include_bc=True))\n rhs_u0_kp1 = np.zeros(mesh.shape(include_bc=True))\n rhs_u0_k = self._setup_forward_rhs(rhs_u0_k, source.f(0*dt))\n rhs_u0_kp1 = self._setup_forward_rhs(rhs_u0_kp1, source.f(1*dt))\n\n # compute u0_kp1 so that we can compute dWaveOp0_k (needed for u1)\n solver.time_step(solver_data_u0, rhs_u0_k, rhs_u0_kp1)\n\n # compute dwaveop_0 (k=0) and allocate space for kp1 (needed for u1 time step)\n dWaveOp0_k = solver.compute_dWaveOp('time', solver_data_u0)\n dWaveOp0_kp1 = dWaveOp0_k.copy()\n\n solver_data_u0.advance()\n # from here, it makes more sense to refer to rhs_u0 as kp1 and kp2, because those are the values we need\n # to compute u0_kp2, which is what we need to compute dWaveOp0_kp1\n rhs_u0_kp1, rhs_u0_kp2 = rhs_u0_k, rhs_u0_kp1 # to reuse the allocated space and setup the swap that occurs a few lines down\n\n for k in range(nsteps):\n\n uk = solver_data.k.primary_wavefield\n uk_bulk = mesh.unpad_array(uk)\n\n t = k*dt\n\n # Record the data at t_k\n if 'simdata_time' in return_parameters:\n shot.receivers.sample_data_from_array(uk_bulk, k, data=simdata_time)\n\n for nu in frequencies:\n idx = subsample_indices[nu]\n if np.mod(k, idx) == 0:\n u1hats[nu] += uk*(np.exp(-1j*2*np.pi*nu*t)*dt*idx)\n\n if 'dWaveOp0' in return_parameters:\n for nu in frequencies:\n idx = subsample_indices[nu]\n if np.mod(k, idx) == 0:\n u0hats[nu] += solver_data_u0.k.primary_wavefield*(np.exp(-1j*2*np.pi*nu*t)*dt*idx)\n\n # Note, we compute result for k+1 even when k == nsteps-1. We need\n # it for the time derivative at k=nsteps-1.\n\n # See comment (***) above.\n # compute u0_kp2 so we can get dWaveOp0_kp1 for the rhs for u1\n rhs_u0_kp1, rhs_u0_kp2 = rhs_u0_kp2, rhs_u0_kp1\n rhs_u0_kp2 = self._setup_forward_rhs(rhs_u0_kp2, source.f((k+2)*dt))\n solver.time_step(solver_data_u0, rhs_u0_kp1, rhs_u0_kp2)\n\n # shift the dWaveOp0's (ok at k=0 because they are equal then)\n # The derivative component is computed after the time step so that\n # information from time k+1 can be used to compute the derivative.\n dWaveOp0_k, dWaveOp0_kp1 = dWaveOp0_kp1, dWaveOp0_k\n dWaveOp0_kp1 = solver.compute_dWaveOp('time', solver_data_u0)\n\n solver_data_u0.advance()\n\n if k == 0:\n rhs_k = m1_padded*(-1*dWaveOp0_k)\n rhs_kp1 = m1_padded*(-1*dWaveOp0_kp1)\n else:\n rhs_k, rhs_kp1 = rhs_kp1, m1_padded*(-1*dWaveOp0_kp1)\n\n solver.time_step(solver_data, rhs_k, rhs_kp1)\n\n # When k is the nth step, the next step is uneeded, so don't swap\n # any values. 
This way, uk at the end is always the final step\n if(k == (nsteps-1)): break\n\n # Don't know what data is needed for the solver, so the solver data\n # handles advancing everything forward by one time step.\n # k-1 <-- k, k <-- k+1, etc\n solver_data.advance()\n\n # Compute time derivative of p at time k\n if 'dWaveOp0' in return_parameters:\n for nu in frequencies:\n dWaveOp0[nu] = solver.compute_dWaveOp('frequency', u0hats[nu],nu)\n\n # Compute time derivative of p at time k\n if 'dWaveOp1' in return_parameters:\n for nu in frequencies:\n dWaveOp1[nu] = solver.compute_dWaveOp('frequency', u1hats[nu],nu)\n\n # Record the data at t_k\n if 'simdata' in return_parameters:\n for nu in frequencies:\n simdata[nu] = shot.receivers.sample_data_from_array(mesh.unpad_array(u1hats[nu]))\n\n retval = dict()\n\n if 'dWaveOp0' in return_parameters:\n retval['dWaveOp0'] = dWaveOp0\n if 'wavefield1' in return_parameters:\n _u1hats = dict()\n _u1hats = {nu: mesh.unpad_array(u1hats[nu], copy=True) for nu in frequencies}\n retval['wavefield1'] = _u1hats\n if 'dWaveOp1' in return_parameters:\n retval['dWaveOp1'] = dWaveOp1\n if 'simdata' in return_parameters:\n retval['simdata'] = simdata\n if 'simdata_time' in return_parameters:\n retval['simdata_time'] = simdata_time\n\n return retval", "def forward(self, x):\n residues = []\n # Downward Pass\n x = self.layers[0](x.unsqueeze(1))\n for layer in self.layers[1:self.half]:\n x = layer(x)\n residues.insert(0, x)\n\n # Upward Pass\n for idx, layer in enumerate(self.layers[self.half:(len(self.layers)-1)]):\n x = layer(x, residues[idx])\n x = self.layers[-1](x)\n\n return(x)", "def forward(self, state):\n '''\n state = F.relu(self.conv1(state))\n state = F.relu(self.conv2(state))\n state = F.relu(self.conv3(state))\n state = F.relu(self.fc1(state))\n \n action = F.relu(self.fc2(state))\n \n return action\n '''\n \n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n \n return x", "def forward(self, x):\n\n x = F.max_pool2d(F.relu(self.batch_norm1(self.conv1(x))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm2(self.conv2(x))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm3_b(self.conv3_b(F.relu(self.batch_norm3_a(self.conv3_a(x)))))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm4_b(self.conv4_b(F.relu(self.batch_norm4_a(self.conv4_a(x)))))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm5_b(self.conv5_b(F.relu(self.batch_norm5_a(self.conv5_a(x)))))), 3, stride=2, padding=1)\n x = self.avg_pool(x).view(-1,512)\n out = self.linear(x)\n\n return out", "def forward(self, x, h, u, time, feat_kernels_enc_conv, feat_bias_enc_conv, feat_kernels_enc_fc, feat_bias_enc_fc, feat_kernels_enc_3dgru, feat_bias_enc_3dgru):\n\n\n conv1a_wt,conv1b_wt,conv2a_wt,conv2b_wt,conv2c_wt,conv3a_wt,conv3b_wt,conv3c_wt,conv4a_wt,conv4b_wt,conv5a_wt,conv5b_wt,conv5c_wt,conv6a_wt,conv6b_wt = feat_kernels_enc_conv\n conv1a_bias,conv1b_bias,conv2a_bias,conv2b_bias,conv2c_bias,conv3a_bias,conv3b_bias,conv3c_bias,conv4a_bias,conv4b_bias,conv5a_bias,conv5b_bias,conv5c_bias,conv6a_bias,conv6b_bias = feat_bias_enc_conv\n t_x_s_update_fc_layer, t_x_s_update_conv3d, t_x_s_reset_fc_layer, t_x_s_reset_conv3d, t_x_rs_fc_layer, t_x_rs_conv3d = feat_kernels_enc_3dgru\n t_x_s_update_bias, t_x_s_reset_bias, t_x_rs_bias = feat_bias_enc_3dgru\n\n conv1a = F.conv2d(x, conv1a_wt, bias=conv1a_bias, padding=3) #self.conv1a(x)\n rect1a = self.leaky_relu(conv1a)\n conv1b = F.conv2d(rect1a, conv1b_wt, 
bias=conv1b_bias, padding=1) #self.conv1b(rect1a)\n rect1 = self.leaky_relu(conv1b)\n pool1 = self.pool(rect1)\n \n \n conv2a = F.conv2d(pool1, conv2a_wt, bias=conv2a_bias, padding=1) #self.conv2a(pool1)\n rect2a = self.leaky_relu(conv2a)\n conv2b = F.conv2d(rect2a, conv2b_wt, bias=conv2b_bias, padding=1) #self.conv2b(rect2a)\n rect2 = self.leaky_relu(conv2b)\n conv2c = F.conv2d(pool1, conv2c_wt, bias=conv2c_bias) #self.conv2c(pool1)\n res2 = conv2c + rect2\n pool2 = self.pool(res2)\n \n \n conv3a = F.conv2d(pool2, conv3a_wt, bias=conv3a_bias, padding=1) #self.conv3a(pool2)\n rect3a = self.leaky_relu(conv3a)\n conv3b = F.conv2d(rect3a, conv3b_wt, bias=conv3b_bias, padding=1) #self.conv3b(rect3a)\n rect3 = self.leaky_relu(conv3b)\n conv3c = F.conv2d(pool2, conv3c_wt, bias=conv3c_bias) #self.conv3c(pool2)\n res3 = conv3c + rect3\n pool3 = self.pool(res3)\n \n conv4a = F.conv2d(pool3, conv4a_wt, bias=conv4a_bias, padding=1) #self.conv4a(pool3)\n rect4a = self.leaky_relu(conv4a)\n conv4b = F.conv2d(rect4a, conv4b_wt, bias=conv4b_bias, padding=1) #self.conv4b(rect4a)\n rect4 = self.leaky_relu(conv4b)\n pool4 = self.pool(rect4)\n \n \n conv5a = F.conv2d(pool4, conv5a_wt, bias=conv5a_bias, padding=1) #self.conv5a(pool4)\n rect5a = self.leaky_relu(conv5a)\n conv5b = F.conv2d(rect5a, conv5b_wt, bias=conv5b_bias, padding=1) #self.conv5b(rect5a)\n rect5 = self.leaky_relu(conv5b)\n conv5c = F.conv2d(pool4, conv5c_wt, bias=conv5c_bias) #self.conv5c(pool4)\n res5 = conv5c + rect5\n pool5 = self.pool(res5)\n \n \n conv6a = F.conv2d(pool5, conv6a_wt, bias=conv6a_bias, padding=1) #self.conv6a(pool5)\n rect6a = self.leaky_relu(conv6a)\n conv6b = F.conv2d(rect6a, conv6b_wt, bias=conv6b_bias, padding=1) #self.conv6b(rect6a)\n rect6 = self.leaky_relu(conv6b)\n res6 = pool5 + rect6\n pool6 = self.pool(res6)\n \n \n pool6 = pool6.view(pool6.size(0), -1)\n \n \n fc7 = F.linear(pool6, feat_kernels_enc_fc[0], bias=feat_bias_enc_fc[0]) #self.fc7(pool6)\n rect7 = self.leaky_relu(fc7)\n \n t_x_s_update = self.t_x_s_update(rect7, h, t_x_s_update_fc_layer, t_x_s_update_conv3d, t_x_s_update_bias)\n t_x_s_reset = self.t_x_s_reset(rect7, h, t_x_s_reset_fc_layer, t_x_s_reset_conv3d, t_x_s_reset_bias)\n \n update_gate = self.sigmoid(t_x_s_update)\n complement_update_gate = 1 - update_gate\n reset_gate = self.sigmoid(t_x_s_reset)\n \n rs = reset_gate * h\n t_x_rs = self.t_x_rs(rect7, rs, t_x_rs_fc_layer, t_x_rs_conv3d, t_x_rs_bias)\n tanh_t_x_rs = self.tanh(t_x_rs)\n \n gru_out = update_gate * h + complement_update_gate * tanh_t_x_rs\n \n return gru_out, update_gate", "def forward(self, x: torch.Tensor, context: torch.Tensor = None) -> torch.Tensor:\n residual = x\n x = self.norm(x)\n x = self.att(x, context) + residual # residual post-layernorm\n\n return x", "def forward(self, x):\n residual = x\n out = self.conv1(x)\n out = self.bn1(out)\n out += residual\n\n residual = out\n out = self.conv2(out)\n out = self.bn2(out)\n out += residual\n\n residual = out\n out = self.conv3(out)\n out = self.bn3(out)\n out += residual\n if self.apply_activation: out = self.relu(out)\n return out", "def forward(self, x: torch.Tensor) -> torch.Tensor:\n identity = x\n\n out = self.conv_reduce(x)\n out = self.relu(self.bn_reduce(out))\n\n out = self.conv_conv(out)\n out = self.relu(self.bn(out))\n\n out = self.conv_expand(out)\n out = self.bn_expand(out)\n\n res = self.shortcut(identity)\n return self.relu(res + out)", "def _AffLayerRelu_Loss(self, X):\n cache = [None]\n hid = X\n for i in range(1, self.num_layers): #hidden layers\n hidcache 
= {} \n thisW, thisb = self.params['W{0}'.format(i)], self.params['b{0}'.format(i)]\n hid, hidcache['affine'] = affine_forward(hid, thisW, thisb)\n thisbeta, thisgamma = self.params['beta{0}'.format(i)], self.params['gamma{0}'.format(i)]\n hid, hidcache['layernorm'] = layernorm_forward(hid, thisgamma, thisbeta, self.bn_params[i-1])\n hid, hidcache['relu'] = relu_forward(hid)\n cache.append(hidcache)\n #last affine\n thisW, thisb = self.params['W{0}'.format(self.num_layers)], self.params['b{0}'.format(self.num_layers)]\n scores, hidcache = affine_forward(hid, thisW, thisb)\n cache.append(hidcache)\n return cache, scores", "def backward_linear(Data):\n\n # data\n graph = tf.Graph()\n X = Data[0]\n Y_ = Data[1]\n\n if X.shape[0] != Y_.shape[0]:\n raise Exception(\"The quantity of Input X and Compare Y_ are not same!\")\n\n Loss = []\n with graph.as_default():\n print(\"This is the process of all the Dose!\")\n print(\"There are %d data in this process.\" % X.shape[0])\n print(\"Features of X: %d\" % X.shape[1])\n print(\"Learning rate is: %f\" % learning_rate)\n # Init all the parameters\n global_step = tf.Variable(0, trainable=False)\n\n STEPS = int(Epoch * X.shape[0] / BATCH_SIZE) + 1\n epoch = 0\n\n with tf.name_scope('inputs'):\n x = tf.placeholder(tf.float32, [None, Forward.INPUT_NODE], name='x_Input')\n y_ = tf.placeholder(tf.float32, [None, Forward.OUTPUT_NODE], name='y_Exact')\n y = Forward.forward_linear(x, regularizer=None)\n\n # lost function\n with tf.name_scope('loss'):\n loss_mse = tf.reduce_mean(tf.square(y - y_))\n loss = loss_mse + tf.add_n(tf.get_collection(\"losses\"))\n tf.summary.scalar('loss', loss)\n\n # Todo\n # LM algorithm\n\n # learning_rate = tf.train.exponential_decay(\n # LEARNING_RATE_BASE,\n # global_step,\n # X.shape[0] / BATCH_SIZE,\n # LEARNING_RATE_DECAY,\n # staircase=True\n # )\n\n with tf.name_scope('train'):\n # train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step)\n # train_step = tf.train.MomentumOptimizer(learning_rate, momentum=0.9).minimize(loss, global_step)\n train_step = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.99).minimize(loss, global_step)\n\n # EMA algorithm\n ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)\n ema_op = ema.apply(tf.trainable_variables())\n with tf.control_dependencies([train_step, ema_op]):\n train_op = tf.no_op(name='train')\n\n # ready for storing the model\n saver = tf.train.Saver()\n\n with tf.Session() as sess:\n init_op = tf.global_variables_initializer()\n sess.run(init_op)\n\n # Get the check point\n ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH_LINEAR)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n\n # begin multi threads\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess, coord)\n print(\"Begin the multi threads!\")\n\n # Graph\n merged = tf.summary.merge_all()\n writer = tf.summary.FileWriter(\"./logs_linear/\", sess.graph)\n\n # Training\n for i in range(STEPS):\n start = (i * BATCH_SIZE) % int(X.shape[0])\n end = start + BATCH_SIZE\n # if finish all the data\n if end >= X.shape[0]:\n end = X.shape[0]\n\n _, loss_value, step = sess.run([train_op, loss, global_step],\n feed_dict={x: X[start:end], y_: Y_[start:end]})\n\n if i % 4000 == 0:\n print(\"Steps are: %d , loss is: %g.\" % (step, loss_value))\n rs = sess.run(merged, feed_dict={x: X[start:end], y_: Y_[start:end]})\n writer.add_summary(rs, i)\n saver.save(sess, os.path.join(MODEL_SAVE_PATH_LINEAR, 
MODEL_NAME), global_step)\n\n # a round\n if end == X.shape[0]:\n # get the results\n epoch += 1\n loss_total = sess.run(loss, feed_dict={x: X, y_: Y_})\n\n Loss.append(loss_total)\n # Loss.append(loss_total*10000)\n print(\"After %d epoch(s), steps are: %d, loss total is: %g.\\n\" % (epoch, step, loss_total))\n saver.save(sess, os.path.join(MODEL_SAVE_PATH_LINEAR, MODEL_NAME), global_step)\n\n # close the multi threads\n coord.request_stop()\n coord.join(threads)\n print(\"Close the multi threads!\")\n\n return Loss", "def _forward(self, x, X, upto=None):\n if upto is not None: # cannot use 'if upto' here since it is 0-indexed\n # and layer0 is the first layer\n assert 0<=upto<=self._layer_counter\n counter = upto + 1\n else: counter = self._layer_counter\n\n y_previous, Y_previous = x, X\n # TODO: because we always need to compute F_i(X) at each layer i, this\n # is a huge overhead\n # feedforward\n for i in range(counter):\n layer = getattr(self, 'layer'+str(i))\n y, Y = layer(y_previous, Y_previous), layer(Y_previous, Y_previous)\n y_previous, Y_previous = y, Y\n\n return y", "def forward(self, state):\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n return self.fc4(x)", "def forward(self, query, key, value, mask):\n # linear -> GLU -> lightconv -> linear\n x = query\n B, T, C = x.size()\n H = self.wshare\n\n # first liner layer\n x = self.linear1(x)\n\n # GLU activation\n x = self.act(x)\n\n # convolution along frequency axis\n weight_f = F.softmax(self.weight_f, dim=-1)\n weight_f = F.dropout(weight_f, self.dropout_rate, training=self.training)\n weight_new = torch.zeros(\n B * T, 1, self.kernel_size, device=x.device, dtype=x.dtype\n ).copy_(weight_f)\n xf = F.conv1d(\n x.view(1, B * T, C), weight_new, padding=self.padding_size, groups=B * T\n ).view(B, T, C)\n\n # lightconv\n x = x.transpose(1, 2).contiguous().view(-1, H, T) # B x C x T\n weight = F.dropout(self.weight, self.dropout_rate, training=self.training)\n if self.use_kernel_mask:\n self.kernel_mask = self.kernel_mask.to(x.device)\n weight = weight.masked_fill(self.kernel_mask == 0.0, float(\"-inf\"))\n weight = F.softmax(weight, dim=-1)\n x = F.conv1d(x, weight, padding=self.padding_size, groups=self.wshare).view(\n B, C, T\n )\n if self.use_bias:\n x = x + self.bias.view(1, -1, 1)\n x = x.transpose(1, 2) # B x T x C\n x = torch.cat((x, xf), -1) # B x T x Cx2\n\n if mask is not None and not self.use_kernel_mask:\n mask = mask.transpose(-1, -2)\n x = x.masked_fill(mask == 0, 0.0)\n\n # second linear layer\n x = self.linear2(x)\n return x", "def forward(self, x):\n \n x = F.relu(self.conv1_bn(self.conv1(self.conv0_bn(x))))\n x = F.relu(self.conv2_bn(self.conv2(x)))\n x = F.relu(self.conv3_bn(self.conv3( self.maxpool2(x))))\n x = F.relu(self.conv4_bn(self.conv4( self.maxpool3(x))))\n x = self.maxpool4(x) \n x = x.view(-1, 1184)\n x = F.relu(self.fc1(x))\n x = self.dense1_bn(x)\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x)", "def forward(self, x):\n\n out = torch.relu(self.conv1(x))\n out = torch.relu(self.conv2(out))\n\n out = torch.relu(self.resnet_block(out))\n\n out = torch.relu(self.deconv1(out))\n out = torch.tanh(self.deconv2(out))\n\n return out", "def forward_step_layer(t1, t2, activation_f=torchfun.relu):\n return batch_norm_tensor(activation_f(t1.bmm(t2)))", "def forward(self, x):\r\n h_0 = torch.zeros(\r\n self.num_layers, x.size(0), self.hidden_size).to(device)\r\n\r\n c_0 = torch.zeros(\r\n self.num_layers, x.size(0), 
self.hidden_size).to(device)\r\n\r\n # Propagate input through LSTM\r\n ula, (h_out, _) = self.lstm(x, (h_0, c_0))\r\n\r\n h_out = h_out.view(-1, self.hidden_size)\r\n\r\n out = self.fc(h_out)\r\n\r\n return out", "def backward_deconvnet_relu(x):\n def grad(dy):\n return tf.nn.relu(dy)\n return tf.nn.relu(x), grad", "def L_model_forward(X, parameters):\n \n caches = []\n A = X\n L = len(parameters) // 2 # number of layers in the neural network\n\n A_zero = np.zeros((1,1))\n # Implement [LINEAR -> RELU]*(L-1). Add \"cache\" to the \"caches\" list.\n for l in range(1, L):\n A_prev = A \n A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)], parameters['b' + str(l)], activation = \"relu\")\n caches.append(cache)\n \n # Implement LINEAR -> SOFTMAX. Add \"cache\" to the \"caches\" list.\n AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], activation = \"softmax\")\n caches.append(cache)\n\n assert(AL.shape == (13,X.shape[1])) \n \n return AL, caches", "def _forward(self, x):\n\n batch_size = x.size()[0]\n x = self._data_augment(x)\n\n path_prob = self.inner_nodes(x)\n path_prob = torch.unsqueeze(path_prob, dim=2)\n path_prob = torch.cat((path_prob, 1 - path_prob), dim=2)\n\n mu = x.data.new(batch_size, 1, 1).fill_(1.0)\n penalty_ = torch.tensor(0.0).to(self.device)\n\n # Iterate through internal odes in each layer to compute the final path\n # probabilities and the regularization term.\n begin_idx = 0\n end_idx = 1\n\n for layer_idx in range(0, self.depth):\n path_prob_ = path_prob[:, begin_idx:end_idx, :]\n\n # Extract internal nodes in the current layer to compute the\n # regularization term\n penalty_ = penalty_ + self._cal_penalty(layer_idx, mu, path_prob_)\n mu = mu.view(batch_size, -1, 1).repeat(1, 1, 2)\n\n mu = mu * path_prob_ # update path probabilities\n\n begin_idx = end_idx\n end_idx = begin_idx + 2 ** (layer_idx + 1)\n\n mu = mu.view(batch_size, self.leaf_node_num_)\n\n return mu, penalty_", "def relu(x, name):\n\n with tf.name_scope(name):\n outputs = tf.nn.relu(x)\n # Return layer's output\n return outputs", "def _fc_linear(prev_layer, layer_name, weights, reuse_scope=False):\n with tf.name_scope(layer_name):\n if reuse_scope is False:\n w_np, b_np = _get_weights(layer_name, weights)\n\n with tf.variable_scope(layer_name):\n w = tf.get_variable('W', shape=tuple(w_np.shape),\n dtype=w_np.dtype, trainable=False,\n initializer=tf.constant_initializer(w_np))\n\n b = tf.get_variable('b', shape=tuple(b_np.shape),\n dtype=b_np.dtype, trainable=False,\n initializer=tf.constant_initializer(b_np))\n\n else:\n with tf.variable_scope(layer_name, reuse=True):\n w = tf.get_variable('W')\n b = tf.get_variable('b')\n return tf.nn.bias_add(tf.matmul(prev_layer, w), b)", "def forward(self, x):\n\n # x = [batch size, seq len, hid dim]\n\n x = self.dropout(torch.relu(self.fc_1(x)))\n\n # x = [batch size, seq len, pf dim]\n\n x = self.fc_2(x)\n\n # x = [batch size, seq len, hid dim]\n\n return x", "def _forward(self, X):\n firstLayer = True\n for layer, fcn in self.model.named_children():\n if 'recurrent' in layer:\n if firstLayer:\n Y, hidden = fcn(X)\n else:\n Y, hidden = fcn(Y)\n elif 'dropout' in layer:\n Y = fcn(Y)\n elif 'linear' in layer:\n Y = fcn(Y.view((Y.shape[1], Y.shape[0]*Y.shape[-1])))\n else:\n Y = fcn(Y)\n\n firstLayer = False\n\n return Y", "def grad_ReLU(self):\n y = self.x\n y[y<=0] = 0\n y[y>0] = 1\n return y\n raise NotImplementedError(\"ReLU gradient not implemented\")", "def forward(self,x):\n \n out,_ = 
self.lstm(x,self.hidden)\n out = out.detach()\n x = out[:,-1,:][-1]\n x = self.relu(x)\n if self.stochastic:\n x= x+ (x- self.memory[0]).detach()* torch.rand(x.shape) * self.sigmoid(self.SM.gamma[0])\n self.memory[0] = x.clone().detach()\n\n x = self.relu(self.layer1(x))\n if self.stochastic:\n x= x+ (x- self.memory[1]).detach()* torch.rand(x.shape) * self.sigmoid(self.SM.gamma[1])\n self.memory[1] = x.clone().detach()\n\n x = self.relu(self.layer2(x))\n if self.stochastic:\n x= x+ (x- self.memory[2]).detach()* torch.rand(x.shape) * self.sigmoid(self.SM.gamma[2])\n self.memory[2] = x.clone().detach()\n\n x = self.relu(self.layer3(x))\n if self.stochastic:\n x= x+ (x- self.memory[3]).detach()* torch.rand(x.shape) * self.sigmoid(self.SM.gamma[3])\n self.memory[3] = x.clone().detach()\n\n x = self.relu(self.layer4(x))\n return x", "def forward(self, x):\n return trl(x, self.weight, bias=self.bias)", "def forward(self, state):\n x = F.relu(self.linear1(state))\n x = F.relu(self.linear2(x))\n x = torch.tanh(self.linear3(x))\n\n return x", "def forward(self, x):\n x = self.pool(x)\n x = self.conv(x)\n x = x.reshape(x.shape[0], -1)\n x = self.relu(self.fc1(x))\n x = self.dropout1(x)\n x = self.fc2(x)\n x = self.dropout2(x)\n x = self.fc3(x)\n x = self.dropout3(x)\n x = self.fc4(x)\n\n return x", "def L_model_forward(X, parameters):\n caches = []\n A = X\n L = len(parameters) // 2\n\n for i in range(1,L):\n A_prev = A\n A , cache = linear_activation_forward(A_prev,parameters['W'+str(i)],parameters['b'+str(i)],activation = 'relu')\n caches.append(cache)\n\n AL,cache = linear_activation_forward(A,parameters['W'+str(L)],parameters['b'+str(L)],activation = 'sigmoid')\n caches.append(cache)\n\n assert (AL.shape == (1, X.shape[1]))\n\n return AL,caches", "def eval_forward(self, u):\n\n if self.eval_forward_f is None:\n\n # masked random numbers\n tt_u = tt.matrix('u')\n mu = self.mask * tt_u\n\n # scale net\n s_net = nn.FeedforwardNet(self.n_inputs, mu)\n for h in self.s_hiddens:\n s_net.addLayer(h, self.s_act)\n s_net.addLayer(self.n_inputs, 'linear')\n util.copy_model_parms(self.s_net, s_net)\n s = s_net.output\n\n # translate net\n t_net = nn.FeedforwardNet(self.n_inputs, mu)\n for h in self.t_hiddens:\n t_net.addLayer(h, self.t_act)\n t_net.addLayer(self.n_inputs, 'linear')\n util.copy_model_parms(self.t_net, t_net)\n t = t_net.output\n\n # transform u -> x\n x = mu + (1.0 - self.mask) * (tt_u * tt.exp(s) + t)\n\n # compile theano function\n self.eval_forward_f = theano.function(\n inputs=[tt_u],\n outputs=x\n )\n\n return self.eval_forward_f(u.astype(dtype))", "def forward(self, state):\n \n x = F.relu(self.fc1(state)) \n x = F.relu(self.fc2(x)) \n x = F.tanh(self.fc3(x)) \n \n \n ####x = F.relu(self.bn1(self.fc1(state)))\n ####x = F.relu(self.bn2(self.fc2(x)))\n ####x = torch.tanh(self.bn3(self.fc3(x)))\n ##x = torch.tanh(self.fc3(x))\n\n return x", "def eval_forward(self, x, u):\n\n if self.eval_forward_f is None:\n\n # conditional input\n tt_x = tt.matrix('x')\n\n # masked random numbers\n tt_u = tt.matrix('u')\n mu = self.mask * tt_u\n\n # scale net\n s_net = nn.FeedforwardNet(self.n_inputs + self.n_outputs, tt.concatenate([tt_x, mu], axis=1))\n for h in self.s_hiddens:\n s_net.addLayer(h, self.s_act)\n s_net.addLayer(self.n_outputs, 'linear')\n util.copy_model_parms(self.s_net, s_net)\n s = s_net.output\n\n # translate net\n t_net = nn.FeedforwardNet(self.n_inputs + self.n_outputs, tt.concatenate([tt_x, mu], axis=1))\n for h in self.t_hiddens:\n t_net.addLayer(h, self.t_act)\n 
t_net.addLayer(self.n_outputs, 'linear')\n util.copy_model_parms(self.t_net, t_net)\n t = t_net.output\n\n # transform (x,u) -> y\n y = mu + (1.0 - self.mask) * (tt_u * tt.exp(s) + t)\n\n # compile theano function\n self.eval_forward_f = theano.function(\n inputs=[tt_x, tt_u],\n outputs=y\n )\n\n return self.eval_forward_f(x.astype(dtype), u.astype(dtype))", "def forward_rnn(self, inputs, state, seq_lens):\n x = nn.functional.relu(self.fc1(inputs))\n lstm_out = self.lstm(\n x, [torch.unsqueeze(state[0], 0),\n torch.unsqueeze(state[1], 0)])\n action_out = self.action_branch(lstm_out[0])\n self._cur_value = torch.reshape(self.value_branch(lstm_out[0]), [-1])\n return action_out, [\n torch.squeeze(lstm_out[1][0], 0),\n torch.squeeze(lstm_out[1][1], 0)\n ]", "def rnn_forward(self, x, h0, U=None, W=None, b=None):\n\n U = U if U is not None else self.U\n W = W if W is not None else self.W\n b = b if b is not None else self.b\n\n x = x.transpose(1, 0, 2)\n h = [h0]\n cache = []\n\n for x_item in x:\n h_item, cache_item = self.rnn_step_forward(x_item, h[-1], U, W, b)\n cache.append(cache_item)\n h.append(h_item)\n\n return np.array(h[1:]).transpose(1, 0, 2), cache # Batch major h", "def test_lu_forward_sub():\t\n\t# test 1\n\tL = np.array([\n\t\t[ 2, 3,-4, 2],\n\t\t[-2, 1,-2, 1],\n\t\t[ 1,-1, 3,-1],\n\t\t[-3, 2, 2, 2]])\t\n\n\tb = np.array([4, -8, 9, 6])\n\n\ty = lu_forward_sub(L, b) \t\t\n\ty_soln = np.array([4,0,5,8])\t\t\t\t\t\t# correct output of LU_FORWARD_SUB\n\tassert norm(y - y_soln) < 1.e-10\n\n\t# test 2\n\tL2 = np.array([\n\t\t [0.01, 0., 0., 0., 0., 0., 0., 0., 0., 0., 1],\n\t\t [-100., 0.01, 0., 0., 0., 0., 0., 0., 0., 0., 100],\n\t\t [0., -100., 0.01, 0., 0., 0., 0., 0., 0., 0., 10000],\n\t\t [0., 0., -100., 0.01, 0., 0., 0., 0., 0., 0., 1000000],\n\t\t [0., 0., 0., -100., 0.01, 0., 0., 0., 0., 0., 100000000],\n\t\t [0., 0., 0., 0., -100., 0.01, 0., 0., 0., 0., 10000000000],\n\t\t [0., 0., 0., 0., 0., -100., 0.01, 0., 0., 0., 1000000000000],\n\t\t [0., 0., 0., 0., 0., 0., -100., 0.01, 0., 0., 100000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., -100., 0.01, 0., 10000000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 0., -100, 0.01, 1000000000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 0., 0., -100., 100000000000000000000]])\n\n\tb2 = np.array ([[1.01], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [0.]])\n\n\ty2 = lu_forward_sub(L2, b2) \t\t\n\ty_soln2 = np.array([1.01, -101.99, 98.01, 98.01, 98.01, 98.01, 98.01, 98.01, 98.01, 98.01, 99])\t\t\t\t\t\t# correct output of LU_FORWARD_SUB\n\tassert norm(y2 - y_soln2) < 1.e-10" ]
[ "0.6775336", "0.6629373", "0.6602302", "0.6523369", "0.6508466", "0.64465874", "0.63842785", "0.626676", "0.620465", "0.6178559", "0.61525744", "0.613782", "0.60611176", "0.6054329", "0.6047199", "0.6046748", "0.59962803", "0.5989819", "0.596913", "0.5962442", "0.5948833", "0.5906844", "0.58905584", "0.58871675", "0.58764976", "0.5870903", "0.58586717", "0.5845341", "0.5845341", "0.5845341", "0.5845341", "0.5845341", "0.58413637", "0.5828903", "0.5816473", "0.5806257", "0.5797344", "0.57896036", "0.5786045", "0.5777932", "0.5777932", "0.576875", "0.5755664", "0.5753631", "0.5750117", "0.57488096", "0.5746764", "0.5743141", "0.5742796", "0.5735113", "0.57305264", "0.5728202", "0.5723594", "0.57125366", "0.56956923", "0.5693678", "0.56916714", "0.56858885", "0.56747913", "0.5667629", "0.5661473", "0.5658391", "0.565188", "0.56464094", "0.5646385", "0.56417483", "0.562597", "0.5620617", "0.5614897", "0.56126827", "0.5611585", "0.56091064", "0.5607595", "0.56053907", "0.5589892", "0.55817324", "0.5579997", "0.5563873", "0.55628693", "0.5559111", "0.55570084", "0.55480576", "0.55456144", "0.55437374", "0.55433285", "0.553467", "0.553437", "0.55331665", "0.5530059", "0.5528652", "0.55226314", "0.55177397", "0.5509812", "0.54999095", "0.5498392", "0.5498208", "0.5491015", "0.5489993", "0.54873264", "0.54863995" ]
0.60177517
16
Computes the backward pass for a layer of rectified linear units (ReLUs).
def relu_backward(dout, cache): dx, x = None, cache ########################################################################### # TODO: Implement the ReLU backward pass. # ########################################################################### #print(dout) dx = np.empty_like(dout) np.copyto(dx, dout) dx[x < 0] = 0 #print(dx) ########################################################################### # END OF YOUR CODE # ########################################################################### return dx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def relu_backward(self, dUpper, cache):\n x = cache\n #############################################################################\n # TODO: Implement the ReLU backward pass. #\n #############################################################################\n x = np.array(x , copy=True)\n x[x <= 0] = 0\n x[x > 0] = 1\n drelu = dUpper * x\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return drelu", "def backward(ctx, grad_output):\n loss, reg, u, lbda = ctx.saved_tensors\n\n device = u.device\n\n # do clever computations\n eps = 1e-10\n grad, = torch.autograd.grad(loss, u, only_inputs=True,\n retain_graph=True)\n x = (u - eps * grad).data\n lbda = lbda.data\n\n prox_x = check_tensor(\n np.array([prox_tv.tv1_1d(xx, eps * lbda) for xx in x]),\n device=device,\n )\n grad_u = (u - prox_x) / eps\n grad_lbda = reg.clone()\n return (torch.ones(0), grad_u, grad_lbda)", "def conv_relu_backward(dout, cache):\n conv_cache, relu_cache = cache\n da = layers.relu_backward(dout, relu_cache)\n dx, dw, db = layers.conv_backward_fast(da, conv_cache)\n return dx, dw, db", "def conv_relu_backward(dout, cache):\n conv_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dx, dw, db = conv_backward_fast(da, conv_cache)\n return dx, dw, db", "def conv_relu_backward(dout, cache):\n conv_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dx, dw, db = conv_backward_fast(da, conv_cache)\n return dx, dw, db", "def backward_deconvnet_relu(x):\n def grad(dy):\n return tf.nn.relu(dy)\n return tf.nn.relu(x), grad", "def relu_backward(dout, cache):\n dx, x = None, cache\n ###########################################################################\n # TODO: Implement the ReLU backward pass. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n dx = np.where(x<=0, 0, 1) * dout\n \n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def backward_pass(self, loss):\n\n self.optimizer.zero_grad()\n self.optimizer.backward(loss)\n self.optimizer.step()", "def backward_D(self):\n self.loss_D.backward()", "def backward(ctx, grad_L):\n A, T = ctx.saved_tensors\n\n grad_A = None\n grad_T = None\n\n B = A.shape[0]\n\n # We only need to compute gradients for tensors that are flagged to\n # require gradients!\n if ctx.needs_input_grad[0]:\n grad_A = (A - T) / B\n\n if ctx.needs_input_grad[1]:\n grad_T = (T - A) / B\n\n return grad_A, grad_T", "def _backward(loss):\n\n loss.backward()", "def backward_pass(self, grad):\n pass", "def L_model_backward(AL, Y, caches):\n pass", "def relu_backward(dout, cache):\n dx, x = None, cache\n ###########################################################################\n # TODO: Implement the ReLU backward pass. 
#\n ###########################################################################\n dx = dout.copy()\n dx[x<=0] = 0\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def backward(self, inputs, grad_loss_input):\n raise NotImplementedError", "def relu_backward(dout, x):\n ############################################################################\n # TODO: Implement the ReLU backward pass. #\n ############################################################################\n ############################################################################\n # START OF YOUR CODE #\n ############################################################################\n judge = x>0\n dx = dout*judge\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n return dx", "def backward_G(self):\n self.loss_G.backward()", "def relu_backward(dout, cache):\n dx, x = None, cache\n ###########################################################################\n # TODO: Implement the ReLU backward pass. #\n ###########################################################################\n dx = dout * (x > 0)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def relu_backward(dout, cache):\n dx, x = None, cache\n ###########################################################################\n # TODO: Implement the ReLU backward pass. #\n ###########################################################################\n dx = dout * (x > 0)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def backward(self, lhs: Tensor, rhs: Tensor, acc_grad: np.ndarray):\n raise NotImplementedError", "def relu_backward_hook_function(module, grad_in, grad_out):\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)", "def relu_backward_hook_function(module, grad_in, grad_out):\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)", "def relu_backward_hook_function(module, grad_in, grad_out):\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)", "def relu_backward_hook_function(module, grad_in, grad_out):\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n 
modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)", "def relu_backward_hook_function(module, grad_in, grad_out):\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)", "def backward(ctx, dy):\n y = ctx.y\n if ctx.eagerly_discard_variables:\n del ctx.y\n for i in range(len(ctx.reversible_blocks) - 1, -1, -1):\n y, dy = ctx.reversible_blocks[i].backward_pass(y, dy, not ctx.eagerly_discard_variables)\n if ctx.eagerly_discard_variables:\n del ctx.reversible_blocks\n return dy, None, None", "def relu_backward_hook_function(module, grad_in, grad_out):\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)", "def L_model_backward(AL, Y, caches):\n grads = {}\n L = len(caches) # the number of layers\n m = AL.shape[1]\n Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL\n\n dAL = -(np.divide(Y,AL)-np.divide(1-Y,1-AL))\n \"\"\"\n cache = caches[-1]\n grads[\"dA\"+str(L)],grads[\"dW\"+str(L)],grads[\"db\"+str(L)] = linear_activation_backward(dAL,cache,activation = 'sigmoid')\n\n for i in reversed(range(L-1)):\n grads[\"dA\"+str(i+1)],grads[\"dW\"+str(i+1)],grads[\"db\"+str(i+1)] = linear_activation_backward(grads[\"dA\"+str(i+2)],caches[i],activation = 'relu')\n \"\"\"\n\n current_cache = caches[-1]\n grads[\"dA\" + str(L)], grads[\"dW\" + str(L)], grads[\"db\" + str(L)] = linear_backward(sigmoid_backward(dAL, current_cache[1]),current_cache[0])\n\n for l in reversed(range(L - 1)):\n # lth layer: (RELU -> LINEAR) gradients.\n # Inputs: \"grads[\"dA\" + str(l + 2)], caches\". Outputs: \"grads[\"dA\" + str(l + 1)] , grads[\"dW\" + str(l + 1)] , grads[\"db\" + str(l + 1)]\n ### START CODE HERE ### (approx. 5 lines)\n current_cache = caches[l]\n dA_prev_temp, dW_temp, db_temp = linear_backward(sigmoid_backward(dAL, current_cache[1]), current_cache[0])\n grads[\"dA\" + str(l + 1)] = dA_prev_temp\n grads[\"dW\" + str(l + 1)] = dW_temp\n grads[\"db\" + str(l + 1)] = db_temp\n ### END CODE HERE ###\n\n return grads", "def _AffLayerReluDrop_Backprop(self, dscores, cache):\n grads = {}\n loss = None\n #Last Softmax Layer\n ##Add L2 Regularization loss\n loss = 0.5 * self.reg * np.sum(self.params['W{0}'.format(self.num_layers)]**2)\n ##Calculate grads for last Affine\n dhid, grads['W{0}'.format(self.num_layers)], grads['b{0}'.format(self.num_layers)] =\\\n affine_backward(dscores, cache[-1])\n grads['W{0}'.format(self.num_layers)] += self.reg * self.params['W{0}'.format(self.num_layers)]\n\n for i in range(self.num_layers-1, 0, -1): #hidden layers\n ##L2 Reg. 
loss\n loss += 0.5 * self.reg * np.sum(self.params['W{0}'.format(i)]**2)\n ##Calculate grads for [{affine-Batchnorm-relu} X (L-1)]\n dhid = dropout_backward(dhid, cache[i]['drop'])\n dhid = relu_backward(dhid, cache[i]['relu'])\n dhid, grads['gamma{0}'.format(i)], grads['beta{0}'.format(i)] = \\\n layernorm_backward(dhid, cache[i]['layernorm'])\n dhid, grads['W{0}'.format(i)], grads['b{0}'.format(i)] = \\\n affine_backward(dhid, cache[i]['affine']) \n grads['W{0}'.format(i)] += self.reg * self.params['W{0}'.format(i)]\n\n return grads, loss", "def backward(self, inputs, gradients, **kwargs):\n grad_relu = inputs > 0\n return gradients * grad_relu", "def relu_backward(dout, cache):\n #raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n x=cache\n dout[x<=0]=0\n dx=dout\n return dx", "def backward(self, loss):\n global_timer.my_timer.start_profile(\"BWD\")\n mgr = PatrickStarManager()\n mgr.set_training_stage(TrainingStage.BWD)\n\n for param_fp16 in self.client.chunk_based_param_fp16:\n param_fp16.ps_attr.bwd_used_cnt = 0\n\n self.optimizer.zero_grad()\n if self.loss_scaler:\n self.loss_scaler.backward(loss)\n else:\n loss.backward()\n mgr.update_margin_mem()\n global_timer.my_timer.finish_profile(\"BWD\")", "def affine_relu_backward(dout, cache):\n fc_cache, relu_cache = cache\n da = layers.relu_backward(dout, relu_cache)\n dx, dw, db = layers.affine_backward(da, fc_cache)\n return dx, dw, db", "def relu_backward(dA, internal_params):\n\n Z = internal_params\n dZ = np.array(dA, copy=True)\n\n dZ[Z <= 0] = 0\n\n return dZ", "def backward(\n self, X: np.ndarray, y: np.ndarray, lr: float, reg: float = 0.0\n ) -> float:\n y_hat = self.forward(X)\n\n y_one_hot = self.one_hot_encode(y)\n loss = CrossEntropy.forward(y_one_hot, y_hat)\n\n d_layer = CrossEntropy.backward(y, y_hat)\n\n w_grads = []\n b_grads = []\n\n for idx, layer in reversed(list(enumerate(self.layers))):\n # Not output layer\n if (idx + 1) < len(self.layers):\n next_layer = self.layers[idx + 1]\n\n d_layer = d_layer.dot(next_layer.w.T)\n d_layer = layer.activation_func.backward(d_layer, layer.activated_out)\n\n d_w = layer.linear_in.T.dot(d_layer) + 2 * reg * layer.w\n d_b = np.sum(d_layer, axis=0)\n\n w_grads.insert(0, d_w)\n b_grads.insert(0, d_b)\n\n self.optimizer.step(self.layers, w_grads, b_grads, lr)\n\n if self.norm_weights:\n w_norm = max(np.linalg.norm(l.w) for l in self.layers) / len(self.layers)\n b_norm = max(np.linalg.norm(l.w) for l in self.layers) / len(self.layers)\n for layer in self.layers:\n layer.w /= w_norm\n layer.b /= b_norm\n\n return loss", "def backward_pass(self):\r\n # the gradient of cross-entropy on top of softmax is (t-y)\r\n back_output = (self.targets - self.y) / self.y.shape[0]\r\n\r\n for layer in reversed(self.layers):\r\n back_output = layer.backward_pass(back_output)", "def backward_and_step(self, loss):\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()", "def backward(self, X, Y, P, H, lmd):\n G = - (Y - P)\n _, Npts = P.shape\n n_layers = len(self.hidden_units)\n\n gradients_W = []\n gradients_b = []\n\n for i in range(n_layers, -1, -1):\n\n if i == 0:\n grad_W = G @ X.T * (1/Npts) + 2 * lmd * self.W[i]\n grad_b = G @ np.ones((Npts, 1)) * (1/Npts)\n\n else:\n\n h = H[i - 1]\n w = self.W[i]\n grad_W = G @ h.T * (1/Npts) + 2 * lmd * w\n grad_b = G @ np.ones((Npts, 1)) * (1/Npts)\n\n G = w.T @ G\n G = G * 
np.where(h > 0, 1, 0)\n\n gradients_W.append(grad_W)\n gradients_b.append(grad_b)\n\n return gradients_W, gradients_b", "def leakrelu_backward(dA, Z):\n\n # When z <= 0, dz = 0.01\n derivative = np.ones(Z.shape)\n derivative[Z < 0] = 0.01\n\n dZ = dA * derivative\n\n assert (dZ.shape == Z.shape)\n\n return dZ", "def update_relus(self):\n\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs", "def conv_bn_relu_backward(dout, cache):\n conv_cache, sbn_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dan, dgamma, dbeta = spatial_batchnorm_backward(da, sbn_cache)\n dx, dw, db = conv_backward_fast(dan, conv_cache)\n return dx, dw, db, dgamma, dbeta", "def backward(self, gradient):\n raise NotImplementedError()", "def relu_backward(dout, cache):\n dx, x = None, cache\n ###########################################################################\n # TODO: Implement the ReLU backward pass. #\n ###########################################################################\n dx = dout * ((x > 0).astype(int))\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def affine_relu_backward(dout, cache):\n fc_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dx, dw, db = affine_backward(da, fc_cache)\n return dx, dw, db", "def affine_relu_backward(dout, cache):\n fc_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dx, dw, db = affine_backward(da, fc_cache)\n return dx, dw, db", "def affine_relu_backward(dout, cache):\n fc_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dx, dw, db = affine_backward(da, fc_cache)\n return dx, dw, db", "def affine_relu_backward(dout, cache):\n fc_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dx, dw, db = affine_backward(da, fc_cache)\n return dx, dw, db", "def relu_backward(dout, cache):\n dx, x = None, cache\n dx = dout\n dout[x <= 0] = 0.0\n return dx", "def backward(self, gradient):\n #TODO\n pass", "def backward(self, gradient):\n #TODO\n pass", "def L_model_backward(AL, Y, caches, use_batchnorm, batchnorm_cache, dropout_cache):\n\n grads = {}\n num_layers = len(caches)\n use_dropout = len(dropout_cache) != 0\n\n last_layer_idx = num_layers\n dA, dW, db = linear_backward(AL - Y, caches[-1]['linear_cache'])\n grads['dA' + str(last_layer_idx)] = dA\n grads['dW' + str(last_layer_idx)] = dW\n grads['db' + str(last_layer_idx)] = db\n\n for layer_idx in reversed(range(1, num_layers)):\n if use_dropout:\n dA = dropout_backward(dA, dropout_cache[layer_idx])\n\n dA, dW, db = linear_activation_backward(dA , caches[layer_idx - 1], \"relu\", use_batchnorm, batchnorm_cache[layer_idx])\n grads['dA' + str(layer_idx)] = dA\n grads['dW' + str(layer_idx)] = dW\n grads['db' + str(layer_idx)] = db\n\n return grads", "def 
backwards(delta,params,name='',activation_deriv=sigmoid_deriv):\n # everything you may need for this layer\n W = params['W' + name]\n b = params['b' + name]\n X, pre_act, post_act = params['cache_' + name]\n # your code here\n # do the derivative through activation first\n # then compute the derivative W,b, and X\n \n delta_pre = activation_deriv(post_act) * delta\n # (in_dim, out_dim) = (in_dim, examples) @ (examples, out_dim)\n grad_W = X.transpose() @ delta_pre\n grad_b = np.sum(delta_pre, axis=0, keepdims=True) # (1, out_dim)\n # (examples, in_dim) = (examples, out_dim) @ (out_dim, in_dim)\n grad_X = delta_pre @ W.transpose()\n\n # store the gradients\n params['grad_W' + name] = grad_W\n params['grad_b' + name] = grad_b\n return grad_X", "def update_relus(self):\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs\n for pos, module in self.model._features_extractor._modules.items():\n for layer in module:\n if isinstance(layer, LeakyReLU):\n layer.register_backward_hook(relu_backward_hook_function)\n layer.register_forward_hook(relu_forward_hook_function)", "def relu_backward(dout, cache):\r\n x = cache\r\n dx = dout * (x > 0)\r\n return dx", "def grad_ReLU(self):\n y = self.x\n y[y<=0] = 0\n y[y>0] = 1\n return y\n raise NotImplementedError(\"ReLU gradient not implemented\")", "def backward(self, dout):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n for l in range(len(self.layers)-1,-1,-1):\n act_dout = self.activations[l].backward(dout)\n dout = self.layers[l].backward(act_dout)\n ########################\n # END OF YOUR CODE #\n #######################\n\n return", "def _AffLayerRelu_Backprop(self, dscores, cache):\n grads = {}\n loss = None\n #Last Softmax Layer\n ##Add L2 Regularization loss\n loss = 0.5 * self.reg * np.sum(self.params['W{0}'.format(self.num_layers)]**2)\n ##Calculate grads for last Affine\n dhid, grads['W{0}'.format(self.num_layers)], grads['b{0}'.format(self.num_layers)] =\\\n affine_backward(dscores, cache[-1])\n grads['W{0}'.format(self.num_layers)] += self.reg * self.params['W{0}'.format(self.num_layers)]\n\n for i in range(self.num_layers-1, 0, -1): #hidden layers\n ##L2 Reg. 
loss\n loss += 0.5 * self.reg * np.sum(self.params['W{0}'.format(i)]**2)\n ##Calculate grads for [{affine-Batchnorm-relu} X (L-1)]\n dhid = relu_backward(dhid, cache[i]['relu'])\n dhid, grads['gamma{0}'.format(i)], grads['beta{0}'.format(i)] = \\\n layernorm_backward(dhid, cache[i]['layernorm'])\n dhid, grads['W{0}'.format(i)], grads['b{0}'.format(i)] = \\\n affine_backward(dhid, cache[i]['affine']) \n grads['W{0}'.format(i)] += self.reg * self.params['W{0}'.format(i)]\n\n return grads, loss", "def conv_relu_backward_naive(dout, cache):\n\tconv_cache, relu_cache = cache\n\tda = relu_backward(dout, relu_cache)\n\tdx, dw, db = conv_backward_naive(da, conv_cache)\n\treturn dx, dw, db", "def backward_pass(self, delta):\n\n a = config['learning_rate']\n y = config['momentum_gamma']\n m = config['momentum']\n l = config['L2_penalty']\n\n # print(\"shape of delta incoming: \", delta.shape, \"shape of x: \", self.x.shape)\n self.d_x = delta.T @ self.x\n # print(\"SHAPE OF GRADIENT: \", self.d_x.shape)\n\n # gradient momentum\n self.w_inc = (a * self.d_x.T) + (y * self.d_v) - l * self.w\n \n # saving \n if m:\n self.d_v = self.w_inc\n else:\n self.d_v = np.zeros(self.w.shape)\n\n # backprop for bias weights\n x_0 = np.ones([len(delta), 1])\n\n self.d_b = delta.T @ x_0\n\n # print(\"shape of BIAS GRAD: \", self.d_b.shape)\n\n self.d_w = delta @ self.w.T\n # print(\"shape of w.T: \", self.w.T.shape, \"shape of RETURN delta: \", self.d_w.shape)\n #print(self.w.shape)\n return self.d_w", "def _AffBatchReluDrop_Backprop(self, dscores, cache):\n grads = {}\n loss = None\n #Last Softmax Layer\n ##Add L2 Regularization loss\n loss = 0.5 * self.reg * np.sum(self.params['W{0}'.format(self.num_layers)]**2)\n ##Calculate grads for last Affine\n dhid, grads['W{0}'.format(self.num_layers)], grads['b{0}'.format(self.num_layers)] =\\\n affine_backward(dscores, cache[-1])\n grads['W{0}'.format(self.num_layers)] += self.reg * self.params['W{0}'.format(self.num_layers)]\n\n for i in range(self.num_layers-1, 0, -1): #hidden layers\n ##L2 Reg. 
loss\n loss += 0.5 * self.reg * np.sum(self.params['W{0}'.format(i)]**2)\n ##Calculate grads for [{affine-Batchnorm-relu-drop} X (L-1)]\n dhid = dropout_backward(dhid, cache[i]['drop'])\n dhid = relu_backward(dhid, cache[i]['relu'])\n dhid, grads['gamma{0}'.format(i)], grads['beta{0}'.format(i)] = \\\n batchnorm_backward_alt(dhid, cache[i]['batchnorm'])\n dhid, grads['W{0}'.format(i)], grads['b{0}'.format(i)] = \\\n affine_backward(dhid, cache[i]['affine']) \n grads['W{0}'.format(i)] += self.reg * self.params['W{0}'.format(i)]\n\n return grads, loss", "def on_iter_backward(self, runner):\n runner.optimizer.zero_grad()\n runner.loss.backward()\n runner.optimizer.step()", "def linear_backward(dZ, cache):\n pass", "def update_relus(self):\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs\n for pos, module in self.model.features._modules.items():\n if isinstance(module, ReLU):\n module.register_backward_hook(relu_backward_hook_function)\n module.register_forward_hook(relu_forward_hook_function)", "def relu_backward(dA, Z):\n\n dZ = np.array(dA, copy=True) # just converting dz to a correct object.\n\n # When z <= 0, you should set dz to 0 as well.\n dZ[Z <= 0] = 0\n\n assert (dZ.shape == Z.shape)\n\n return dZ", "def unrolled_backward(self, trn_X, trn_y, val_X, val_y, lr, w_optim, amended: bool = False):\n # do virtual step (calc w`)\n self.virtual_step(trn_X, trn_y, lr, w_optim)\n\n # calc unrolled loss\n normal_alphas, reduce_alphas = self.v_net.alpha_weights()\n loss = self.v_net.loss(val_X, val_y) # L_val(w`)\n\n # Loss += SUM[ none - mean(others) ]\n loss += self.normal_none_penalty * sum([(alpha[:, -1] - alpha[:, :-1].mean()).sum() for alpha in normal_alphas])\n loss += self.reduce_none_penalty * sum([(alpha[:, -1] - alpha[:, :-1].mean()).sum() for alpha in reduce_alphas])\n\n # compute gradient\n v_alphas = tuple(self.v_net.alphas())\n v_weights = tuple(self.v_net.weights())\n v_grads = torch.autograd.grad(loss, v_alphas + v_weights)\n dalpha = v_grads[:len(v_alphas)]\n dw = v_grads[len(v_alphas):]\n\n hessian = self.amended_gradient(dw, trn_X, trn_y) if amended else self.compute_hessian(dw, trn_X, trn_y)\n\n # update final gradient = dalpha - lr*hessian\n with torch.no_grad():\n for alpha, da, h in zip(self.net.alphas(), dalpha, hessian):\n alpha.grad = da - lr * h", "def backward(self, grad_z):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\n\t\tgradient = self._layers[-1].backward(grad_z)\n\t\tfor i in range(len(self._layers) - 2, -1, -1):\n\t\t\tgradient = self._layers[i].backward(gradient)\n\t\treturn gradient\n\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def backward(self, upstream_grad):\n # 
couple upstream gradient with local gradient, the result will be sent back to the Linear layer\n self.dZ = upstream_grad * self.A*(1-self.A)", "def L_model_backward(AL, Y, caches, X):\n grads = {}\n L = len(caches) # the number of layers\n m = AL.shape[1]\n Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL\n \n # Initializing the backpropagation \n dZ = AL - Y # Derivative of Cross Entropy Loss with Softmax\n \n # Lth layer (SOFTMAX -> LINEAR) gradients. Inputs: \"AL, Y, caches\". Outputs: \"grads[\"dAL\"], grads[\"dWL\"], grads[\"dbL\"]\n current_cache = caches[L-1]\n grads[\"dA\" + str(L)], grads[\"dW\" + str(L)], grads[\"db\" + str(L)] = linear_activation_backward(dZ, AL, current_cache, activation = \"softmax\")\n \n for l in reversed(range(L-1)):\n # lth layer: (RELU -> LINEAR) gradients.\n current_cache = caches[l]\n dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads[\"dA\" + str(l + 2)], AL, current_cache, activation = \"relu\")\n grads[\"dA\" + str(l + 1)] = dA_prev_temp\n grads[\"dW\" + str(l + 1)] = dW_temp\n grads[\"db\" + str(l + 1)] = db_temp\n\n return grads", "def backward(self):\n gradient = blah\n return gradient", "def backward(self):\n gradient = blah\n return gradient", "def backward_step(activations, targets, layers):\n param_grads = collections.deque() # List of parameter gradients for each layer\n output_grad = None # The error gradient at the output of the current layer\n # Propagate the error backwards through all the layers.\n # Use reversed to iterate backwards over the list of layers.\n for i, layer in enumerate(reversed(layers)):\n cur_layer_idx = len(layers) - i - 1\n if cur_layer_idx <= NUM_LAYERS_SKIP:\n # implement short circuit here\n if layer.is_fc_layer:\n grads = [0.0 for _ in range(layer.W.shape[0]*layer.W.shape[1]+layer.W.shape[1])]\n else:\n # normal gradient computation \n Y = activations.pop() # Get the activations of the last layer on the stack\n # Compute the error at the output layer.\n # The output layer error is calculated different then hidden layer error.\n if output_grad is None:\n input_grad = layer.get_input_grad(Y, targets)\n else: # output_grad is not None (layer is not output layer)\n input_grad = layer.get_input_grad(Y, output_grad)\n # Get the input of this layer (activations of the previous layer)\n X = activations[-1]\n # Compute the layer parameter gradients used to update the parameters\n grads = layer.get_params_grad(X, output_grad)\n param_grads.appendleft(grads)\n # Compute gradient at output of previous layer (input of current layer):\n output_grad = input_grad\n return list(param_grads) # Return the parameter gradients", "def affine_batchnorm_relu_backward(dout, cache):\n fc_cache, norm_cache, relu_cache = cache\n d_norm_out = relu_backward(dout, relu_cache)\n d_affine_out, dgamma, dbeta = batchnorm_backward_alt(d_norm_out, norm_cache)\n dx, dw, db = affine_backward(d_affine_out, fc_cache)\n return dx, dw, db, dgamma, dbeta", "def backward(self, delta):\n if self.activation_type == \"sigmoid\":\n grad = self.grad_sigmoid()\n\n elif self.activation_type == \"tanh\":\n grad = self.grad_tanh()\n\n elif self.activation_type == \"ReLU\":\n grad = self.grad_ReLU()\n\n return grad * delta", "def update_relus(self):\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] 
= 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs\n for pos, module in self.model.named_modules():\n if isinstance(module, ReLU):\n module.register_backward_hook(relu_backward_hook_function)\n module.register_forward_hook(relu_forward_hook_function)", "def backward_step(activations, targets, layers):\n param_grads = collections.deque() # List of parameter gradients for each layer\n output_grad = None # The error gradient at the output of the current layer\n # Propagate the error backwards through all the layers.\n # Use reversed to iterate backwards over the list of layers.\n for layer in reversed(layers): \n Y = activations.pop() # Get the activations of the last layer on the stack\n # Compute the error at the output layer.\n # The output layer error is calculated different then hidden layer error.\n if output_grad is None:\n input_grad = layer.get_input_grad(Y, targets)\n else: # output_grad is not None (layer is not output layer)\n input_grad = layer.get_input_grad(Y, output_grad)\n # Get the input of this layer (activations of the previous layer)\n X = activations[-1]\n # Compute the layer parameter gradients used to update the parameters\n grads = layer.get_params_grad(X, output_grad)\n param_grads.appendleft(grads)\n # Compute gradient at output of previous layer (input of current layer):\n output_grad = input_grad\n return list(param_grads) # Return the parameter gradients", "def update_relus(self):\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs\n for pos, module in self.model.named_modules():\n if isinstance(module, nn.ReLU):\n module.register_backward_hook(relu_backward_hook_function)\n module.register_forward_hook(relu_forward_hook_function)", "def affine_layernorm_relu_backward(dout, cache):\n fc_cache, norm_cache, relu_cache = cache\n d_norm_out = relu_backward(dout, relu_cache)\n d_affine_out, dgamma, dbeta = layernorm_backward(d_norm_out, norm_cache)\n dx, dw, db = affine_backward(d_affine_out, fc_cache)\n return dx, dw, db, dgamma, dbeta", "def backward(self, out_grad, input):\n raise NotImplementedError", "def rnn_backward(dh, cache):\n dx, dh_prev, dWx, dWh, db = None, None, None, None, None\n ##############################################################################\n # TODO: Implement the backward pass for a vanilla RNN running an entire #\n # sequence of data. You should use the rnn_step_backward function that you #\n # defined above. 
#\n ##############################################################################\n \"\"\"\n x, next_h, prev_h, Wx, Wh, b = cache\n dz = (1-next_h*next_h)*dnext_h\n # THIS ERROR IS SPREAD AMONG THE\n # np.dot(x, Wx) + np.dot(prev_h, Wh) + b)\n dx = np.dot(dz,Wx.T)\n dprev_h = np.dot(dz,Wh.T)\n db = np.sum(dz,axis=0)\n dWx = np.dot(x.T,dz)\n dWh = np.dot(prev_h.T,dz)\n #d(tanh) = 1- tanh*tanh\n \"\"\"\n #pdb.set_trace()\n # dh is not result of forward prop\n # but\n N,T,H = dh.shape\n tmp_x, tmp_next_h, tmp_prev_h, tmp_Wx, tmp_Wh, tmp_b = cache[T-1]\n D = tmp_x.shape[1]\n\n\n dx = np.zeros((N,T,D))\n dh_prev = np.zeros((N,H))\n dWx = np.zeros((D,H))\n dWh = np.zeros((H,H))\n db = np.zeros((H))\n\n for i in reversed(list(range(0,T))):\n # current gradient at timestep is the upstream gradient (provided as input)\n # this may be coming from the Y as in the min_char_rnn.py (see line 59)\n # + downstream gradient provided by rnn_step_backward.\n dh_curr = dh[:,i,:] + dh_prev\n dx_, dh_prev, dWx_, dWh_, db_ = rnn_step_backward(dh_curr, cache[i])\n dWx += dWx_\n dWh += dWh_\n db += db_\n dx[:,i,:]=dx_\n\n\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n return dx, dh_prev, dWx, dWh, db", "def backward(ctx, grad_output):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n # Retrieve saved tensors and constants\n gamma, ivar, mean, input = ctx.saved_tensors\n eps = ctx.saved_tensors\n\n # Check which inputs need gradients\n input_needs_grad, gamma_needs_grad, beta_needs_grad = ctx.needs_input_grad\n\n # Get the batch size (=N)\n N, _ = grad_output.shape\n\n # reconstruct the input_norm\n input_norm = (input - mean) * ivar\n grand_input_norm = grad_output * gamma\n\n ##### Gradient wrt beta #####\n grad_beta = grad_output.sum(dim=0) if beta_needs_grad else None\n\n #### Gradient wrt gamma ####\n grad_gamma = (input_norm*grad_output).sum(dim=0) if gamma_needs_grad else None\n \n #### Gradient wrt input ####\n term1 = N*grand_input_norm \n term2 = torch.sum(grand_input_norm, dim=0)\n term3 = input_norm*torch.sum(grand_input_norm*input_norm, dim=0)\n grad_input = (1. 
/ N) * ivar * (term1 - term2 - term3) if input_needs_grad else None\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n # return gradients of the three tensor inputs and None for the constant eps\n return grad_input, grad_gamma, grad_beta, None", "def affine_batchnorm_relu_backward(dout, cache):\n af_cache, bf_cache, relu_cache = cache\n \n dbf_out = relu_backward(dout, relu_cache)\n daf_out, dgamma, dbeta = batchnorm_backward(dbf_out, bf_cache)\n dx, dw, db = affine_backward(daf_out, af_cache)\n return dx, dw, db, dgamma, dbeta", "def backward_pass(architecture,gradient_layerwise,grad_weights,grad_bias):\n \n for layer in range(len(architecture)-1,-1,-1):\n X_input,X_output,weightsi,biasi,X_input_im2col,imi,output_shapei,kernel_shapei,stridei,operationi,imxi = architecture['layer{}'.format(layer+1)]\n# print(\"Operation is:{} and Layer is: {}\".format(operationi,layer+1))\n if operationi == 'softmax': # Last layer -> Dont apply softmax in any layer other than the last layer!\n # not taking gradients here because we need dz_dX(secondlastlayer) which is y_pred - y\n continue\n \n if operationi == 'conv_bn_relu' or operationi == 'conv_relu' or operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if operationi__1 == 'softmax':\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi # .\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # .\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input_im2col)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input_im2col)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi #\n elif operationi__1 == 'maxpool': # need to do something here to fix the problem\n None\n\n elif 'flatten' in operationi__1:\n # we currently have dz_doutput of flatten -> we want dz_doutput of the conv_bn_relu before flatten\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2] # weights2\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput of flatten\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5] # i\n try:\n dz_dXi = torch.t(weightsi__1).mm(dz_dXi__1)\n except:\n dz_dXi = weightsi__1.mm(dz_dXi__1)\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n\n dz_dXi = torch.reshape(dz_dXi,(output_shapei[1]*output_shapei[2],-1))\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n dz_dweightsi = X_input_im2col.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n 
dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n dz_dbi = dz_dXi\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)# Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi) # Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi) # Can also set this to layer like in line ~800\n \n else:\n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dX2 -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n \n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n if 'sigmoid' in operationi__1: # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi__1: # ...\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n dz_dXi = torch.reshape(dz_dXi,(output_shape_current_layer[1]*output_shape_current_layer[2],-1))\n dz_dbi = torch.reshape(dz_dXi,bias_current_layer.shape)\n dz_dweightsi = X_im2col_current_layer.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n \n if operationi == 'maxpool':\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n \n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n try:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n except:\n Y = torch.t(weightsi__1).mm(dz_dXi__1) # Ensuring valid matrix multiplication here\n \n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n\n 
dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n \n if operationi__1 == 'conv_sigmoid' or operationi__1 == 'conv_bn_sigmoid': # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n else:\n dz_dXi[X_output <= 0] = 0\n\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n \n dz_dXinput = torch.zeros((X_input.shape))\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+1)][0] # output = output of maxpool\n\n dz_dXoutput = torch.reshape(dz_dXoutput,(output_shapei[0],X_input_im2col.shape[2]))\n \n for i in range(output_shapei[0]):\n for j in range(X_input_im2col.shape[2]):\n Xi2ci = X_im2col_current_layer[i,:,:]\n idx = torch.argmax(Xi2ci[:,j]).item()\n value = imxi[i][(idx,j)]\n dz_dXinput[value[0],value[1],value[2]] += float(dz_dXoutput[i,j])\n\n# dz_dXinput = torch.reshape(dz_dXinput,output_shapei)\n \n X_prev_im2col = architecture['layer{}'.format(layer)][4]\n X_output_prev = architecture['layer{}'.format(layer)][1]\n X_output_prev = torch.reshape(X_output_prev,dz_dXinput.shape)\n X_input_prev = architecture['layer{}'.format(layer)][0]\n prev_bias = architecture['layer{}'.format(layer)][3]\n output_shape_prev = architecture['layer{}'.format(layer)][6]\n prev_operation = architecture['layer{}'.format(layer)][9]\n \n if prev_operation == 'conv_sigmoid' or prev_operation == 'conv_bn_sigmoid':\n dz_dXinput *= sigmoid(X_output_prev)*(1-sigmoid(X_output_prev)) # Taking the derivative of the sigmoid function\n else:\n dz_dXinput[X_output_prev <= 0] = 0\n \n if len(dz_dXinput.shape) == 3:\n dz_dXinput = torch.reshape(dz_dXinput,(-1,output_shape_prev[0]))\n \n dz_dbi = torch.reshape(dz_dXinput,prev_bias.shape)\n dz_dweightsi = X_prev_im2col.mm(dz_dXinput)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer)][0] = torch.Tensor(dz_dXinput) # ...\n \n if 'flatten_dense' in operationi:\n \n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n \n if operationi__1 == 'softmax':\n \n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n X_output = torch.reshape(X_output,(-1,1))\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if 'sigmoid' in operationi:\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi # Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi # Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # Can also set this to layer like in line ~800\n \n else:\n # Have 
to modify and test this before implementation -> Specifically\n # the backprop implementation is not consistent with the ones above\n #\n X_output = torch.reshape(X_output,(-1,1))\n weights__i = architecture['layer{}'.format(layer+2)][2]\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+2)][0]\n dz_dXoutput = torch.reshape(torch.Tensor(dz_dXoutput),X_output.shape)\n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n\n if 'relu' in operationi:\n dz_dXoutput[X_output<0] = 0\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n if 'sigmoid' in operationi:\n dz_dXoutput*= sigmoid(X_output)*(1-sigmoid(X_output))\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n else:\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n \n unflattened_Xinput = architecture['layer{}'.format(layer+1)][0]\n dz_dXinput = torch.reshape(dz_dXinput,unflattened_Xinput.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXinput)\n \n if gradient_layerwise['layer{}'.format(layer+1)][1] is not None:\n try:\n grad_weights['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][1]\n except:\n grad_weights['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][1])\n if gradient_layerwise['layer{}'.format(layer+1)][2] is not None:\n try:\n grad_bias['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][2]\n except:\n grad_bias['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][2])\n \n gc.collect()\n return", "def backward(self, grad_z):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\n\t\tself._grad_W_current = self._cache_current.T @ grad_z\n\t\tself._grad_b_current = np.ones((1,self._cache_current.shape[0])) @ grad_z\n\n\t\treturn grad_z @ self._W.T\n\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def backward(self, gradient: Tensor) -> Tensor:\n self.b_grad = np.sum(gradient, axis=0)\n self.w_grad = self.inputs.T @ gradient\n return gradient @ self.w.T", "def _AffReluDrop_Backprop(self, dscores, cache):\n grads = {}\n loss = None\n #Last Softmax Layer\n ##Add L2 Regularization loss\n loss = 0.5 * self.reg * np.sum(self.params['W{0}'.format(self.num_layers)]**2)\n ##Calculate grads for last Affine\n dhid, grads['W{0}'.format(self.num_layers)], grads['b{0}'.format(self.num_layers)] =\\\n affine_backward(dscores, cache[-1])\n grads['W{0}'.format(self.num_layers)] += self.reg * self.params['W{0}'.format(self.num_layers)]\n\n for i in range(self.num_layers-1, 0, -1): #hidden 
layers\n thiscache = cache[i]\n ##L2 Reg. loss\n loss += 0.5 * self.reg * np.sum(self.params['W{0}'.format(i)]**2)\n ##Calculate grads for [{affine-relu-drop} X (L-1)]\n dhid = dropout_backward(dhid, thiscache['drop'])\n dhid, grads['W{0}'.format(i)], grads['b{0}'.format(i)] = \\\n affine_relu_backward(dhid, thiscache['affine_relu']) \n grads['W{0}'.format(i)] += self.reg * self.params['W{0}'.format(i)]\n\n return grads, loss", "def backwardpass(self, grad):\n return (self.x>0) * grad", "def compute_backsubstitution(self, length: int, dim: int, layer_gradient_manager=None):\n if self.args.cpu:\n self.lb = torch.zeros(length, dim)\n else:\n self.lb = torch.zeros(length, dim).cuda()\n self.ub = self.lb.clone()\n self.final_lw = self.final_uw = None\n self.final_lb = self.final_ub = None\n\n original_layers = self.layers[:] # We keep a copy, because layers might get modified during the backsubstitution with jacobian\n for layer in self.layers[::-1]:\n if layer.lw is not None:\n layer.backward(layer_gradient_manager)\n # print(\"Controller has grad: \", self.lb.requires_grad)\n self.layers = original_layers # We restore the original layers", "def update_relus(self):\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs\n for pos, module in self.model.encoder._modules.items():\n if isinstance(module, ReLU):\n module.register_backward_hook(relu_backward_hook_function)\n module.register_forward_hook(relu_forward_hook_function)", "def relu_backward(dA, Z):\r\n dZ = np.array(dA, copy=True)\r\n dZ[Z <= 0] = 0\r\n return dZ", "def backward(self, residuals):\n in_channel, out_channel, kernel_size, a = self.weights.shape\n dw = np.zeros_like(self.weights) \n \n for i in range(in_channel):\n for o in range(out_channel):\n dw[i, o] += inv_conv2(self.in_val[:,:,i], \n residuals[:,:,o], \n self.stride)\n\n self.db += residuals.sum(axis=1).sum(axis=0)\n self.dw += dw \n gradient_x = np.zeros_like(self.in_val)\n \n for i in range(in_channel):\n for o in range(out_channel):\n gradient_x[:,:,i] += conv_delta(residuals[:,:,o] \n , self.weights[i][o]\n , self.stride\n , self.in_val.shape[0])\n \n return gradient_x", "def L_model_backward(AL, Y, caches):\n grads = {}\n dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))\n L = len(caches)\n cache = caches[L-1]\n dA, dW, db = linear_activation_backward(dAL, cache, 'sigmoid')\n grads['dA'+str(L)] = dAL\n grads['dW'+str(L)] = dW\n grads['db'+str(L)] = db\n for i in range(L-2, -1, -1):\n grads['dA' + str(i + 1)] = dA\n cache = caches[i]\n dA, dW, db = linear_activation_backward(dA, cache, 'relu')\n grads['dW' + str(i + 1)] = dW\n grads['db' + str(i + 1)] = db\n return grads", "def backward(self, inGradient, lr=0.001): # batchSize = 1\n wGradient = np.dot(inGradient.T, self.data)\n bGradient = np.sum(inGradient, axis=0)\n outGradient = np.dot(inGradient, self.weights)\n\n self.weights = self.weights - lr * wGradient\n self.bias = self.bias - lr * bGradient\n 
self.wGradient = wGradient\n self.bGradient = bGradient\n\n #print \"weight gradient \", wGradient\n #print \"bias gradient \", bGradient\n\n return outGradient", "def relu_backward(dA, cache):\n\n Z = cache\n dZ = np.array(dA, copy=True) # just converting dz to a correct object.\n\n # When z <= 0, you should set dz to 0 as well.\n dZ[Z <= 0] = 0\n\n assert (dZ.shape == Z.shape)\n\n return dZ", "def backward(self, accum_grad):\n\n W = self.W\n\n grad_w = self.layer_input.T.dot(accum_grad)\n grad_b = np.sum(accum_grad, axis=0, keepdims=True)\n\n # Update the layer weights\n self.W = self.W_optimizer.update(self.W, grad_w)\n self.b = self.b_optimizer.update(self.b, grad_b)\n\n accum_grad = accum_grad.dot(W.T)\n return accum_grad", "def backward_linear(Data):\n\n # data\n graph = tf.Graph()\n X = Data[0]\n Y_ = Data[1]\n\n if X.shape[0] != Y_.shape[0]:\n raise Exception(\"The quantity of Input X and Compare Y_ are not same!\")\n\n Loss = []\n with graph.as_default():\n print(\"This is the process of all the Dose!\")\n print(\"There are %d data in this process.\" % X.shape[0])\n print(\"Features of X: %d\" % X.shape[1])\n print(\"Learning rate is: %f\" % learning_rate)\n # Init all the parameters\n global_step = tf.Variable(0, trainable=False)\n\n STEPS = int(Epoch * X.shape[0] / BATCH_SIZE) + 1\n epoch = 0\n\n with tf.name_scope('inputs'):\n x = tf.placeholder(tf.float32, [None, Forward.INPUT_NODE], name='x_Input')\n y_ = tf.placeholder(tf.float32, [None, Forward.OUTPUT_NODE], name='y_Exact')\n y = Forward.forward_linear(x, regularizer=None)\n\n # lost function\n with tf.name_scope('loss'):\n loss_mse = tf.reduce_mean(tf.square(y - y_))\n loss = loss_mse + tf.add_n(tf.get_collection(\"losses\"))\n tf.summary.scalar('loss', loss)\n\n # Todo\n # LM algorithm\n\n # learning_rate = tf.train.exponential_decay(\n # LEARNING_RATE_BASE,\n # global_step,\n # X.shape[0] / BATCH_SIZE,\n # LEARNING_RATE_DECAY,\n # staircase=True\n # )\n\n with tf.name_scope('train'):\n # train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step)\n # train_step = tf.train.MomentumOptimizer(learning_rate, momentum=0.9).minimize(loss, global_step)\n train_step = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.99).minimize(loss, global_step)\n\n # EMA algorithm\n ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)\n ema_op = ema.apply(tf.trainable_variables())\n with tf.control_dependencies([train_step, ema_op]):\n train_op = tf.no_op(name='train')\n\n # ready for storing the model\n saver = tf.train.Saver()\n\n with tf.Session() as sess:\n init_op = tf.global_variables_initializer()\n sess.run(init_op)\n\n # Get the check point\n ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH_LINEAR)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n\n # begin multi threads\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess, coord)\n print(\"Begin the multi threads!\")\n\n # Graph\n merged = tf.summary.merge_all()\n writer = tf.summary.FileWriter(\"./logs_linear/\", sess.graph)\n\n # Training\n for i in range(STEPS):\n start = (i * BATCH_SIZE) % int(X.shape[0])\n end = start + BATCH_SIZE\n # if finish all the data\n if end >= X.shape[0]:\n end = X.shape[0]\n\n _, loss_value, step = sess.run([train_op, loss, global_step],\n feed_dict={x: X[start:end], y_: Y_[start:end]})\n\n if i % 4000 == 0:\n print(\"Steps are: %d , loss is: %g.\" % (step, loss_value))\n rs = sess.run(merged, feed_dict={x: 
X[start:end], y_: Y_[start:end]})\n writer.add_summary(rs, i)\n saver.save(sess, os.path.join(MODEL_SAVE_PATH_LINEAR, MODEL_NAME), global_step)\n\n # a round\n if end == X.shape[0]:\n # get the results\n epoch += 1\n loss_total = sess.run(loss, feed_dict={x: X, y_: Y_})\n\n Loss.append(loss_total)\n # Loss.append(loss_total*10000)\n print(\"After %d epoch(s), steps are: %d, loss total is: %g.\\n\" % (epoch, step, loss_total))\n saver.save(sess, os.path.join(MODEL_SAVE_PATH_LINEAR, MODEL_NAME), global_step)\n\n # close the multi threads\n coord.request_stop()\n coord.join(threads)\n print(\"Close the multi threads!\")\n\n return Loss", "def _backward(self):\n if self.units[0].value > 0:\n self.units[0].gradient += 1 * self.utop.gradient\n else:\n self.units[0].gradient += 0 * self.utop.gradient", "def backward(last_layer: str) -> Callable:\n\n def closure() -> Tuple[Optional[torch.Tensor], torch.Tensor]:\n optimizer.zero_grad()\n output = model(data)\n if last_layer == \"output\":\n output.backward(torch.ones_like(target))\n return None, output\n elif last_layer == 'loss':\n loss = compute_loss(output - target)\n loss.backward()\n return loss, output\n else:\n assert False, 'last layer must be \"output\" or \"loss\"'\n\n return closure", "def backward(last_layer: str) -> Callable:\n\n def closure() -> Tuple[Optional[torch.Tensor], torch.Tensor]:\n optimizer.zero_grad()\n output = model(data)\n if last_layer == \"output\":\n output.backward(torch.ones_like(target))\n return None, output\n elif last_layer == 'loss':\n loss = compute_loss(output - target)\n loss.backward()\n return loss, output\n else:\n assert False, 'last layer must be \"output\" or \"loss\"'\n\n return closure", "def _backward(self, w=None):\n grad = self.w # Should be I * self.w . We keep a vector for simplicity\n\n # Left multiply input `w` with normalizer gradient\n return w * grad if w is not None else grad", "def lstm_backward(dh, cache):\n dx, dh_prev, dWx, dWh, db = None, None, None, None, None\n #############################################################################\n # TODO: Implement the backward pass for an LSTM over an entire timeseries. #\n # You should use the lstm_step_backward function that you just defined. #\n #############################################################################\n\n N,T,H = dh.shape\n tmp_x = cache[T-1][0]\n D = tmp_x.shape[1]\n # cache is a list of these values -- so cache[0] is\n # x, i, f, o, g, prev_h, next_h, prev_c, next_c, Wx, Wh, b = cache\n dnext_h = dh\n\n dx = np.zeros((N,T,D))\n dh_prev = np.zeros((N,H))\n dc_prev = np.zeros((N,H))\n dWx = np.zeros((D,4*H))\n dWh = np.zeros((H,4*H))\n db = np.zeros((4*H))\n\n for t in reversed(range(0,T)):\n # current gradient at timestep is the upstream gradient (provided as input)\n # this may be coming from the Y as in the min_char_rnn.py (see line 59)\n # + downstream gradient provided by rnn_step_backward.\n dh_curr = dh[:,t,:] + dh_prev\n dx_, dh_prev, dc_prev, dWx_, dWh_, db_ = lstm_step_backward(dh_curr, dc_prev, cache[t])\n dWx += dWx_\n dWh += dWh_\n db += db_\n dx[:,t,:]=dx_\n\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n\n return dx, dh_prev, dWx, dWh, db" ]
[ "0.70412636", "0.694716", "0.6897693", "0.6847592", "0.6847592", "0.6846935", "0.6841099", "0.68178034", "0.6766849", "0.6754919", "0.67536426", "0.67530656", "0.67419785", "0.6739244", "0.6713423", "0.6708748", "0.6699276", "0.6696307", "0.6696307", "0.66860896", "0.6665928", "0.6665928", "0.6665928", "0.6665928", "0.6665928", "0.6660715", "0.66584796", "0.66544616", "0.66292745", "0.6601731", "0.6597477", "0.65969926", "0.65857387", "0.6578184", "0.6577647", "0.6571897", "0.6550648", "0.6547087", "0.6527253", "0.6526073", "0.6525951", "0.6517347", "0.6513479", "0.6510078", "0.6510078", "0.6510078", "0.6510078", "0.6509752", "0.650212", "0.650212", "0.64962935", "0.6495831", "0.6467036", "0.64651346", "0.64641494", "0.6461148", "0.6455365", "0.64549667", "0.6451134", "0.6438869", "0.64368945", "0.64281213", "0.64232516", "0.6420366", "0.64168423", "0.64149886", "0.64119864", "0.6399041", "0.63936764", "0.63936764", "0.63901347", "0.6368449", "0.6367812", "0.63578916", "0.63576764", "0.6349778", "0.63466066", "0.6345023", "0.6344269", "0.63408977", "0.6330927", "0.63286996", "0.63099647", "0.63084453", "0.63078564", "0.6298531", "0.62972474", "0.62874126", "0.62787354", "0.62769204", "0.6276831", "0.62735784", "0.62666297", "0.62607527", "0.6256333", "0.6241953", "0.6241652", "0.6241652", "0.6235367", "0.62284124" ]
0.66615254
25
Forward pass for batch normalization. During training the sample mean and (uncorrected) sample variance are computed from minibatch statistics and used to normalize the incoming data. During training we also keep an exponentially decaying running mean of the mean and variance of each feature, and these averages are used to normalize data at test-time. At each timestep we update the running averages for mean and variance using an exponential decay based on the momentum parameter.
import numpy as np


def batchnorm_forward(x, gamma, beta, bn_param):
    mode = bn_param['mode']
    eps = bn_param.get('eps', 1e-5)
    momentum = bn_param.get('momentum', 0.9)

    N, D = x.shape
    running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))
    running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))

    out, cache = None, None
    if mode == 'train':
        # Training-time forward pass: compute the minibatch mean and variance,
        # normalize by the standard deviation (sqrt of variance + eps), then
        # scale and shift with gamma and beta. See the original paper
        # (https://arxiv.org/abs/1502.03167) for details.
        mu = np.mean(x, axis=0)
        var = np.var(x, axis=0)
        sigma = np.sqrt(var + eps)
        out = gamma * (x - mu) / sigma + beta

        # Update the exponentially decaying running averages used at test time.
        running_mean = momentum * running_mean + (1 - momentum) * mu
        running_var = momentum * running_var + (1 - momentum) * var

        # Intermediates needed for the backward pass.
        cache = (x, mu, sigma, gamma, beta)
    elif mode == 'test':
        # Test-time forward pass: normalize with the running mean and variance,
        # then scale and shift with gamma and beta.
        out = (x - running_mean) / np.sqrt(running_var + eps) * gamma + beta
    else:
        raise ValueError('Invalid forward batchnorm mode "%s"' % mode)

    # Store the updated running means back into bn_param
    bn_param['running_mean'] = running_mean
    bn_param['running_var'] = running_var

    return out, cache
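A minimal usage sketch of the function above; the shapes, loop count, and random inputs here are illustrative assumptions, not part of the original record. A few training-mode calls warm up the running statistics stored in bn_param, after which test mode normalizes with those stored averages instead of the current batch.

import numpy as np

# Illustrative sketch: exercise batchnorm_forward in train and then test mode.
N, D = 4, 3
gamma, beta = np.ones(D), np.zeros(D)
bn_param = {'mode': 'train', 'momentum': 0.9, 'eps': 1e-5}

# Training-mode calls update bn_param['running_mean'] / ['running_var'].
for _ in range(50):
    out, cache = batchnorm_forward(np.random.randn(N, D), gamma, beta, bn_param)

# Test-mode call uses the stored running statistics, so the output no longer
# depends on the statistics of the current batch.
bn_param['mode'] = 'test'
out_test, _ = batchnorm_forward(np.random.randn(N, D), gamma, beta, bn_param)
print(out_test.shape)                   # (4, 3)
print(bn_param['running_mean'].shape)   # (3,)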
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize(self, x, train=True):\n if train is not None:\n mean, variance = tf.nn.moments(x, [0,1,2])\n assign_mean = self.mean.assign(mean)\n assign_variance = self.variance.assign(variance)\n with tf.control_dependencies([assign_mean, assign_variance]):\n return tf.nn.batch_norm_with_global_normalization(\n x, mean, variance, self.beta, self.gamma,\n self.epsilon, self.scale_after_norm)\n else:\n mean = self.ewma_trainer.average(self.mean)\n variance = self.ewma_trainer.average(self.variance)\n local_beta = tf.identity(self.beta)\n local_gamma = tf.identity(self.gamma)\n return tf.nn.batch_norm_with_global_normalization(\n x, mean, variance, local_beta, local_gamma,\n self.epsilon, self.scale_after_norm)", "def PostTrainingStepUpdate(self):\n p = self.params\n # Get sufficient stats that accumulates over microbatches.\n counts = self.accumulators.counts.GetValue()\n mean_ss = self.accumulators.mean_ss.GetValue()\n variance_ss = self.accumulators.variance_ss.GetValue()\n # Compute batch mean and batch variance from sufficient stats\n mean, variance = tf.nn.normalize_moments(counts, mean_ss, variance_ss, None)\n decay = tf.convert_to_tensor(1.0 - p.decay, p.dtype)\n # Update moving_mean, moving_variance from batch mean and batch variance.\n with tf.name_scope(p.name) as scope:\n with tf.ops.colocate_with(self.vars.moving_mean):\n mean_update = tf.assign_sub(\n self.vars.moving_mean,\n tf.where(\n tf.greater(counts, 0.5),\n (self.vars.moving_mean - tf.cast(mean, p.dtype)) * decay,\n tf.zeros_like(self.vars.moving_mean)),\n name='moving_mean_update')\n with tf.ops.colocate_with(self.vars.moving_variance):\n var_update = tf.assign_sub(\n self.vars.moving_variance,\n tf.where(\n tf.greater(counts, 0.5),\n (self.vars.moving_variance - tf.cast(variance, p.dtype)) *\n decay, tf.zeros_like(self.vars.moving_variance)),\n name='moving_variance_update')\n py_utils.CheckNumerics(\n self.vars.moving_mean,\n 'moving mean of {} failed numeric check'.format(scope))\n py_utils.CheckNumerics(\n self.vars.moving_variance,\n 'moving variance of {} failed numeric check'.format(scope))\n self.accumulators.counts.Reset()\n self.accumulators.mean_ss.Reset()\n self.accumulators.variance_ss.Reset()\n return tf.group(mean_update, var_update)", "def BatchNorm(inputs, axis=None, training=None, momentum=0.9, epsilon=1e-5,\n center=True, scale=True,\n beta_initializer=tf.zeros_initializer(),\n gamma_initializer=tf.ones_initializer(),\n virtual_batch_size=None,\n internal_update=False):\n # parse shapes\n shape = inputs.get_shape().as_list()\n ndims = len(shape)\n\n assert axis is not None\n\n # parse training/ctx\n ctx = get_current_tower_context()\n if training is None:\n training = ctx.is_training\n training = bool(training)\n TF_version = get_tf_version_number()\n if not training and ctx.is_training:\n assert TF_version >= 1.4, \\\n \"Fine tuning a BatchNorm model with fixed statistics is only \" \\\n \"supported after https://github.com/tensorflow/tensorflow/pull/12580 \"\n if ctx.is_main_training_tower: # only warn in first tower\n logger.warn(\"[BatchNorm] Using moving_mean/moving_variance in training.\")\n # Using moving_mean/moving_variance in training, which means we\n # loaded a pre-trained BN and only fine-tuning the affine part.\n\n coll_bk = backup_collection([tf.GraphKeys.UPDATE_OPS])\n with rename_get_variable(\n {'moving_mean': 'mean/EMA',\n 'moving_variance': 'variance/EMA'}):\n if TF_version >= 1.5:\n layer = tf.layers.BatchNormalization(\n axis=axis,\n momentum=momentum, epsilon=epsilon,\n 
center=center, scale=scale,\n beta_initializer=beta_initializer,\n gamma_initializer=gamma_initializer,\n virtual_batch_size=virtual_batch_size,\n fused=True,\n _reuse=tf.get_variable_scope().reuse\n )\n else:\n assert virtual_batch_size is None, \"Feature not supported in this version of TF!\"\n layer = tf.layers.BatchNormalization(\n axis=axis,\n momentum=momentum, epsilon=epsilon,\n center=center, scale=scale,\n beta_initializer=beta_initializer,\n gamma_initializer=gamma_initializer,\n fused=True,\n _reuse=tf.get_variable_scope().reuse\n )\n xn = layer.apply(inputs, training=training, scope=tf.get_variable_scope())\n\n # maintain EMA only on one GPU is OK, even in replicated mode.\n # because training time doesn't use EMA\n if ctx.is_main_training_tower:\n for v in layer.non_trainable_variables:\n add_model_variable(v)\n if not ctx.is_main_training_tower or internal_update:\n restore_collection(coll_bk)\n\n if training and internal_update:\n assert layer.updates\n with tf.control_dependencies(layer.updates):\n ret = tf.identity(xn, name='output')\n else:\n ret = tf.identity(xn, name='output')\n\n vh = ret.variables = VariableHolder(\n moving_mean=layer.moving_mean,\n mean=layer.moving_mean, # for backward-compatibility\n moving_variance=layer.moving_variance,\n variance=layer.moving_variance) # for backward-compatibility\n if scale:\n vh.gamma = layer.gamma\n if center:\n vh.beta = layer.beta\n return ret", "def batch_normalization(x, phase_train, out_size):\n\n\twith tf.variable_scope('bn'):\n\t\tbeta = tf.Variable(tf.constant(0.0, shape=[out_size]), name='beta', trainable=True)\n\t\tgamma = tf.Variable(tf.constant(1.0, shape=[out_size]), name='gamma', trainable=True)\n\t\tbatch_mean, batch_var = tf.nn.moments(x, [0], name='moments')\n\t\tema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n\t\tdef mean_var_with_update():\n\t\t\tema_apply_op = ema.apply([batch_mean, batch_var])\n\t\t\twith tf.control_dependencies([ema_apply_op]):\n\t\t\t\treturn tf.identity(batch_mean), tf.identity(batch_var)\n\n\t\tmean, var = tf.cond(phase_train, mean_var_with_update, lambda: (ema.average(batch_mean), ema.average(batch_var)))\n\t\tnormed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n\treturn normed", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! 
# \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n #######################################################################\n \n sample_mean = np.mean(x, axis=0)\n sample_variance = np.var(x, axis=0)\n \n running_mean = momentum * running_mean + (1 - momentum) * sample_mean\n running_var = momentum * running_var + (1 - momentum) * sample_variance\n \n num = x - sample_mean\n denom = np.sqrt(sample_variance + eps)\n \n x_hat = num/denom\n out = gamma*x_hat + beta\n \n cache = (gamma, x_hat, num, denom, eps, sample_variance)\n\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n num = x - running_mean\n denom = np.sqrt(running_var + eps)\n \n x_hat = num/denom\n out = gamma*x_hat + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache", "def batch_normalization(x, phase_train, out_size):\r\n with tf.variable_scope('bn'):\r\n beta = tf.Variable(tf.constant(0.0, shape=[out_size]),\r\n name='beta', trainable=True)\r\n gamma = tf.Variable(tf.constant(1.0, shape=[out_size]),\r\n name='gamma', trainable=True)\r\n batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')\r\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\r\n\r\n def mean_var_with_update():\r\n ema_apply_op = ema.apply([batch_mean, batch_var])\r\n with tf.control_dependencies([ema_apply_op]):\r\n return tf.identity(batch_mean), tf.identity(batch_var)\r\n\r\n mean, var = tf.cond(phase_train,\r\n mean_var_with_update,\r\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\r\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\r\n return normed", "def normalize(self, x, train=True):\n if train:\n mean, variance = tf.nn.moments(x, [0])\n assign_mean = self.mean.assign(mean)\n assign_variance = self.variance.assign(tf.mul(variance, self.keep_prob_prior))\n with tf.control_dependencies([assign_mean, assign_variance]):\n act_bn = tf.mul((x - mean), tf.rsqrt(variance + self.epsilon), name=\"act_bn\")\n return tf.add(tf.mul(act_bn, self.gamma), self.beta)\n \n else:\n mean = self.ewma_trainer.average(self.mean) or self.epsilon\n variance = self.ewma_trainer.average(self.variance) or self.epsilon\n local_beta = tf.identity(self.beta)\n local_gamma = tf.identity(self.gamma)\n act_bn = tf.mul((x-mean), tf.rsqrt(variance + self.epsilon), name=\"act1_bn\")\n return tf.add(tf.mul(act_bn, local_gamma), local_beta)", "def normalize_data(self):\n self.x_mean, self.x_std = du.get_mean_std(self.x_train)\n self.x_train = du.normalize(self.x_train, self.x_mean, self.x_std)\n if self.x_test is not None and self.y_test is not None:\n self.x_test = du.normalize(self.x_test, 
self.x_mean, self.x_std)\n self.normalized_data = True", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n Xshape = x.shape\n\n if len(Xshape) > 2: #deal with 2d inputs\n N,C,H,W = x.shape\n x = np.swapaxes(x,1,3)\n D = C\n x = np.reshape(x,[N*H*W,C])\n else:\n N = x.shape[0]\n x = np.reshape(x,[N,-1])\n _, D = x.shape\n\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n #######################################################################\n mu = np.mean(x,axis=0)\n var = np.var(x, axis=0)\n x_norm = (x - mu)/np.sqrt(var + eps)\n out = gamma * x_norm + beta\n running_mean = momentum*running_mean + (1-momentum)*mu\n running_var = momentum*running_var + (1-momentum)*var\n cache = (x_norm, gamma, np.sqrt(var + eps))\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. 
#\n #######################################################################\n x_norm = (x - running_mean)/np.sqrt(running_var + eps)\n out = gamma * x_norm + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n if len(Xshape) > 2:\n out = np.reshape(out,[N,W,H,C])\n out = np.swapaxes(out,1,3)\n else:\n out = np.reshape(out,Xshape)\n return out, cache", "def normalize_batch(batch, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):\n # normalize using imagenet mean and std\n batch = batch.clone()\n mean = torch.tensor(mean).view(-1, 1, 1)\n std = torch.tensor(std).view(-1, 1, 1)\n # if your image data is scaled to scale 0-255, uncomment the line below\n # batch.div_(255.0)\n return (batch - mean) / std", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n #######################################################################\n #Compute mean and variance of each element of the data.\n sample_mean = np.mean(x,axis = 0)\n sample_var = np.var(x,axis = 0)\n #Normalize\n x_normalized = (x - sample_mean) / (np.sqrt(sample_var + eps))\n #scale and shift.\n out = x_normalized * gamma + beta\n #Update running mean and variance.\n running_mean = momentum * running_mean + (1 - momentum) * sample_mean\n running_var = momentum * running_var + (1 - momentum) * sample_var\n #Save the sample mean and variance as cache for backprop.\n cache = (x_normalized,np.power(sample_var + eps,-0.5),gamma)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. 
#\n #######################################################################\n #Normalize with running mean and var.\n x_normalized = (x - running_mean) / (np.sqrt(running_var + eps))\n #scale and shift.\n out = gamma * x_normalized + beta\n #Save the sample mean and variance as cache for backprop.\n cache = (x_normalized,np.power(running_var + eps,-0.5),gamma)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache", "def forward(ctx, input, gamma, beta, eps=1e-5):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n ####### Forward pass of batch normalization ######\n\n # In this section, we have to perform the forward pass of batch normalization\n # with more intermediate steps, since we want to propagate error terms. \n # To illustrate it better, we began from the bottom and follow our way to the top.\n # In that way, we unfolded every function step by step.\n\n # Step 3.2.3: Calculate variance\n var = input.var(dim=0, unbiased=False)\n\n # Step 3.2.2: add eps for numerical stability, then sqrt\n sqrt_var = torch.sqrt(var + eps)\n\n # Step 3.2: ivert sqrtwar\n inv_sqrt_var = 1./sqrt_var\n\n # Step 3.1.1: Calculate mean\n mean = input.mean(dim=0)\n\n # Step 3.1: subtract mean vector of every trainings example\n input_mean = input - mean\n\n # Step 3 - Execute normalization\n input_norm = input_mean * inv_sqrt_var \n\n # Step 2: Nor the two transformation steps\n scaled_input_norm = gamma * input_norm\n\n # Step 1: scale and shift\n out = scaled_input_norm + beta\n #################################################\n # store tensors and non-tensorial constants\n ctx.save_for_backward(gamma, inv_sqrt_var, mean, input)\n ctx.foo = eps\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def _batch_norm(inputs, decay = 0.999, center = True, scale = False, epsilon = 0.001, \n\t\t\t\tmoving_vars = 'moving_vars', activation = None, is_training = None, \n\t\t\t\ttrainable = True, restore = True, scope = None, reuse = None):\n inputs_shape = inputs.get_shape()\n with tf.variable_op_scope([inputs], scope, 'BatchNorm', reuse = reuse):\n axis = list(range(len(inputs_shape) - 1))\n params_shape = inputs_shape[-1:]\n beta, gamma = None, None\n\n if center:\n beta = _variable_on_cpu('beta', params_shape, tf.zeros_initializer)\n if scale:\n gamma = _variable_on_cpu('gamma', params_shape, tf.ones_initializer)\n\n # moving_collections = [moving_vars, tf.GraphKeys.MOVING_AVERAGE_VARIABLES]\n moving_mean = _variable_on_cpu('moving_mean', params_shape,tf.zeros_initializer, trainable = False)\n # tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, moving_mean)\n moving_variance = _variable_on_cpu('moving_variance', params_shape, tf.ones_initializer, trainable = False)\n # tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, moving_variance)\n \n def train_phase():\n mean, variance = tf.nn.moments(inputs, axis)\n update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, decay)\n update_moving_variance = moving_averages.assign_moving_average(moving_variance, \n variance, decay)\n with tf.control_dependencies([update_moving_mean, update_moving_variance]):\n return 
tf.identity(mean), tf.identity(variance)\n\n def test_phase():\n return moving_mean, moving_variance\t\n\n mean, variance = tf.cond(is_training, train_phase, test_phase)\n outputs = tf.nn.batch_normalization(inputs, mean, variance, beta, gamma, epsilon)\n outputs.set_shape(inputs.get_shape()) \n\n if activation:\n outputs = activation(outputs)\n\n return outputs", "def PostTrainingStepUpdate(self, global_step):\n p = self.params\n # Get sufficient stats that accumulates over microbatches.\n counts = self.accumulators.counts.GetValue()\n mean_ss = self.accumulators.mean_ss.GetValue()\n variance_ss = self.accumulators.variance_ss.GetValue()\n # Compute batch mean and batch variance from sufficient stats\n mean, variance = tf.nn.normalize_moments(counts, mean_ss, variance_ss, None)\n decay = tf.convert_to_tensor(1.0 - p.decay, p.dtype)\n # Update moving_mean, moving_variance from batch mean and batch variance.\n with tf.name_scope(p.name) as scope:\n with tf.colocate_with(self.vars.moving_mean):\n mean_update = tf.assign_sub(\n self.vars.moving_mean,\n tf.where(\n tf.greater(counts, 0.5),\n (self.vars.moving_mean - tf.cast(mean, p.dtype)) * decay,\n tf.zeros_like(self.vars.moving_mean)),\n name='moving_mean_update')\n with tf.colocate_with(self.vars.moving_variance):\n var_update = tf.assign_sub(\n self.vars.moving_variance,\n tf.where(\n tf.greater(counts, 0.5),\n (self.vars.moving_variance - tf.cast(variance, p.dtype)) *\n decay, tf.zeros_like(self.vars.moving_variance)),\n name='moving_variance_update')\n py_utils.CheckNumerics(\n self.vars.moving_mean,\n 'moving mean of {} failed numeric check'.format(scope))\n py_utils.CheckNumerics(\n self.vars.moving_variance,\n 'moving variance of {} failed numeric check'.format(scope))\n self.accumulators.counts.Reset()\n self.accumulators.mean_ss.Reset()\n self.accumulators.variance_ss.Reset()\n return tf.group(mean_update, var_update)", "def _normalize(self, dataset):\n if self.max is None: # if we are normalizing the training set\n self.max, self.min = dataset.max(), dataset.min() # find max, min value for each columns\n for row in dataset.index: # for each row in dataset\n for col in self.features: # for each feature in the instance (exclude target)\n dataset.at[row, col] = (dataset.at[row, col] - self.min[col]) / (self.max[col] - self.min[col]) if col != \"Bias\" else 1", "def forward_pass(self):\n # Compute the support set's mean and var and use these as the moments for\n # batch norm on the query set.\n train_embeddings = self.embedding_fn(self.episode.train_images,\n self.is_training)\n self.train_embeddings = train_embeddings['embeddings']\n support_set_moments = None\n if not self.transductive_batch_norm:\n support_set_moments = train_embeddings['moments']\n test_embeddings = self.embedding_fn(\n self.episode.test_images,\n self.is_training,\n moments=support_set_moments,\n backprop_through_moments=self.backprop_through_moments)\n self.test_embeddings = test_embeddings['embeddings']", "def _update_normalize_params(self, x):\n # Flatten x to be a tensor of embeddings.\n assert x.size()[-1:] == self.mean.size()\n x_flattened = x.view(-1, x.size(-1))\n\n # Update mean.\n x_mean = x_flattened.mean(dim=0)\n self.mean.data = (\n self.normalize_decay_rate * self.mean.data\n + (1.0 - self.normalize_decay_rate) * x_mean\n )\n\n # Update variance.\n x_var = ((x_flattened - self.mean) ** 2).mean(dim=0)\n self.var.data = (\n self.normalize_decay_rate * self.var.data\n + (1.0 - self.normalize_decay_rate) * x_var\n )", "def set_normalization(self, 
dataloader):\n mean = 0\n square = 0\n for (data_in, _) in dataloader:\n mean += data_in.mean()\n square += data_in.pow(2).mean()\n\n mean /= len(dataloader)\n square /= len(dataloader)\n std = np.sqrt(square - mean ** 2)\n\n # The input data should be roughly normally distributed after\n # passing through net_fixed.\n self.scale_in.bias.data.fill_(- mean / std)\n self.scale_in.weight.data.fill_(1 / std)", "def batch_norm(\n input,\n running_mean,\n running_var,\n weight,\n bias,\n training=False,\n momentum=0.1,\n eps=1e-5,\n):\n return FunctionLib.apply(\n 'BatchNorm', input.device,\n [input, weight, bias, running_mean, running_var],\n axis=1, epsilon=eps, use_stats=int(not training),\n momentum=1.0 - momentum)", "def fit(self, data):\n batch_count = data.shape[0]\n batch_mu = np.mean(data, axis=0, keepdims=True)\n batch_var = np.var(data, axis=0, keepdims=True)\n new_mean, new_var, new_count = self.running_mean_var_from_batch(batch_mu, batch_var, batch_count)\n #sigma[sigma < 1e-8] = 1.0\n self.mu.load(new_mean)\n self.var.load(new_var)\n self.count.load(new_count)\n self.fitted = True\n self.cache()", "def batch_norm(x, phase_train, scope='bn', affine=True):\n\n with tf.variable_scope(scope):\n og_shape = x.get_shape().as_list()\n if len(og_shape) == 2:\n x = tf.reshape(x, [-1, 1, 1, og_shape[1]])\n shape = x.get_shape().as_list()\n beta = tf.Variable(tf.constant(0.0, shape=[shape[-1]]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[shape[-1]]),\n name='gamma', trainable=affine)\n\n batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.9)\n ema_apply_op = ema.apply([batch_mean, batch_var])\n ema_mean, ema_var = ema.average(batch_mean), ema.average(batch_var)\n\n def mean_var_with_update():\n \"\"\"Summary\n Returns\n -------\n name : TYPE\n Description\n \"\"\"\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema_mean, ema_var))\n\n normed = tf.nn.batch_norm_with_global_normalization(\n x, mean, var, beta, gamma, 1e-3, affine)\n if len(og_shape) == 2:\n normed = tf.reshape(normed, [-1, og_shape[-1]])\n return normed", "def _normalize_feature(self, feature):\n\n for ic in range(self.data_shape[0]):\n feature[ic] = (feature[ic] - self.feature_mean[ic]\n ) / self.feature_std[ic]\n return feature", "def BatchNormalize(S):\n mu = np.mean(S, axis=0)\n v = np.mean((S-mu)**2, axis=0)\n S = (S - mu) / np.sqrt(v + epsilon)\n return S", "def batchnorm_forward(x, gamma, beta, bn_param):\n\tmode = bn_param['mode']\n\teps = bn_param.get('eps', 1e-5)\n\tmomentum = bn_param.get('momentum', 0.9)\n\n\tN, D = x.shape\n\trunning_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n\trunning_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n\tout, cache = None, None\n\tif mode == 'train':\n\t\t# normalize data\n\t\tmu = np.mean(x, axis=0)\n\t\tvar = np.var(x, axis=0)\n\t\tnormalized = (x-mu)/np.sqrt(var+eps)\n\t\tout = gamma*normalized + beta\n\t\t# Update running mean and variance\n\t\trunning_mean = momentum*running_mean + (1 - momentum)*mu\n\t\trunning_var = momentum*running_var + (1 - momentum)*var\n\t\t# Cache for backwards pass\n\t\tcache = (x, normalized, gamma, beta, mu, var, eps)\n\telif mode == 'test':\n\t\t# normalize data using running mean and variance from training\n\t\tnormalized = (x - running_mean)/np.sqrt(running_var+eps)\n\t\tout = 
gamma*normalized + beta\n\telse:\n\t\traise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n\t# Store the updated running means back into bn_param\n\tbn_param['running_mean'] = running_mean\n\tbn_param['running_var'] = running_var\n\n\treturn out, cache", "def train(self, data_source, batch_size=4096):\n self.mean, self.std_dev = stats_batchwise(data_source, batch_size)", "def normalizeData(meanAndStd, dataset):\n\n for i in range(len(dataset)):\n for j in range(len(dataset[i])-1):\n mean = meanAndStd[j][\"mean\"]\n std = meanAndStd[j][\"std\"]\n dataset[i][j] = (dataset[i][j] - mean)/std", "def __preprocess(data, sample_size: int = 200000):\n mean = data[:sample_size].mean(axis=0)\n data -= mean\n stdev = data[:sample_size].std(axis=0)\n data /= stdev\n return data", "def update(self, batch_mean, batch_var, batch_count):\n delta = batch_mean - self.mean\n new_count = (self.count + batch_count)\n new_mean = self.mean + delta * (batch_count / new_count)\n new_var = self.count * self.var + batch_count * batch_var\n new_var += (delta**2) * self.count * batch_count / new_count\n new_var /= new_count\n self.count.copy_(new_count)\n self.mean.copy_(new_mean)\n self.var.copy_(new_var)\n self.std = torch.sqrt(self.var)", "def Batchnorm(name, axes, inputs, is_training=None, stats_iter=None, update_moving_stats=True, fused=True, labels=None, n_labels=None):\n if axes != [0,2,3]:\n raise Exception('unsupported')\n batch_mean, batch_var = tf.nn.moments(inputs, axes, keep_dims=True)\n shape = batch_mean.get_shape().as_list() # shape is [1,n,1,1]\n offset_m = lib.param(name+'.offset', np.zeros([n_labels,shape[1]], dtype='float32'))\n scale_m = lib.param(name+'.scale', np.ones([n_labels,shape[1]], dtype='float32'))\n offset = tf.nn.embedding_lookup(offset_m, labels)\n # offset = tf.Print(offset,['offset',offset])\n scale = tf.nn.embedding_lookup(scale_m, labels)\n # scale = tf.Print(scale,['scale',scale])\n\n moving_mean = lib.param(name + '.moving_mean', np.zeros(batch_mean.get_shape(), dtype='float32'), trainable=False)\n moving_variance = lib.param(name + '.moving_variance', np.ones(batch_var.get_shape(), dtype='float32'),trainable=False)\n\n def _batch_norm_training():\n return tf.nn.batch_normalization(inputs, batch_mean, batch_var, offset[:,:,None,None], scale[:,:,None,None], 1e-5)\n\n def _batch_norm_inference():\n # Version which blends in the current item's statistics\n mean = moving_mean[None, :, None, None]\n var = moving_variance[None, :, None, None]\n '''\n batch_size = tf.cast(tf.shape(inputs)[0], 'float32')\n mean, var = tf.nn.moments(inputs, [2,3], keep_dims=True)\n mean = ((1./batch_size)*mean) + (((batch_size-1.)/batch_size)*moving_mean)[None,:,None,None]\n var = ((1./batch_size)*var) + (((batch_size-1.)/batch_size)*moving_variance)[None,:,None,None]\n '''\n return tf.nn.batch_normalization(inputs, mean, var, offset[:,:,None,None], scale[:,:,None,None],\n 1e-5), mean, var\n\n if is_training is None:\n outputs = _batch_norm_training()\n else:\n if is_training:\n outputs = _batch_norm_training()\n else:\n outputs = _batch_norm_inference()\n\n if update_moving_stats:\n no_updates = lambda: outputs\n\n def _force_updates():\n \"\"\"Internal function forces updates moving_vars if is_training.\"\"\"\n float_stats_iter = tf.cast(stats_iter, tf.float32)\n update_moving_mean = tf.assign(moving_mean,\n ((float_stats_iter / (float_stats_iter + 1)) * moving_mean) + (\n (1 / (float_stats_iter + 1)) * batch_mean))\n update_moving_variance = tf.assign(moving_variance,\n 
((float_stats_iter / (float_stats_iter + 1)) * moving_variance) + (\n (1 / (float_stats_iter + 1)) * batch_var))\n with tf.control_dependencies([update_moving_mean, update_moving_variance]):\n return tf.identity(outputs)\n\n if is_training:\n outputs = _force_updates()\n else:\n outputs = no_updates()\n\n return outputs", "def compute_means(opts, train_data, sampler):\n exp_names = train_data[\"exp_names\"].value\n means = []\n stds = []\n if opts[\"flags\"].normalize is True:\n running_stats = []\n # a running stat for each channel\n running_stats = RunningStats(3)\n # loop over the experiments\n\n # for exp_name in exp_names:\n for j in range(0, len(exp_names), 2):\n batch = sampler.get_minibatch()\n exp_name = batch[2][0]\n print(exp_name)\n # loop over the keys\n\n seq_len = train_data[\"exps\"][exp_name][\"labels\"].shape[0]\n temp_feat = batch[0].cpu().numpy()\n temp_feat = temp_feat[:seq_len, :, :, :]\n\n channel_feats = []\n for i in range(3):\n # channel_feat = temp_feat[0, :, i, :]\n # sample frames\n channel_feat = temp_feat[::100, i, :]\n channel_feat = channel_feat.reshape(-1, 1)\n channel_feats.append(channel_feat)\n\n channel_feats = np.concatenate(channel_feats, axis=1)\n running_stats.add_data(\n channel_feat\n )\n\n means = running_stats.mean.tolist()\n stds = running_stats.compute_std().tolist()\n else:\n means = [.5, .5, .5]\n stds = [1, 1, 1]\n # for key in opts[\"flags\"].feat_keys:\n # temp_feat = train_data[\"exps\"][exp_names[0]][key].value\n # mean = np.zeros((temp_feat.shape[2], ))\n # std = np.ones((temp_feat.shape[2], ))\n # means.append(mean)\n # stds.append(std)\n normalize = transforms.Normalize(mean=means,\n std=stds)\n\n return normalize", "def batch_norm_template(inputs, is_training, scope, moments_dims, bn_ema_decay):\n with tf.variable_scope(scope) as sc:\n num_channels = inputs.get_shape()[-1].value\n\n beta = tf.Variable(tf.constant(0.0, shape=[num_channels]), name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[num_channels]), name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments')\n\n decay = bn_ema_decay if bn_ema_decay is not None else 0.9\n ema = tf.train.ExponentialMovingAverage(decay=decay)\n # Operator that maintains moving averages of variables.\n ema_apply_op = tf.cond(is_training,\n true_fn=lambda: ema.apply([batch_mean, batch_var]),\n false_fn=lambda: tf.no_op())\n\n # Update moving average and return current batch's avg and var.\n def mean_var_with_update():\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n # ema.average returns the Variable holding the average of var.\n mean, var = tf.cond(is_training,\n true_fn=mean_var_with_update,\n false_fn=lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3)\n return normed", "def normalize_data(batch_data):\n B, N, C = batch_data.shape\n normal_data = np.zeros((B, N, C))\n for b in range(B):\n pc = batch_data[b]\n centroid = np.mean(pc, axis=0)\n pc = pc - centroid\n m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))\n pc = pc / m\n normal_data[b] = pc\n return normal_data", "def feature_normalization(train, test):\n (N,p) = np.shape(train)\n mins = np.amin(train,axis=0)\n maxs = np.amax(train,axis=0) + mins\n train = (train + mins)/maxs\n test = (test + mins)/maxs\n return train, test", "def batch_norm(x, n_out, phase_train):\n with tf.variable_scope('bn'):\n beta = 
tf.Variable(tf.constant(0.0, shape=[n_out]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\n name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed", "def normalize(img, mean, std, data_format='CHW'):\n _assert_image_tensor(img, data_format)\n\n mean = paddle.to_tensor(mean, place=img.place)\n std = paddle.to_tensor(std, place=img.place)\n\n if _is_channel_first(data_format):\n mean = mean.reshape([-1, 1, 1])\n std = std.reshape([-1, 1, 1])\n\n return (img - mean) / std", "def batch_norm(x, n_out, phase_train):\n with tf.variable_scope('bn'):\n beta = tf.Variable(tf.constant(0.0, shape=[n_out]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\n name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed", "def train(data, model, opt):\r\n ravg = RunningAverage()\r\n model.train()\r\n for step, task in enumerate(data):\r\n y_mean, y_std = model(task['x_context'], task['y_context'], task['x_target'])\r\n obj = -gaussian_logpdf(task['y_target'], y_mean, y_std, 'batched_mean')\r\n obj.backward()\r\n opt.step()\r\n opt.zero_grad()\r\n ravg.update(obj.item() / data.batch_size, data.batch_size)\r\n return ravg.avg", "def batch_norm(inputs, training, data_format):\n # We set fused=True for a significant performance boost. 
See\n # https://www.tensorflow.org/performance/performance_guide#common_fused_ops\n return tf.compat.v1.layers.batch_normalization(\n inputs=inputs, axis=1 if data_format == 'channels_first' else 3,\n momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,\n scale=True, training=training, fused=True)", "def batch_norm(x, n_out, phase_train, scope='bn'):\n with tf.variable_scope(scope):\n beta = tf.Variable(tf.constant(0.0, shape=[n_out]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\n name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.99)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed", "def normalise(self):\n fitness_sum = np.sum(self.fitness)\n for i in range(self.loops):\n self.normalised_fitness[i] = self.fitness[i] / fitness_sum", "def normalize(dataset):\n return normalize_standard_deviation(normalize_mean(dataset))", "def sync_batch_norm(\n input,\n running_mean,\n running_var,\n weight,\n bias,\n training=False,\n momentum=0.1,\n eps=1e-5,\n process_group=None,\n):\n if process_group is None:\n kwargs = locals()\n kwargs.pop('process_group')\n return batch_norm(**kwargs)\n return FunctionLib.apply(\n 'SyncBatchNorm', input.device,\n [input, weight, bias, running_mean, running_var],\n axis=1, epsilon=eps, use_stats=int(not training),\n momentum=1.0 - momentum, **process_group.arguments)", "def train_step(self, batch_sample, epoch_it):\n batch_x = batch_sample['waveform']\n data_type = batch_sample['data_type']\n batch_target = {\n 'ov': batch_sample['ov'],\n 'sed': batch_sample['sed_label'],\n 'doa': batch_sample['doa_label'],\n }\n if self.cuda:\n batch_x = batch_x.cuda(non_blocking=True)\n batch_target['sed'] = batch_target['sed'].cuda(non_blocking=True)\n batch_target['doa'] = batch_target['doa'].cuda(non_blocking=True)\n\n\n self.optimizer.zero_grad()\n self.af_extractor.train()\n self.model.train()\n\n (batch_x, batch_target) = self.af_extractor((batch_x, batch_target,'train', data_type))\n batch_x = (batch_x - self.mean) / self.std\n if self.cfg['training']['model'] == 'SELD_ATT' or self.cfg['training']['model'] == 'SELD_ATT_LIGHT':\n pred, pred_constraint = self.model(batch_x)\n if self.cfg['training']['model'] == 'EINV2':\n pred = self.model(batch_x)\n if self.cfg['training']['model'] == 'SELD_ATT' or self.cfg['training']['model'] == 'SELD_ATT_LIGHT':\n loss_dict = self.losses.calculate_attention(pred, pred_constraint,batch_target, epoch_it,self.model)\n if self.cfg['training']['model'] == 'EINV2':\n loss_dict = self.losses.calculate(pred, batch_target, epoch_it, self.model)\n\n loss_dict[self.cfg['training']['loss_type']].backward(retain_graph=False)\n self.optimizer.step()\n\n self.train_losses['train_loss_all'] += loss_dict['all'].item()\n self.train_losses['train_loss_sed'] += loss_dict['sed'].item()\n self.train_losses['train_loss_doa'] += loss_dict['doa'].item()\n\n if self.cfg['training']['weight_constraints']:\n self.train_losses['train_loss_weight_orthogonal'] += loss_dict['loss_weight_orthogonal'].item()\n\n if self.cfg['training']['weight_constraints_1']:\n 
self.train_losses['train_loss_weight_orthogonal_1'] += loss_dict['loss_weight_orthogonal_1'].item()\n\n if self.cfg['training']['layer_constraints']:\n self.train_losses['train_loss_layer_orthogonal'] += loss_dict['loss_layer_orthogonal'].item()\n\n if self.cfg['training']['layer_constraints_1']:\n self.train_losses['train_loss_layer_orthogonal_1'] += loss_dict['loss_layer_orthogonal_1'].item()\n\n if self.cfg['training']['smoothness_loss']:\n self.train_losses['train_loss_doa_smoothness'] += loss_dict['loss_doa_smoothness'].item()", "def batch_norm(inputs, training, data_format):\n # We set fused=True for a significant performance boost. See\n # https://www.tensorflow.org/performance/performance_guide#common_fused_ops\n return tf.layers.batch_normalization(\n inputs=inputs, axis=1 if data_format == 'channels_first' else 3,\n momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,\n scale=True, training=training, fused=True)", "def apply(self,\n x,\n batch_stats=None,\n use_running_average=False,\n axis=-1,\n momentum=0.99,\n epsilon=1e-5,\n dtype=jnp.float32,\n bias=True,\n scale=True,\n bias_init=initializers.zeros,\n scale_init=initializers.ones,\n axis_name=None,\n axis_index_groups=None):\n x = jnp.asarray(x, jnp.float32)\n axis = axis if isinstance(axis, tuple) else (axis,)\n axis = _absolute_dims(x.ndim, axis)\n feature_shape = tuple(d if i in axis else 1 for i, d in enumerate(x.shape))\n reduced_feature_shape = tuple(d for i, d in enumerate(x.shape) if i in axis)\n reduction_axis = tuple(i for i in range(x.ndim) if i not in axis)\n if self.is_stateful() or batch_stats:\n ra_mean = self.state('mean', reduced_feature_shape,\n initializers.zeros, collection=batch_stats)\n ra_var = self.state('var', reduced_feature_shape,\n initializers.ones, collection=batch_stats)\n else:\n ra_mean = None\n ra_var = None\n\n if use_running_average:\n if ra_mean is None:\n raise ValueError('when use_running_averages is True '\n 'either use a stateful context or provide batch_stats')\n mean, var = ra_mean.value, ra_var.value\n else:\n mean = jnp.mean(x, axis=reduction_axis, keepdims=False)\n mean2 = jnp.mean(lax.square(x), axis=reduction_axis, keepdims=False)\n if axis_name is not None and not self.is_initializing():\n concatenated_mean = jnp.concatenate([mean, mean2])\n mean, mean2 = jnp.split(\n lax.pmean(\n concatenated_mean,\n axis_name=axis_name,\n axis_index_groups=axis_index_groups), 2)\n var = mean2 - lax.square(mean)\n\n if ra_mean and not self.is_initializing():\n ra_mean.value = momentum * ra_mean.value + (1 - momentum) * mean\n ra_var.value = momentum * ra_var.value + (1 - momentum) * var\n\n y = x - mean.reshape(feature_shape)\n mul = lax.rsqrt(var + epsilon)\n if scale:\n mul = mul * self.param(\n 'scale', reduced_feature_shape, scale_init).reshape(feature_shape)\n y = y * mul\n if bias:\n y = y + self.param(\n 'bias', reduced_feature_shape, bias_init).reshape(feature_shape)\n return jnp.asarray(y, dtype)", "def BatchNorm(X): # (X - mu) / sigma -> Have to implement trainable parameters gamma and beta on this\n epsilon = 0.001 # To prevent overflow and ensure numerical stability\n bn = (X - torch.mean(X)) / (torch.std(X)+epsilon)\n sigma.append(torch.std(X)+epsilon)\n return bn", "def test_train_DA_split_maybe_normalize(X, settings):\n\n\n M, n = DataLoader.get_dim_X(X, settings)\n\n hist_idx = int(M * settings.HIST_FRAC)\n hist_X = X[: hist_idx] #select historical data (i.e. 
training set in ML terminology)\n # that will be used for normalize\n\n #use only the training set to calculate mean and std\n mean = np.mean(hist_X, axis=0)\n std = np.std(hist_X, axis=0)\n\n #Some std are zero - set the norm to 1 in this case so that feature is zero post-normalization\n std = np.where(std <= 0., 1, std)\n\n\n if settings.NORMALIZE:\n X = (X - mean)\n X = (X / std)\n\n\n # Split X into historical and present data. We will\n # assimilate \"observations\" at a single timestep t_DA\n # which corresponds to the control state u_c\n # We will take initial condition u_0, as mean of historical data\n\n t_DA = M - (settings.TDA_IDX_FROM_END + 1) #idx of Data Assimilation\n assert t_DA >= hist_idx, (\"Cannot select observation from historical data.\"\n \"Reduce HIST_FRAC or reduce TDA_IDX_FROM_END to prevent overlap.\\n\"\n \"t_DA = {} and hist_idx = {}\".format(t_DA, hist_idx))\n assert t_DA > hist_idx, (\"Test set cannot have zero size\")\n\n train_X = X[: hist_idx]\n test_X = X[hist_idx : t_DA]\n u_c = X[t_DA] #control state (for DA)\n\n\n if settings.SHUFFLE_DATA:\n set_seeds()\n np.random.shuffle(train_X)\n np.random.shuffle(test_X)\n\n\n return train_X, test_X, u_c, X, mean, std", "def _bootstrap_step(self, mean_dict: dict) -> None:\n torch.no_grad()\n running_metrics = {metric.__name__: [] for metric in self.metrics}\n\n for sample_step in range(self.bootstrap_size):\n batch_x, batch_y, input_lengths, target_lengths = next(iter(self.data_loader))\n\n batch = (batch_x, batch_y, input_lengths, target_lengths)\n batch = self._recursive_to_cuda(batch) # move to GPU\n batch_x, batch_y, input_lengths, target_lengths = batch\n\n model_output = self.model(batch_x, input_lengths)\n\n self._compute_running_metrics(model_output, batch, running_metrics)\n\n self._fill_mean_dict(running_metrics, mean_dict)", "def _compute_mean_std(self, sum_, ssum, size):\n assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'\n mean = sum_ / size\n sumvar = ssum - sum_ * mean\n unbias_var = sumvar / (size - 1)\n bias_var = sumvar / size\n\n self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data\n self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data\n\n return mean, bias_var.clamp(self.eps) ** -0.5", "def normalize(self):\n self._data /= self.norm()", "def compute_mean(self):\n # load_in_all_parameters(self.save_directory, self.auto_encoder)\n for i, data_row in enumerate(self.X_train_naive):\n input_nn = data_row\n if torch.cuda.is_available():\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)).cuda())\n else:\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)))\n\n prediction_embedding = self.model.forward_only_encoding(input_nn)\n print(prediction_embedding)\n if i == 0:\n self.embedding_np = prediction_embedding.data.clone().cpu().numpy()[0]\n else:\n self.embedding_np = np.vstack((self.embedding_np, prediction_embedding.data.clone().cpu().numpy()[0]))\n self.mean_embedding = np.average(self.embedding_np, axis=0)\n print('mean embedding is ', self.mean_embedding)", "def modelmean(self, model_params, this_data, this_suff_stat):\n pass", "def forward(self, input):\n\n # transform input into matrix of vectors [Batch, 784]\n input = input.view(-1, 784).to(self.device)\n\n # full pass\n mean, std = self.encoder(input)\n z = self.reparameterize(mean, std)\n output = self.decoder(z)\n\n # calculate each part of the loss\n reg_loss = self.reg_loss(mean, 
std)\n recon_loss = self.recon_loss(input, output)\n\n # avarage over batch\n average_negative_elbo = torch.mean(reg_loss + recon_loss, dim=0)\n return average_negative_elbo", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def featureNormalization(X):\n mean=np.hstack(np.mean(X[:,0]),np.mean(X[:,1]),np.mean(X[:,2]))\n std=np.hstack(np.std(X[:,0]),np.std(X[:,1]),np.std(X[:,2]))\n \n X_norm = (X - mean)/std\n \n return X_norm", "def batch_normalization(input_var=None):\n\n # Hyperparameters\n hp = Hyperparameters()\n hp('batch_size', 30)\n hp('n_epochs', 1000)\n hp('learning_rate', 0.01)\n hp('l1_reg', 0.00)\n hp('l2_reg', 0.0001)\n hp('patience', 5000)\n\n # Create connected layers\n # Input layer\n l_in = InputLayer(input_shape=(hp.batch_size, 28 * 28), input_var=input_var, name='Input')\n # Batch Normalization\n l_bn1 = BatchNormalization(incoming=l_in, name='Batch Normalization 1')\n # Dense Layer\n l_hid1 = DenseLayer(incoming=l_bn1, n_units=500, W=glorot_uniform, l1=hp.l1_reg,\n l2=hp.l2_reg, activation=relu, name='Hidden layer 1')\n # Batch Normalization\n l_bn2 = BatchNormalization(incoming=l_hid1, name='Batch Normalization 2')\n # Dense Layer\n l_hid2 = DenseLayer(incoming=l_bn2, n_units=500, W=glorot_uniform, l1=hp.l1_reg,\n l2=hp.l2_reg, activation=relu, name='Hidden layer 2')\n # Batch Normalization\n l_bn3 = BatchNormalization(incoming=l_hid2, name='Batch Normalization 3')\n # Logistic regression Layer\n l_out = LogisticRegression(incoming=l_bn3, n_class=10, l1=hp.l1_reg,\n l2=hp.l2_reg, name='Logistic regression')\n\n # Create network and add layers\n net = Network('mlp with batch normalization')\n net.add(l_in)\n net.add(l_bn1)\n net.add(l_hid1)\n net.add(l_bn2)\n net.add(l_hid2)\n net.add(l_bn3)\n net.add(l_out)\n\n return net, hp", "def normalize_feature_data(feature, X_train, X_valid, X_test):\r\n if type(feature) == list:\r\n for i, f in enumerate(feature):\r\n \r\n if f in __normalizing_features__:\r\n stds = np.std(X_train[i], axis=0)\r\n stds[stds==0.0] = 1.0\r\n means = np.mean(X_train[i], axis=0)\r\n X_train[i] = (X_train[i]-means)/stds\r\n X_valid[i] = (X_valid[i]-means)/stds\r\n X_test[i] = (X_test[i]-means)/stds\r\n else:\r\n if feature in __normalizing_features__:\r\n stds = np.std(X_train, axis=0)\r\n stds[stds==0.0] = 1.0\r\n means = np.mean(X_train, axis=0)\r\n X_train = (X_train-means)/stds\r\n X_valid = (X_valid-means)/stds\r\n X_test = (X_test-means)/stds\r\n \r\n return X_train, X_valid, X_test", "def normalize_data(self, data):\n self.find_mean_std(data)\n return (data - self._data_mean) / self._data_std", "def preprocess(self, data, scope):\n if scope != 'train':\n # reshape\n data = self._data_reshape(data)\n\n # normalize\n if data.dtype == np.int16:\n start_unit = -1000\n end_unit = 300\n data = 2 * (data.astype(np.float32) - start_unit) / (end_unit - start_unit) - 1\n\n # subtract train mean and divide by train std\n if scope == 'train':\n self.mean = np.mean(data)\n data -= self.mean\n self.std = np.std(data)\n data /= self.std\n else:\n data -= self.mean\n data /= self.std\n\n # reshape for channel\n s = data.shape\n if len(data.shape) == 4:\n data = data.reshape((s[0], s[1], s[2], s[3], 1))\n else:\n data = data.reshape((s[0], s[1], s[2], 1))\n return data", "def rescale_data(self):\n\n # Dividing every array of simulated data vectors by the mean of that array.\n '''# Didnt work\n for key in 
self.data.keys():\n self.data[key] /= np.mean(self.data[key])\n '''\n\n self.rescaled = True\n\n # Mean normalization\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.mean(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Median normalization\n \"\"\" didnt work, still dividing by large number \n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Divide by median\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.median(self.data[key]))\n \"\"\"\n\n # Take logarithm of data\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] = np.log10(self.data[key])\n \"\"\"\n\n # Scale by length of vector\n \"\"\"\n for key in self.data.keys():\n self.data[key] /= np.linalg.norm(self.Cl_noiseless)\n \"\"\"\n\n \n # Scale by negative of the natural logarithm \n for key in self.data.keys():\n self.data[key] = -1 * np.log(self.data[key]) \n \n \"\"\"\n # Scale by subtracting the mean and dividing by std\n std = np.nanstd(self.data['data'])\n mean = np.nanmean(self.data['data'])\n for key in self.data.keys():\n # self.data[key] -= np.log(self.Cl_noiseless) # -1* # scale this same way\n # self.data[key] -= self.Cl_noiseless # -1* # scale this same way\n self.data[key] -= mean \n self.data[key] /= std\n \"\"\"", "def test_batch_norm_fold(self):\n inputs = tf.keras.Input(shape=(32, 32, 3,))\n conv = tf.keras.layers.Conv2D(32, (3, 3))(inputs)\n bn = tf.keras.layers.BatchNormalization(fused=True)(conv, training=False)\n relu = tf.nn.relu(bn)\n model = tf.keras.Model(inputs=inputs, outputs=relu)\n\n np.random.seed(0)\n w_shape = model.layers[0].input.shape\n numpy_data = np.random.rand(1, w_shape[1], w_shape[2], w_shape[3]).astype(np.float32)\n\n baseline_output = model(numpy_data)\n\n _, model = fold_all_batch_norms(model)\n output_after_fold = model(numpy_data)\n\n assert np.allclose(baseline_output, output_after_fold, atol=1.e-4)", "def normalize_train_data(self, data_vector, clf_type = \"generic\"):\n\t\tassert(clf_type in [\"generic\", \"specific\"])\n\n\t\tif clf_type == \"generic\":\n\t\t\tself.mean_per_dim_generic = []\n\t\t\tmean_per_dim = self.mean_per_dim_generic\n\t\t\tself.std_per_dim_generic = []\n\t\t\tstd_per_dim = self.std_per_dim_generic\n\t\telse:\n\t\t\tself.mean_per_dim_specific = []\n\t\t\tmean_per_dim = self.mean_per_dim_specific\n\t\t\tself.std_per_dim_specific = []\n\t\t\tstd_per_dim = self.std_per_dim_specific\n\n\t\tper_dim = zip(*data_vector)\n\n\t\tfor i in xrange(len(per_dim)):\n\t\t\n\t\t\tm = np.float64(sum (per_dim[i]) / float (len(per_dim[i])))\n\t\t\ts = np.std(per_dim[i])\n\t\t\tper_dim[i] -= m\n\t\t\tif s>0:\n\t\t\t\tper_dim[i] /= s\n\t\t\n\t\t\tmean_per_dim.append(m)\n\t\t\tstd_per_dim.append(s)\n\t\n\t\tdata_vector = zip(*per_dim)\n\t\tfor i in xrange(len(data_vector)):\n\t\t\tdata_vector[i] = list(data_vector[i])\n\n\t\treturn data_vector", "def epoch_diagnostics(self, train_loss, train_err, test_loss, test_err):\n m = self.nbatches\n logging.info(\"Epoch diagnostics computation\")\n\n layernum = 0\n layer_gradient_norm_sqs = []\n gavg_norm_acum = 0.0\n gavg_acum = []\n for group in self.param_groups:\n for p in group['params']:\n\n layer_gradient_norm_sqs.append([])\n gavg = self.state[p]['gavg'].cpu()\n gavg_acum.append(gavg.numpy())\n gavg_norm_acum += gavg.norm()**2 #torch.dot(gavg, gavg)\n layernum += 
1\n\n gradient_norm_sqs = []\n vr_step_variance = []\n cos_acums = []\n variances = []\n\n for batch_id in range(m):\n norm_acum = 0.0\n ginorm_acum = 0.0\n vr_acum = 0.0\n layernum = 0\n cos_acum = 0.0\n var_acum = 0.0\n for group in self.param_groups:\n for p in group['params']:\n param_state = self.state[p]\n\n gktbl = param_state['gktbl']\n gavg = param_state['gavg'].type_as(p.data).cpu()\n\n gi = gktbl[batch_id, :]\n var_norm_sq = (gi-gavg).norm()**2 #torch.dot(gi-gavg, gi-gavg)\n norm_acum += var_norm_sq\n ginorm_acum += gi.norm()**2 #torch.dot(gi, gi)\n layer_gradient_norm_sqs[layernum].append(var_norm_sq)\n\n gktbl_old = param_state['gktbl_old']\n gavg_old = param_state['gavg_old'].type_as(p.data).cpu()\n gi_old = gktbl_old[batch_id, :]\n #pdb.set_trace()\n vr_step = gi - gi_old + gavg_old\n vr_acum += (vr_step - gavg).norm()**2 #torch.dot(vr_step - gavg, vr_step - gavg)\n cos_acum += torch.sum(gavg*gi)\n\n var_acum += (gi - gavg).norm()**2\n\n layernum += 1\n gradient_norm_sqs.append(norm_acum)\n vr_step_variance.append(vr_acum)\n cosim = cos_acum/math.sqrt(ginorm_acum*gavg_norm_acum)\n #pdb.set_trace()\n cos_acums.append(cosim)\n variances.append(var_acum)\n\n variance = sum(variances)/len(variances)\n\n print(\"mean cosine: {}\".format(sum(cos_acums)/len(cos_acums)))\n\n #pdb.set_trace()\n\n with open('stats/{}fastdiagnostics_epoch{}.pkl'.format(self.test_name, self.epoch), 'wb') as output:\n pickle.dump({\n 'train_loss': train_loss,\n 'train_err': train_err,\n 'test_loss': test_loss,\n 'test_err': test_err,\n 'epoch': self.epoch,\n #'layer_gradient_norm_sqs': layer_gradient_norm_sqs,\n #'gradient_norm_sqs': gradient_norm_sqs,\n #'vr_step_variance': vr_step_variance,\n #'cosine_distances': cos_acums,\n #'variances': variances,\n 'variance': variance,\n #'gavg_norm': gavg_norm_acum,\n #'gavg': gavg_acum,\n #'iterate_distances': self.inrun_iterate_distances,\n #'grad_distances': self.inrun_grad_distances,\n }, output)\n print(\"Epoch diagnostics saved\")\n #pdb.set_trace()\n\n self.inrun_iterate_distances = []\n self.inrun_grad_distances = []", "def batch_normal(x, is_train, name, activation_fn=None):\n with tf.name_scope(name), tf.variable_scope(name):\n outputs = tf.contrib.layers.batch_norm(x,\n decay=0.999,\n scale=True,\n activation_fn=activation_fn,\n is_training=is_train)\n return outputs", "def normalize(X):\n\tX = X - np.mean(X,axis=1)[:,np.newaxis]\n\tX = X/np.std(X,axis=0)[np.newaxis,:];\n\tX = X - np.mean(X,axis=0)[np.newaxis,:]\n\treturn X", "def batch_norm_template(inputs, is_training, scope, moments_dims, bn_decay=None, affine=True):\n with tf.variable_scope(scope) as sc:\n if len(moments_dims) == 1:\n num_channels = inputs.get_shape()[-1].value\n elif len(moments_dims) == 3:\n if 1 in moments_dims:\n # NHWC order\n num_channels = inputs.get_shape()[-1].value\n else:\n # NCHW order\n num_channels = inputs.get_shape()[1].value\n else:\n raise ValueError('custom_batch_norm_act suppose len(moments_dim) is either 1 or 3: moments_dim={}\\n'.format(moments_dim))\n\n beta = _get_variable('beta', [num_channels], initializer=tf.zeros_initializer, dtype=tf.float32)\n gamma = _get_variable('gamma', [num_channels], initializer=tf.ones_initializer, dtype=tf.float32)\n\n batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments')\n decay = bn_decay if bn_decay is not None else 0.9\n ema = tf.train.ExponentialMovingAverage(decay=decay)\n # Operator that maintains moving averages of variables.\n ema_apply_op = tf.cond(is_training,\n lambda: ema.apply([batch_mean, 
batch_var]),\n lambda: tf.no_op())\n\n # Update moving average and return current batch's avg and var.\n def mean_var_with_update():\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n # ema.average returns the Variable holding the average of var.\n mean, var = tf.cond(is_training,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n\n if _SHOW_VAR_SUMMARY:\n tf.summary.histogram('beta', beta)\n tf.summary.histogram('gamma', gamma)\n tf.summary.histogram('mean', mean)\n tf.summary.histogram('var', var)\n\n normed = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3)\n\n return normed", "def __call__(self, x, **kwargs):\n argument.check_unexpected_kwargs(\n kwargs, test='test argument is not supported anymore. '\n 'Use chainer.using_config')\n finetune, = argument.parse_kwargs(kwargs, ('finetune', False))\n\n if hasattr(self, 'gamma'):\n gamma = self.gamma\n else:\n with cuda.get_device_from_id(self._device_id):\n gamma = variable.Variable(self.xp.ones(\n self.avg_mean.shape, dtype=x.dtype))\n\n if hasattr(self, 'beta'):\n beta = self.beta\n else:\n with cuda.get_device_from_id(self._device_id):\n beta = variable.Variable(self.xp.zeros(\n self.avg_mean.shape, dtype=x.dtype))\n\n if configuration.config.train:\n if finetune:\n self.N += 1\n decay = 1. - 1. / self.N\n else:\n decay = self.decay\n\n ret = functions.batch_normalization(\n x, gamma, beta, eps=self.eps, running_mean=self.avg_mean,\n running_var=self.avg_var, decay=decay, axis=self.axis)\n else:\n # Use running average statistics or fine-tuned statistics.\n mean = variable.Variable(self.avg_mean)\n var = variable.Variable(self.avg_var)\n ret = functions.fixed_batch_normalization(\n x, gamma, beta, mean, var, self.eps, axis=self.axis)\n return ret", "def norm_data(self):\n if (self.nrows, self.ncolumns) < self.data.shape:\n self.data = self.data[0:self.nrows, 0:self.ncolumns]\n if self.data.dtype != np.float64:\n self.data = self.data.astype(np.float64)\n self.meanval = self.data.mean()\n self.stdval = self.data.std()", "def _train(self):\n epoch_training_time = 0\n epoch_metrics_time = 0\n self.epoch_ += 1\n for i_batch, sample_batched in enumerate(self.dataloader):\n self.global_step_ += 1\n batch_start_time = time.time()\n data_sample = sample_batched[0].to(self.device)\n\n # Get model samples, either from replay buffer or noise.\n if self.model_samples_ is None:\n self.model_samples_ = deque(\n [\n self.net_.sample_from_prior(\n data_sample.shape[0], device=self.device\n ).detach()\n ]\n )\n elif len(self.model_samples_) > self.max_replay:\n self.model_samples_.popleft()\n replay_sample = random.choices(\n self.model_samples_,\n # favor more recent samples:\n weights=list(range(1, len(self.model_samples_) + 1)),\n )[0]\n noise_sample = self.net_.sample_from_prior(\n replay_sample.shape[0], device=self.device\n )\n mask = torch.rand(replay_sample.shape[0]) < self.replay_prob\n while len(mask.shape) < len(replay_sample.shape):\n # Add extra feature-dims\n mask.unsqueeze_(dim=-1)\n\n model_sample = torch.where(\n mask.to(self.device), replay_sample, noise_sample\n )\n\n self.net_.eval()\n # Run at least one iteration\n model_sample = self.net_.sample_fantasy(\n model_sample,\n num_mc_steps=self.num_mc_steps,\n mc_dynamics=self.sampler,\n ).detach()\n\n self.model_samples_.append(model_sample)\n\n # Sanity checks:\n assert (\n data_sample.shape[1:] == self.net_.input_shape\n ), \"Data is incompatible with network.\"\n assert (\n 
model_sample.shape[1:] == data_sample.shape[1:]\n ), \"Model and data samples are incompatible.\"\n\n # Forward gradient:\n self.net_.train()\n self.net_.zero_grad()\n data_energy_mean = self.net_(data_sample).mean()\n model_energy = self.net_(model_sample)\n model_energy_mean = model_energy.mean()\n\n # Estimate the odds of the data's energy based on a normal fitted to\n # model samples:\n data_erf = torch.erf(\n (data_energy_mean - model_energy_mean) / model_energy.std()\n )\n\n objective = data_energy_mean - model_energy_mean\n objective.backward()\n torch.nn.utils.clip_grad.clip_grad_value_(self.net_.parameters(), 1e2)\n self.optimizer_.step()\n\n batch_training_time = time.time() - batch_start_time\n epoch_training_time += batch_training_time\n self.logger_(energy_diff=float(objective))\n self.logger_(data_erf=float(data_erf))\n\n tr_metrics_start_time = time.time()\n for callback in self.step_callbacks:\n callback(\n net=self.net_,\n data_sample=data_sample,\n model_sample=model_sample,\n epoch=self.epoch_,\n global_step=self.global_step_,\n validation=False,\n )\n tr_metrics_time = time.time() - tr_metrics_start_time\n epoch_metrics_time += tr_metrics_time\n if self.verbose:\n print(\n f\"on epoch {self.epoch_}, batch {i_batch}, data erf: {data_erf}, objective: {objective}\"\n )\n print(f\"model energy: {model_energy_mean} +- {model_energy.std()}\")\n print(f\"data energy: {data_energy_mean}\")\n print(\n f\"training time: {batch_training_time:0.3f}s, metrics time: {tr_metrics_time:0.3f}s\"\n )\n means = self.logger_.means()\n if self.verbose:\n print(f\"on epoch {self.epoch_}\")\n for k, v in means.items():\n print(f\"{k}: {v}\")\n self.logger_.flush()\n means[\"loss\"] = energy_model.utils.constraints.add_soft_constraint(\n means[\"loss_ais\"], means[\"data_erf\"], lower_bound=-1\n )\n return means", "def forward(self, x, alpha=1e-8):\r\n batch_size, _, height, width = x.shape\r\n\r\n # [B x C x H x W] Subtract mean over batch.\r\n y = x - x.mean(dim=0, keepdim=True)\r\n\r\n # [1 x C x H x W] Calc standard deviation over batch\r\n y = torch.sqrt(y.pow(2.).mean(dim=0, keepdim=False) + alpha)\r\n\r\n # [1] Take average over feature_maps and pixels.\r\n y = y.mean().view(1, 1, 1, 1)\r\n\r\n # [B x 1 x H x W] Replicate over group and pixels.\r\n y = y.repeat(batch_size, 1, height, width)\r\n\r\n # [B x C x H x W] Append as new feature_map.\r\n y = torch.cat([x, y], 1)\r\n\r\n # return the computed values:\r\n return y", "def normalize(self, mean=None, std=None):\n if mean is None:\n mean = self.mean\n if std is None:\n std = self.std\n\n new = self.copy()\n new.data = (new.data - mean) / std\n return new", "def __train_projection__(self):\n\n copied_train_data = np.copy(self.train_data)\n\n for curr_train_sample in tqdm(copied_train_data,\n disable=not self.verbose,\n postfix=f'Model training...'):\n\n assert len(curr_train_sample.shape) == 1, \\\n f'Flatten your input! 
Now dim is: {curr_train_sample.shape}'\n\n self.weights += curr_train_sample.reshape(-1, 1) @ self.__get_inverse_flatten__(curr_train_sample)\n\n print(self.weights)\n # self.weights = self.weights / len(self.train_data)\n\n print(self.weights)", "def __call__(self,\n inputs,\n use_running_stats = None,\n weights = None):\n use_running_stats = nn.module.merge_param(\n \"use_running_stats\", self.use_running_stats, use_running_stats)\n\n # Normalization is independent per spin per channel.\n num_spins, num_channels = inputs.shape[-2:]\n feature_shape = (1, 1, 1, num_spins, num_channels)\n reduced_feature_shape = (num_spins, num_channels)\n\n initializing = not self.has_variable(\"batch_stats\", \"variance\")\n\n running_variance = self.variable(\"batch_stats\", \"variance\",\n lambda s: jnp.ones(s, jnp.float32),\n reduced_feature_shape)\n\n if self.centered:\n running_mean = self.variable(\"batch_stats\", \"mean\",\n lambda s: jnp.zeros(s, jnp.complex64),\n reduced_feature_shape)\n\n if use_running_stats:\n variance = running_variance.value\n if self.centered:\n mean = running_mean.value\n else:\n # Compute the spherical mean over the spherical grid dimensions, then a\n # conventional mean over the batch.\n if self.centered:\n mean = sphere_utils.spin_spherical_mean(inputs)\n mean = jnp.average(mean, axis=0, weights=weights)\n # Complex variance is E[x x*] - E[x]E[x*].\n # For spin != 0, E[x] should be zero, although due to discretization this\n # is not always true. We only use E[x x*] here.\n # E[x x*]:\n mean_abs_squared = sphere_utils.spin_spherical_mean(inputs *\n inputs.conj())\n mean_abs_squared = jnp.average(mean_abs_squared, axis=0, weights=weights)\n # Aggregate means over devices.\n if self.axis_name is not None and not initializing:\n if self.centered:\n mean = lax.pmean(mean, axis_name=self.axis_name)\n mean_abs_squared = lax.pmean(mean_abs_squared, axis_name=self.axis_name)\n\n # Imaginary part is negligible.\n variance = mean_abs_squared.real\n\n if not initializing:\n running_variance.value = (self.momentum * running_variance.value +\n (1 - self.momentum) * variance)\n if self.centered:\n running_mean.value = (self.momentum * running_mean.value +\n (1 - self.momentum) * mean)\n\n if self.centered:\n outputs = inputs - mean.reshape(feature_shape)\n else:\n outputs = inputs\n\n factor = lax.rsqrt(variance.reshape(feature_shape) + self.epsilon)\n if self.use_scale:\n scale = self.param(\"scale\",\n self.scale_init,\n reduced_feature_shape).reshape(feature_shape)\n factor = factor * scale\n\n outputs = outputs * factor\n\n if self.use_bias:\n bias = self.param(\"bias\",\n self.bias_init,\n reduced_feature_shape).reshape(feature_shape)\n outputs = outputs + bias\n\n return outputs", "def batchForwardBackward(self, batch, learningRate, ass, dErrors, bundles):\n\n # reset bundles\n for layerIndex in range(1, len(bundles)):\n for bundle in bundles[layerIndex]:\n self.layers[layerIndex].resetBundle(bundle)\n\n # forwardBackward on each sample, and sum up modifications suggested by the gradients\n iterations = 0\n sumLoss = 0\n for indexInBatch, (sample, label, labelScalar) in enumerate(batch):\n iterations += 1\n sumLoss += self.forwardBackward(sample, label, indexInBatch, ass, bundles)\n meanLoss = sumLoss/iterations\n\n # modify weigths and biases according to previous backpropagations\n for layerIndex in range(1, len(bundles)):\n self.layers[layerIndex].update(layerIndex, ass, learningRate, bundles[layerIndex])\n\n return meanLoss", "def batchnorm_forward(x, gamma, beta, 
bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n sample_mean = np.mean(x, axis = 0)\n sample_var = np.var(x , axis = 0)\n x_hat = (x - sample_mean) / (np.sqrt(sample_var + eps))\n out = gamma * x_hat + beta\n cache = (gamma, x, sample_mean, sample_var, eps, x_hat)\n running_mean = momentum * running_mean + (1 - momentum) * sample_mean\n running_var = momentum * running_var + (1 - momentum) * sample_var\n elif mode == 'test':\n scale = gamma / (np.sqrt(running_var + eps))\n out = x * scale + (beta - running_mean * scale)\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache", "def _step(self):\n # Make a minibatch of training data\n num_train = self.X_train.shape[0]\n # random choose the samples\n batch_mask = np.random.choice(num_train, self.batch_size)\n X_batch = self.X_train[batch_mask]\n y_batch = self.y_train[batch_mask]\n\n # Compute loss and gradient\n loss, grads = self.model.loss(X_batch, y_batch)\n self.loss_history.append(loss)\n\n # Perform a parameter update\n for p, w in self.model.params.items():\n dw = grads[p]\n config = self.optim_configs[p]\n next_w, next_config = self.update_rule(w, dw, config)\n self.model.params[p] = next_w\n self.optim_configs[p] = next_config", "def fit(self, data):\n\n \"\"\"YOUR CODE HERE \"\"\"\n # unormalized data\n un_st = np.concatenate([datum[\"observations\"] for datum in data])\n un_stp1 = np.concatenate([datum[\"next_observations\"] for datum in data])\n un_at = np.concatenate([datum[\"actions\"] for datum in data])\n \n # normalize data\n n_st = (un_st-self.mean_obs)/(self.std_obs+self.epsilon)\n n_at = (un_at-self.mean_action)/(self.std_action+self.epsilon)\n n_stat = np.concatenate([n_st,n_at],axis=1)\n \n n_delta = ((un_stp1-un_st)-self.mean_deltas)/(self.std_deltas+self.epsilon)\n\n # make a shuffle row of whole data to be used\n N = n_delta.shape[0]\n train_indicies = np.arange(N)\n np.random.shuffle(train_indicies)\n # train over the whole data set for the number of iterations\n for i in range(self.iterations):\n for i in range(int(math.ceil(N/self.batch_size))):\n # index for the batch points from a random row\n start_idx = i*self.batch_size%N\n idx = train_indicies[start_idx:start_idx+self.batch_size]\n # choose the batch\n feed_dict = {self.st_at : n_stat[idx,:], self.delta_ : n_delta[idx,:]}\n # train the data\n self.sess.run(self.update_op, feed_dict=feed_dict)", "def fit_batch(self, batch):\n if self.auto_mix_prec:\n\n self.wav2vec_optimizer.zero_grad()\n self.model_optimizer.zero_grad()\n\n with torch.cuda.amp.autocast():\n outputs = self.compute_forward(batch, sb.Stage.TRAIN)\n loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)\n\n self.scaler.scale(loss).backward()\n self.scaler.unscale_(self.wav2vec_optimizer)\n self.scaler.unscale_(self.model_optimizer)\n\n if self.check_gradients(loss):\n self.scaler.step(self.wav2vec_optimizer)\n self.scaler.step(self.adam_optimizer)\n\n self.scaler.update()\n else:\n outputs = self.compute_forward(batch, sb.Stage.TRAIN)\n\n loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)\n loss.backward()\n\n if self.check_gradients(loss):\n self.wav2vec_optimizer.step()\n 
self.model_optimizer.step()\n\n self.wav2vec_optimizer.zero_grad()\n self.model_optimizer.zero_grad()\n\n return loss.detach()", "def _special_handle_batchnorm(cls, op, X, W):\n # for singa, x, scale, bias is input\n # and mean and var is attribute\n # so we add the mean and var to W\n tensor_list = []\n append_inputs = {\"mean\": op.running_mean, \"var\": op.running_var}\n for tmp_name, append_input in append_inputs.items():\n node_name = op.name + \":\" + tmp_name\n append_input = tensor.to_numpy(tensor.from_raw_tensor(append_input))\n tensor_list.append(numpy_helper.from_array(append_input, node_name))\n return tensor_list", "def normalize_image(self):\n # The image normalization is identical to Cloud TPU ResNet.\n self._image = tf.image.convert_image_dtype(self._image, dtype=tf.float32)\n offset = tf.constant(DATASET_MEAN)\n offset = tf.expand_dims(offset, axis=0)\n offset = tf.expand_dims(offset, axis=0)\n self._image -= offset\n\n scale = tf.constant(DATASET_VAR)\n scale = tf.expand_dims(scale, axis=0)\n scale = tf.expand_dims(scale, axis=0)\n self._image /= scale", "def divide_by_std_across_trials(self):\n if not hasattr(self, 'mean_across_trials_subtracted_data'):\n self.subtract_mean_across_trials()\n self.std_across_trials_divided_data = \\\n self.mean_across_trials_subtracted_data / \\\n np.std(self.mean_across_trials_subtracted_data,\n axis=1, keepdims=True)", "def normalize_data(data):\n mean = np.mean(data)\n std = np.std(data)\n return (data - mean) / std", "def normalize(array, inplace=False):\n if inplace:\n array -= ds_mean\n array /= ds_std\n else:\n array = (array - ds_mean) / ds_std\n return array", "def batch_norm_act(self, inputs, training, UseAct=True):\n # We set fused=True for a significant performance boost. See\n # https://www.tensorflow.org/performance/performance_guide#common_fused_ops\n logstr = ''\n if self.bn:\n if DEBUG_TMP:\n training = True\n global_step = tf.train.get_or_create_global_step()\n batch_norm_decay = self.bn_decay_fn(global_step)\n inputs = tf.layers.batch_normalization(\n inputs=inputs, axis=1 if self.data_format == 'channels_first' else -1,\n momentum=batch_norm_decay , epsilon=_BATCH_NORM_EPSILON, center=True,\n scale=True, training=training, fused=True)\n logstr += 'BN'\n\n if UseAct:\n if self.act == 'Relu':\n act_fn = tf.nn.relu\n elif self.act == 'Lrelu':\n act_fn = tf.nn.leaky_relu\n else:\n raise NotImplementedError\n inputs = act_fn(inputs)\n logstr += ' '+self.act\n\n if self.IsShowModel: self.log('%30s'%(logstr))\n return inputs", "def train_step(model, model_0, mu:int, optimizer, train_data, loss_f):\n \n total_loss=0\n \n for idx, (features,labels) in enumerate(train_data):\n \n optimizer.zero_grad()\n \n predictions= model(features)\n \n loss=loss_f(predictions,labels)\n loss+=mu/2*difference_models_norm_2(model,model_0)\n total_loss+=loss\n \n loss.backward()\n optimizer.step()\n \n return total_loss/(idx+1)", "def feature_normalize(X):\n X_mean = np.mean(X, axis=0)\n X_std = np.std(X, axis=0)\n X_std[0, 0] = 1\n X_normalize = (X - X_mean) / X_std\n X_normalize[:, 0] = 1.0\n return X_normalize, X_mean, X_std", "def _NormalizeStep(self, theta, inputs, paddings, state0, state1):\n if isinstance(self.norm, bn_layers.GroupNormLayer):\n inputs, paddings, norm_state1 = self.norm.StreamStep(\n theta.norm, inputs, paddings, state0.norm_state)\n state1.norm_state = norm_state1\n elif isinstance(self.norm, bn_layers.BatchNormLayer):\n inputs = self.norm.FProp(theta.norm, inputs)\n elif isinstance(self.norm, layers.LayerNorm):\n inputs 
= self.norm.FProp(theta.norm, inputs)\n else:\n raise NotImplementedError(\n 'Only bn_layers.GroupNormLayer, layers.LayerNorm are supported.')\n # [b, t, d]\n return inputs, paddings", "def _localNormalizeData(self,values,names,feat):\n self.muAndSigmaFeatures[feat] = (0.0,1.0)", "def apply_batch_normalization(self, layer):\n if type(layer) is not BatchNormalization:\n raise ValueError('The `layer` must be neoml.Dnn.BatchNormalization.')\n\n self._internal.apply_batch_normalization(layer._internal)", "def batch_norm(in_tensor, phase_train, name, reuse=None, data_format='NHWC', center=True, scale=True):\n axis = -1 if data_format == 'NHWC' else 1\n with tf.variable_scope(name):\n # return tf.contrib.layers.batch_norm(in_tensor, is_training=phase_train, scope=scope, reuse=reuse)\n return tf.layers.batch_normalization(in_tensor, axis=axis, center=center, scale=scale, training=phase_train,\n reuse=reuse, fused=True, momentum=0.99, epsilon=1e-1)", "def normalize_features(X):\n std = X.std(axis=0)\n std = np.where(std == 0, 1, std) # to avoid division by zero\n x_normed = (X - X.mean(axis=0)) / std\n return x_normed", "def train_batch(self,X_batch,Y_batch):\n\n average_loss = 0\n for x, y in zip(X_batch, Y_batch):\n datum_loss = self.train_datum(x,y)\n average_loss += datum_loss / self.batch_size\n\n # Update weights on all layers after processing the batch\n for l in self.layers:\n l.update_weights()\n\n return average_loss", "def normalize(self):\n d = learning_utils.convert_data_to_2d(self._data)\n d = learning_utils.normalize_2d(d)\n self._data = learning_utils.convert_data_to_1d(d)", "def normalize_data(data=None):\n # Data pre-processing\n n = data.shape[0]\n for i in range(n):\n xx = data[i,:,:]\n xx -= np.mean(xx) # Centering in 0\n xx /= np.linalg.norm(xx) # Normalizing to 1\n data[i] = xx # Affect value\n return data", "def compute_training_stats():\n means, stds = [], []\n data = SUNRGBDTrainDataset(True)\n for i in range(len(data)):\n print(i)\n img, _ = data[i]\n std, mean = t.std_mean(input=img, dim=(1, 2))\n means.append(mean)\n stds.append(std)\n means = t.sum(t.vstack(means), dim=0) / len(means)\n stds = t.sum(t.vstack(stds), dim=0) / len(stds)\n print(means, stds)", "def _update_mean(self, i):\n if self.predef_beta is not None:\n beta = self.predef_beta\n else:\n beta = self.mean_z_evalf[i] / self.std_dev_z_evalf[i]\n self.beta.append(beta)\n self.chance.append(stats.norm.cdf(float(self.beta[i])))\n\n # determine alpha i and adapt mean values\n self.alpha_i.append([])\n self.mean_i.append([])\n for j in range(len(self.std_dev)):\n self.alpha_i[i].append(self.partial_dev__std_dev[i][j] / self.std_dev_z[i].subs(self.subs_mean[i]).evalf())\n self.mean_i[i + 1].append(self.mean_i[0][j] - self.alpha_i[i][j] * beta * self.std_dev[j])", "def train(self):\n\t\tprint(\"Training...\")\n\t\tprev_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\tfor i in range(self.max_iter):\n\t\t\t# gradient descent\n\t\t\tdw0, dw = self.compute_grad(self.w0, self.w, 'train')\n\t\t\tself.w0 -= self.step_size * dw0\n\t\t\tself.w = [wj-self.step_size*dwj for wj, dwj in zip(self.w, dw)]\n\t\t\tcurr_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\t\tif i%(self.max_iter/10)==0:\n\t\t\t\tprint('iteration: {}, loss: {}'.format(i, curr_loss))\n\t\t\tif abs(curr_loss-prev_loss) < self.tolerance:\n\t\t\t\tprint('# of iterations:',i)\n\t\t\t\tbreak\n\t\tself.trained = True\n\t\tprint('Mean log loss of TRAIN data:', curr_loss)", "def normalize_mean(dataset):\n normalized_dataset = np.array(dataset)\n 
return normalized_dataset - np.mean(normalized_dataset)", "def train_loop(args, train_dataset, dev_dataset, global_mean=0.0, test_dataset=None):\n mirrored_strategy = tf.distribute.MirroredStrategy()\n with mirrored_strategy.scope():\n # build model\n user_ids = keras.Input(shape=(), dtype=tf.int32, name=\"user_id\")\n movie_ids = keras.Input(shape=(), dtype=tf.int32, name=\"movie_id\")\n item_bin_ids = keras.Input(shape=(), dtype=tf.int32, name=\"item_time_bias\")\n user_time_dev = keras.Input(shape=(), dtype=tf.float32, name=\"user_time_dev\")\n batch_score = MF_Netflix(args.user_count, args.item_count, args.hidden_dim, global_mean)(\\\n [user_ids, movie_ids, item_bin_ids, user_time_dev])\n model = keras.Model(inputs={\"user_id\":user_ids, \"movie_id\":movie_ids, \\\n \"item_time_bias\": item_bin_ids, \"user_time_dev\": user_time_dev}, \\\n outputs=batch_score)\n # build the model train setting\n lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n args.learning_rate,\n decay_steps=20000,\n decay_rate=0.96,\n staircase=True)\n optimizer = keras.optimizers.Adam(args.learning_rate)\n #optimizer = keras.optimizers.RMSprop(args.learning_rate)\n #optimizer = keras.optimizers.SGD(args.learning_rate)\n loss = keras.losses.MeanSquaredError()\n metrics = [keras.metrics.MeanSquaredError()]\n model.compile(optimizer, loss=loss, metrics=metrics)\n # make the training loop and evaluation\n checkpoint_callback = keras.callbacks.ModelCheckpoint(\\\n filepath=args.model_path, save_best_only=True, save_weights_only=True)\n tensorbaord_callback = keras.callbacks.TensorBoard(log_dir=args.summary_dir, \\\n histogram_freq=1)\n steps_per_epoch = args.steps_per_epoch\n model.fit(train_dataset, epochs=args.epochs, \\\n callbacks=[checkpoint_callback, tensorbaord_callback], \\\n validation_data=dev_dataset, steps_per_epoch=steps_per_epoch, \\\n validation_steps=args.val_steps)" ]
[ "0.73375744", "0.7016016", "0.69402426", "0.6930308", "0.6890035", "0.6870713", "0.6845096", "0.6766321", "0.67575425", "0.6714171", "0.67051464", "0.6647627", "0.65995526", "0.65970504", "0.65763676", "0.6518246", "0.6511042", "0.6459731", "0.64524907", "0.64098454", "0.63874143", "0.63809377", "0.6324262", "0.63154703", "0.62997943", "0.6298279", "0.6289356", "0.6287313", "0.62735456", "0.62627816", "0.6216657", "0.62077403", "0.61601394", "0.6149308", "0.61439615", "0.61397576", "0.6138808", "0.6134038", "0.6124527", "0.6121532", "0.61213845", "0.6109123", "0.61030513", "0.60955167", "0.609144", "0.6085476", "0.6076095", "0.6075412", "0.60751057", "0.6064521", "0.60497075", "0.6044496", "0.60444397", "0.60336506", "0.60336506", "0.60284543", "0.6026881", "0.6011414", "0.60078704", "0.60033965", "0.5997762", "0.5990121", "0.5990114", "0.59896755", "0.59612113", "0.5960117", "0.5948551", "0.59377015", "0.5933423", "0.59142447", "0.59129024", "0.5909677", "0.5906254", "0.5894378", "0.58796376", "0.5865605", "0.5865598", "0.5856616", "0.58538824", "0.5849451", "0.58470845", "0.5834366", "0.5833414", "0.58320016", "0.58287644", "0.5825648", "0.5816585", "0.5815039", "0.5813061", "0.58019656", "0.58005476", "0.5799409", "0.5799002", "0.5786612", "0.57829034", "0.5777111", "0.5775946", "0.5775693", "0.5775536", "0.577158" ]
0.6935189
3
Backward pass for batch normalization. For this implementation, you should write out a computation graph for batch normalization on paper and propagate gradients backward through intermediate nodes.
def batchnorm_backward(dout, cache):
    dx, dgamma, dbeta = None, None, None
    ###########################################################################
    # TODO: Implement the backward pass for batch normalization. Store the    #
    # results in the dx, dgamma, and dbeta variables.                         #
    # Referencing the original paper (https://arxiv.org/abs/1502.03167)       #
    # might prove to be helpful.                                              #
    ###########################################################################
    # Unpack values cached by the forward pass; sigma = sqrt(var + eps).
    x, mu, sigma, gamma, beta = cache
    N = dout.shape[0]
    X_mu = x - mu
    var_inv = 1. / sigma
    # Gradient flowing into the normalized activations x_norm.
    dX_norm = dout * gamma
    # Backpropagate through the variance and mean nodes of the graph.
    dvar = np.sum(dX_norm * X_mu, axis=0) * -0.5 * sigma**(-3)
    dmu = np.sum(dX_norm * -var_inv, axis=0) + dvar * 1/N * np.sum(-2. * X_mu, axis=0)
    # Combine the three paths that reach x: through x_norm, mu, and var.
    dx = (dX_norm * var_inv) + (dmu / N) + (dvar * 2/N * X_mu)
    # Scale and shift parameters accumulate gradients over the batch.
    dbeta = np.sum(dout, axis=0)
    dgamma = np.sum(dout * X_mu / sigma, axis=0)
    ###########################################################################
    #                             END OF YOUR CODE                            #
    ###########################################################################
    return dx, dgamma, dbeta
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n ###########################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n ###########################################################################\n #Extract x mean variance gamma and beta from cache.\n x_norm,inv_var,gamma = cache\n N = x_norm.shape[0]\n #Compute gradients of gamma and beta first,these are the simplest.\n dgamma = np.sum(dout * (x_norm),axis = 0)\n dbeta = np.sum(dout,axis = 0)\n #Now run backprop to compute dx.\n dx_normalized = gamma * dout\n #Move another step backward in graph towards x-E[x],there are 2 cases in this backward pass\n #TODO-Write this shorter,now the code is trivial.\n '''NEW code'''\n #Compute derivate of mean from norm.\n sub_mean = x_norm / inv_var\n derived_mean = np.zeros_like(inv_var)\n derived_mean += -1 * np.sum(dx_normalized * inv_var,axis = 0)\n derived_var = np.sum((-0.5 * (sub_mean * np.power(inv_var,3))) * dx_normalized,axis = 0)\n derived_mean += (-2 / N) * np.sum(sub_mean,axis = 0)\n #End of computing the differentiation of the mean.\n final_dx = np.zeros_like(x_norm)\n #Derivation of x_norm by x.\n final_dx += dx_normalized * inv_var\n #mean by x.\n final_dx += derived_mean * (1 / N)\n final_dx += derived_var * (2 / N) * sub_mean\n '''End of experiment code.'''\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return final_dx, dgamma, dbeta", "def forward(ctx, input, gamma, beta, eps=1e-5):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n ####### Forward pass of batch normalization ######\n\n # In this section, we have to perform the forward pass of batch normalization\n # with more intermediate steps, since we want to propagate error terms. 
\n # To illustrate it better, we began from the bottom and follow our way to the top.\n # In that way, we unfolded every function step by step.\n\n # Step 3.2.3: Calculate variance\n var = input.var(dim=0, unbiased=False)\n\n # Step 3.2.2: add eps for numerical stability, then sqrt\n sqrt_var = torch.sqrt(var + eps)\n\n # Step 3.2: ivert sqrtwar\n inv_sqrt_var = 1./sqrt_var\n\n # Step 3.1.1: Calculate mean\n mean = input.mean(dim=0)\n\n # Step 3.1: subtract mean vector of every trainings example\n input_mean = input - mean\n\n # Step 3 - Execute normalization\n input_norm = input_mean * inv_sqrt_var \n\n # Step 2: Nor the two transformation steps\n scaled_input_norm = gamma * input_norm\n\n # Step 1: scale and shift\n out = scaled_input_norm + beta\n #################################################\n # store tensors and non-tensorial constants\n ctx.save_for_backward(gamma, inv_sqrt_var, mean, input)\n ctx.foo = eps\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def forward_backward(self, data_batch):\n self.forward(data_batch, is_train=True)\n self.backward()\n if self.use_l2norm_grad_clip:\n # 2-Norm Grad Clip\n self.l2norm_grad_clip()", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n #######################################################################\n #Compute mean and variance of each element of the data.\n sample_mean = np.mean(x,axis = 0)\n sample_var = np.var(x,axis = 0)\n #Normalize\n x_normalized = (x - sample_mean) / (np.sqrt(sample_var + eps))\n #scale and shift.\n out = x_normalized * gamma + beta\n #Update running mean and variance.\n running_mean = momentum * running_mean + (1 - momentum) * sample_mean\n running_var = momentum * running_var + (1 - momentum) * sample_var\n #Save the sample mean and variance as cache for backprop.\n cache = (x_normalized,np.power(sample_var + eps,-0.5),gamma)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. 
#\n #######################################################################\n #Normalize with running mean and var.\n x_normalized = (x - running_mean) / (np.sqrt(running_var + eps))\n #scale and shift.\n out = gamma * x_normalized + beta\n #Save the sample mean and variance as cache for backprop.\n cache = (x_normalized,np.power(running_var + eps,-0.5),gamma)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache", "def batchForwardBackward(self, batch, learningRate, ass, dErrors, bundles):\n\n # reset bundles\n for layerIndex in range(1, len(bundles)):\n for bundle in bundles[layerIndex]:\n self.layers[layerIndex].resetBundle(bundle)\n\n # forwardBackward on each sample, and sum up modifications suggested by the gradients\n iterations = 0\n sumLoss = 0\n for indexInBatch, (sample, label, labelScalar) in enumerate(batch):\n iterations += 1\n sumLoss += self.forwardBackward(sample, label, indexInBatch, ass, bundles)\n meanLoss = sumLoss/iterations\n\n # modify weigths and biases according to previous backpropagations\n for layerIndex in range(1, len(bundles)):\n self.layers[layerIndex].update(layerIndex, ass, learningRate, bundles[layerIndex])\n\n return meanLoss", "def spatial_batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n\n #############################################################################\n # TODO: Implement the backward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should #\n # be very short; ours is less than five lines. #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return dx, dgamma, dbeta", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n Xshape = x.shape\n\n if len(Xshape) > 2: #deal with 2d inputs\n N,C,H,W = x.shape\n x = np.swapaxes(x,1,3)\n D = C\n x = np.reshape(x,[N*H*W,C])\n else:\n N = x.shape[0]\n x = np.reshape(x,[N,-1])\n _, D = x.shape\n\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. 
#\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n #######################################################################\n mu = np.mean(x,axis=0)\n var = np.var(x, axis=0)\n x_norm = (x - mu)/np.sqrt(var + eps)\n out = gamma * x_norm + beta\n running_mean = momentum*running_mean + (1-momentum)*mu\n running_var = momentum*running_var + (1-momentum)*var\n cache = (x_norm, gamma, np.sqrt(var + eps))\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n x_norm = (x - running_mean)/np.sqrt(running_var + eps)\n out = gamma * x_norm + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n if len(Xshape) > 2:\n out = np.reshape(out,[N,W,H,C])\n out = np.swapaxes(out,1,3)\n else:\n out = np.reshape(out,Xshape)\n return out, cache", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n \n mu = np.mean(x, axis=0)\n var = np.var(x, axis=0)\n sigma = np.sqrt(var+eps)\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! 
# \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n #######################################################################\n out = gamma * (x - mu)/sigma + beta\n #out = (x - mu)/sigma\n #out = out * gamma.T + beta.T\n #print(gamma.shape)\n #out = out * gamma + beta\n #print(out.shape)\n \n running_mean = momentum * running_mean + (1 - momentum) * mu\n running_var = momentum * running_var + (1 - momentum) * (var+eps)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n out = (x - running_mean) / np.sqrt(running_var) * gamma + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n cache = (x, mu, sigma, gamma, beta)\n return out, cache", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. 
#\n #######################################################################\n \n sample_mean = np.mean(x, axis=0)\n sample_variance = np.var(x, axis=0)\n \n running_mean = momentum * running_mean + (1 - momentum) * sample_mean\n running_var = momentum * running_var + (1 - momentum) * sample_variance\n \n num = x - sample_mean\n denom = np.sqrt(sample_variance + eps)\n \n x_hat = num/denom\n out = gamma*x_hat + beta\n \n cache = (gamma, x_hat, num, denom, eps, sample_variance)\n\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n num = x - running_mean\n denom = np.sqrt(running_var + eps)\n \n x_hat = num/denom\n out = gamma*x_hat + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache", "def batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n ###########################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n ###########################################################################\n \n Dshape = dout.shape\n x_norm, gamma, sigma = cache\n\n if len(Dshape) > 2: #deal with 2d inputs\n N,C,H,W = dout.shape\n dout = np.swapaxes(dout,1,3)\n D = C\n dout = np.reshape(dout,[N*H*W,C])\n else:\n dout = np.reshape(dout,[dout.shape[0],-1])\n N, D = x_norm.shape\n\n dgamma = np.sum(dout * x_norm, axis=0)\n dbeta = np.sum(dout, axis=0)\n dx = 1/N*(gamma/sigma)*(N*dout - dbeta - x_norm*dgamma)\n\n if len(Dshape) > 2:\n dx = np.reshape(dx,[N,W,H,C])\n dx = np.swapaxes(dx,1,3)\n else:\n dx = np.reshape(dx,Dshape)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta", "def batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n ###########################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. 
#\n ###########################################################################\n gamma, x_hat, num, denom, eps, sample_variance = cache\n N, D = dout.shape\n \n dbeta = np.sum(dout, axis=0)\n dyx_hat = dout\n dgamma = np.sum(dyx_hat*x_hat, axis=0)\n dx_hat = gamma*dyx_hat\n ddenom = np.sum(num*dx_hat, axis=0)\n dmu1 = (1/denom)*dx_hat\n dsqvar = ddenom*(-1)*(1/(denom**2))\n dvar = 0.5*((sample_variance+eps)**(-0.5))*dsqvar\n dsq = (1/N)*np.ones((N,D))*dvar\n dmu2 = 2*num*dsq\n dmu = (-1)*np.sum(dmu1+dmu2, axis=0)\n dx1 = dmu1 + dmu2\n dx2 = (1/N)*np.ones((N,D))*dmu\n dx = dx1+dx2\n\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta", "def forward_backward(self, data_batch):\n total_feature, total_label = self.forward(data_batch, is_train=True)\n self.backward_all(total_feature, total_label)", "def spatial_batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n\n ###########################################################################\n # TODO: Implement the backward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should#\n # be very short; ours is less than five lines. #\n ###########################################################################\n s_cache,shape_x = cache\n reshaped_dout = np.reshape(dout,(-1,dout.shape[1]))\n dx_reshaped,dgamma,dbeta = batchnorm_backward_alt(reshaped_dout,s_cache)\n dx = np.reshape(dx_reshaped,shape_x)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta", "def affine_batchnorm_relu_backward(dout, cache):\n fc_cache, norm_cache, relu_cache = cache\n d_norm_out = relu_backward(dout, relu_cache)\n d_affine_out, dgamma, dbeta = batchnorm_backward_alt(d_norm_out, norm_cache)\n dx, dw, db = affine_backward(d_affine_out, fc_cache)\n return dx, dw, db, dgamma, dbeta", "def affine_batchnorm_relu_backward(dout, cache):\n af_cache, bf_cache, relu_cache = cache\n \n dbf_out = relu_backward(dout, relu_cache)\n daf_out, dgamma, dbeta = batchnorm_backward(dbf_out, bf_cache)\n dx, dw, db = affine_backward(daf_out, af_cache)\n return dx, dw, db, dgamma, dbeta", "def convert_batch_norm(g, op, block):\n\n ipt_name = op.input(\"X\")[0]\n scale_name = op.input(\"Scale\")[0]\n bias_name = op.input(\"Bias\")[0]\n mean_name = op.input(\"Mean\")[0]\n variance_name = op.input(\"Variance\")[0]\n epsilon = op.attr(\"epsilon\")\n out = _op.nn.batch_norm(\n g.get_node(ipt_name),\n g.get_node(scale_name),\n g.get_node(bias_name),\n g.get_node(mean_name),\n g.get_node(variance_name),\n epsilon=epsilon,\n )\n g.add_node(op.output(\"Y\")[0], out[0])", "def batchnorm_backward_alt(dout, cache):\n dx, dgamma, dbeta = None, None, None\n ###########################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. 
#\n # #\n # After computing the gradient with respect to the centered inputs, you #\n # should be able to compute gradients with respect to the inputs in a #\n # single statement; our implementation fits on a single 80-character line.#\n ###########################################################################\n N = dout.shape[0]\n x_norm,inv_var,gamma = cache\n dgamma = np.sum(dout * x_norm,axis = 0)\n dbeta = np.sum(dout,axis = 0)\n #Simplified calculation of dx.\n dx_normalized = dout * gamma\n dx = (1 / N) * inv_var * (N * dx_normalized - np.sum(dx_normalized,axis = 0) \\\n - x_norm * np.sum(dx_normalized * x_norm,axis = 0)) \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta", "def backward(ctx, grad_output):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n # Retrieve saved tensors and constants\n gamma, ivar, mean, input = ctx.saved_tensors\n eps = ctx.saved_tensors\n\n # Check which inputs need gradients\n input_needs_grad, gamma_needs_grad, beta_needs_grad = ctx.needs_input_grad\n\n # Get the batch size (=N)\n N, _ = grad_output.shape\n\n # reconstruct the input_norm\n input_norm = (input - mean) * ivar\n grand_input_norm = grad_output * gamma\n\n ##### Gradient wrt beta #####\n grad_beta = grad_output.sum(dim=0) if beta_needs_grad else None\n\n #### Gradient wrt gamma ####\n grad_gamma = (input_norm*grad_output).sum(dim=0) if gamma_needs_grad else None\n \n #### Gradient wrt input ####\n term1 = N*grand_input_norm \n term2 = torch.sum(grand_input_norm, dim=0)\n term3 = input_norm*torch.sum(grand_input_norm*input_norm, dim=0)\n grad_input = (1. / N) * ivar * (term1 - term2 - term3) if input_needs_grad else None\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n # return gradients of the three tensor inputs and None for the constant eps\n return grad_input, grad_gamma, grad_beta, None", "def backward_pass(self):\r\n # the gradient of cross-entropy on top of softmax is (t-y)\r\n back_output = (self.targets - self.y) / self.y.shape[0]\r\n\r\n for layer in reversed(self.layers):\r\n back_output = layer.backward_pass(back_output)", "def backward(self, loss):\n global_timer.my_timer.start_profile(\"BWD\")\n mgr = PatrickStarManager()\n mgr.set_training_stage(TrainingStage.BWD)\n\n for param_fp16 in self.client.chunk_based_param_fp16:\n param_fp16.ps_attr.bwd_used_cnt = 0\n\n self.optimizer.zero_grad()\n if self.loss_scaler:\n self.loss_scaler.backward(loss)\n else:\n loss.backward()\n mgr.update_margin_mem()\n global_timer.my_timer.finish_profile(\"BWD\")", "def batchnorm_backward_alt(dout, cache):\n dx, dgamma, dbeta = None, None, None\n #############################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n # #\n # After computing the gradient with respect to the centered inputs, you #\n # should be able to compute gradients with respect to the inputs in a #\n # single statement; our implementation fits on a single 80-character line. 
#\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n \n return dx, dgamma, dbeta", "def backwardPropagation(self, inputs, label, loss, node_hidden, node_output):\n err = node_output\n err[range(inputs.shape[0]), label] -= 1\n err = err / inputs.shape[0]\n \"\"\"Back propagate to hidden layer\"\"\"\n del_output_W = np.dot(node_hidden.T, err)\n \"\"\"Back propagate to input layer\"\"\"\n del_input_W = np.dot(err, self.hidden_W.T)\n \"\"\" Use Relu function\"\"\"\n del_input_W[node_hidden <= 0] = 0\n del_input_W = np.dot(inputs.T, del_input_W)\n \"\"\"Penalize the error with regularizer value\"\"\"\n del_input_W = del_input_W + self.regularizer * self.input_W\n del_output_W = del_output_W + self.regularizer * self.hidden_W\n \"\"\"Store the error value into the weight value\"\"\"\n self.input_W += -self.lr * del_input_W\n self.hidden_W += -self.lr * del_output_W", "def spatial_batchnorm_backward(dout, cache):\r\n \tN, C, H, W = dout.shape\r\n dout_new = dout.transpose(0, 2, 3, 1).reshape(N*H*W, C)\r\n dx, dgamma, dbeta = batchnorm_backward(dout_new, cache)\r\n dx = dx.reshape(N, H, W, C).transpose(0, 3, 1, 2)\r\n\r\n return dx, dgamma, dbeta", "def backward_D(self):\n base_function._unfreeze(self.net_D)\n #print(self.input_P2.shape, self.img_gen.shape)\n self.loss_dis_img_gen = self.backward_D_basic(self.net_D, self.input_P2, self.img_gen)", "def on_batch_end(self, state: _State):\n if not state.need_backward_pass:\n return\n\n loss = state.batch_metrics[self.loss_key]\n optimizer = self._optimizer\n\n self._accumulation_counter += 1\n need_gradient_step = \\\n (self._accumulation_counter + 1) % self.accumulation_steps == 0\n\n # This is very hacky check whether we have AMP optimizer and this may\n # change in future.\n # But alternative solution is to have AmpOptimizerCallback.\n # or expose another c'tor argument.\n if hasattr(optimizer, \"_amp_stash\"):\n from apex import amp\n # Need to set ``delay_unscale``\n # according to\n # https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations\n delay_unscale = not need_gradient_step\n with amp.scale_loss(\n loss, optimizer, delay_unscale=delay_unscale\n ) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n if need_gradient_step:\n self.grad_step(\n optimizer=optimizer,\n optimizer_wds=self._optimizer_wd,\n grad_clip_fn=self.grad_clip_fn\n )\n\n # if self.save_model_grads:\n # for tag, value in model.named_parameters():\n # tag = tag.replace(\".\", \"/\")\n # state.model_grads[tag] = value.grad.cpu().numpy()\n\n utils.maybe_recursive_call(optimizer, \"zero_grad\")\n\n self._accumulation_counter = 0", "def spatial_batchnorm_forward(x, gamma, beta, bn_param):\n out, cache = None, None\n\n #############################################################################\n # TODO: Implement the forward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should #\n # be very short; ours is less than five lines. 
#\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return out, cache", "def batchnorm_backward(dout, cache):\n\tdx, dgamma, dbeta = None, None, None\n\tx, normalized, gamma, beta, mu, var, eps = cache\n\tN,D = dout.shape\n\n\tdx_norm = dout * gamma\n\t\n\tdx = (1. / N) * (1/np.sqrt(var + eps)) * (N*dx_norm - np.sum(dx_norm, axis=0) - normalized*np.sum(dx_norm*normalized, axis=0))\n\t\n\tdgamma = (dout * normalized).sum(axis = 0)\n\tdbeta = dout.sum(axis = 0)\n\treturn dx, dgamma, dbeta", "def backward(self, d_out):\n # TODO: Implement backward pass\n # Compute both gradient with respect to input\n # and gradients with respect to W and B\n # Add gradients of W and B to their `grad` attribute\n\n # It should be pretty similar to linear classifier from\n # the previous assignment\n\n d_input = np.dot(d_out, self.W.value.T)\n self.W.grad = np.dot(self.X.T, d_out)\n self.B.grad = np.sum(d_out, axis=0, keepdims=True)\n\n return d_input", "def batchnorm_backward(dout, cache):\r\n dx, dgamma, dbeta = None, None, None\r\n\r\n x, xc, var, std, xn, gamma, eps = cache\r\n N = x.shape[0]\r\n\r\n dbeta = np.sum(dout, axis=0)\r\n dgamma = np.sum(dout * xn, axis=0)\r\n dxn = dout * gamma\r\n\r\n dxc = dxn / std\r\n dstd = np.sum(-(xc * dxn) / (std * std), axis=0)\r\n dvar = 0.5 * dstd / std\r\n\r\n dxc += (2.0 / N) * xc * dvar\r\n dmu = -np.sum(dxc, axis=0)\r\n dx = dxc + dmu / N\r\n\r\n return dx, dgamma, dbeta", "def backprop(model, loss_function, optimizer, batch, device):\n model.train()\n model.to(device)\n optimizer.zero_grad()\n\n inputs, targets = batch[0], batch[1]\n\n inputs = inputs.to(device)\n targets = targets.to(device)\n\n outputs = model(inputs)\n loss = loss_function(outputs, targets)\n loss.backward()\n optimizer.step()", "def conv_bn_relu_backward(dout, cache):\n conv_cache, sbn_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dan, dgamma, dbeta = spatial_batchnorm_backward(da, sbn_cache)\n dx, dw, db = conv_backward_fast(dan, conv_cache)\n return dx, dw, db, dgamma, dbeta", "def spatial_batchnorm_backward(dout, cache):\n\tdx, dgamma, dbeta = None, None, None\n\n\tN, C, H, W = dout.shape\n\ty = dout.transpose(0,2,3,1).reshape((N*H*W,C))\n\tdx, dgamma, dbeta = batchnorm_backward(y, cache)\n\tdx = dx.reshape((N,H,W,C)).transpose(0,3,1,2)\n\n\treturn dx, dgamma, dbeta", "def loss_batch(model, loss_func, xb, yb, opt=None):\r\n _, output = model(xb)\r\n loss = loss_func(output, yb)\r\n\r\n if opt is not None:\r\n loss.backward()\r\n for group in opt.param_groups:\r\n for p in group['params']:\r\n state = opt.state[p]\r\n if 'step' in state.keys():\r\n if(state['step']>=1024):\r\n state['step'] = 1000\r\n opt.step()\r\n opt.zero_grad()\r\n\r\n return loss.item()", "def backward(self, gradient: Tensor) -> Tensor:\n self.b_grad = np.sum(gradient, axis=0)\n self.w_grad = self.inputs.T @ gradient\n return gradient @ self.w.T", "def backward(\n self, X: np.ndarray, y: np.ndarray, lr: float, reg: float = 0.0\n ) -> float:\n y_hat = self.forward(X)\n\n y_one_hot = self.one_hot_encode(y)\n loss = CrossEntropy.forward(y_one_hot, y_hat)\n\n d_layer = CrossEntropy.backward(y, y_hat)\n\n w_grads = []\n b_grads = []\n\n for idx, layer in reversed(list(enumerate(self.layers))):\n # Not output layer\n if (idx + 1) < len(self.layers):\n next_layer = self.layers[idx + 
1]\n\n d_layer = d_layer.dot(next_layer.w.T)\n d_layer = layer.activation_func.backward(d_layer, layer.activated_out)\n\n d_w = layer.linear_in.T.dot(d_layer) + 2 * reg * layer.w\n d_b = np.sum(d_layer, axis=0)\n\n w_grads.insert(0, d_w)\n b_grads.insert(0, d_b)\n\n self.optimizer.step(self.layers, w_grads, b_grads, lr)\n\n if self.norm_weights:\n w_norm = max(np.linalg.norm(l.w) for l in self.layers) / len(self.layers)\n b_norm = max(np.linalg.norm(l.w) for l in self.layers) / len(self.layers)\n for layer in self.layers:\n layer.w /= w_norm\n layer.b /= b_norm\n\n return loss", "def forwardBackward(self, sample, label, indexInBatch, ass, bundles):\n\n # forward pass\n for l, layer in enumerate(self.layers):\n if l == 0:\n ass[l] = sample\n else:\n layer.forward(l, ass, bundles[l][indexInBatch])\n\n # compute loss and gradient\n loss = self.lossF.f(ass[-1], label)\n dErrors = [np.empty_like(a) for a in ass]\n dErrorLeftOperand = self.lossF.fprime(ass[-1], label) # left operand of equation BP1\n\n # propagate the error back\n for l in reversed(range(1, len(self.layers))):\n dErrorLeftOperand = self.layers[l].backward(l, ass, dErrors, bundles[l][indexInBatch], dErrorLeftOperand)\n\n return loss", "def backward(self, inGradient, lr=0.001): # batchSize = 1\n wGradient = np.dot(inGradient.T, self.data)\n bGradient = np.sum(inGradient, axis=0)\n outGradient = np.dot(inGradient, self.weights)\n\n self.weights = self.weights - lr * wGradient\n self.bias = self.bias - lr * bGradient\n self.wGradient = wGradient\n self.bGradient = bGradient\n\n #print \"weight gradient \", wGradient\n #print \"bias gradient \", bGradient\n\n return outGradient", "def on_iter_backward(self, runner):\n runner.optimizer.zero_grad()\n runner.loss.backward()\n runner.optimizer.step()", "def _backward(loss):\n\n loss.backward()", "def backward(self, d_out):\n # TODO: Implement backward pass\n # Compute both gradient with respect to input\n # and gradients with respect to W and B\n # Add gradients of W and B to their `grad` attribute\n\n # It should be pretty similar to linear classifier from\n # the previous assignment\n \n dW = np.dot(self.X.T, d_out);\n dB = np.dot(np.ones((1, d_out.shape[0])), d_out);\n \n d_input = np.dot(d_out, self.W.value.T);\n #print(\"self.X = \", self.X);\n #print(\"self.W.grad.T = \", self.W.grad.T);\n #print(\"dW.T = \", dW.T);\n \n self.W.grad += dW;\n self.B.grad += dB;\n \n return d_input;", "def backward_pass(architecture,gradient_layerwise,grad_weights,grad_bias):\n \n for layer in range(len(architecture)-1,-1,-1):\n X_input,X_output,weightsi,biasi,X_input_im2col,imi,output_shapei,kernel_shapei,stridei,operationi,imxi = architecture['layer{}'.format(layer+1)]\n# print(\"Operation is:{} and Layer is: {}\".format(operationi,layer+1))\n if operationi == 'softmax': # Last layer -> Dont apply softmax in any layer other than the last layer!\n # not taking gradients here because we need dz_dX(secondlastlayer) which is y_pred - y\n continue\n \n if operationi == 'conv_bn_relu' or operationi == 'conv_relu' or operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if operationi__1 == 'softmax':\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n 
dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi # .\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # .\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input_im2col)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input_im2col)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi #\n elif operationi__1 == 'maxpool': # need to do something here to fix the problem\n None\n\n elif 'flatten' in operationi__1:\n # we currently have dz_doutput of flatten -> we want dz_doutput of the conv_bn_relu before flatten\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2] # weights2\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput of flatten\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5] # i\n try:\n dz_dXi = torch.t(weightsi__1).mm(dz_dXi__1)\n except:\n dz_dXi = weightsi__1.mm(dz_dXi__1)\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n\n dz_dXi = torch.reshape(dz_dXi,(output_shapei[1]*output_shapei[2],-1))\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n dz_dweightsi = X_input_im2col.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n dz_dbi = dz_dXi\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)# Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi) # Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi) # Can also set this to layer like in line ~800\n \n else:\n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dX2 -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n \n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n if 'sigmoid' in operationi__1: # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # 
Taking the derivative of the sigmoid function\n elif 'relu' in operationi__1: # ...\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n dz_dXi = torch.reshape(dz_dXi,(output_shape_current_layer[1]*output_shape_current_layer[2],-1))\n dz_dbi = torch.reshape(dz_dXi,bias_current_layer.shape)\n dz_dweightsi = X_im2col_current_layer.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n \n if operationi == 'maxpool':\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n \n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n try:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n except:\n Y = torch.t(weightsi__1).mm(dz_dXi__1) # Ensuring valid matrix multiplication here\n \n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n \n if operationi__1 == 'conv_sigmoid' or operationi__1 == 'conv_bn_sigmoid': # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n else:\n dz_dXi[X_output <= 0] = 0\n\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n \n dz_dXinput = torch.zeros((X_input.shape))\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+1)][0] # output = output of maxpool\n\n dz_dXoutput = torch.reshape(dz_dXoutput,(output_shapei[0],X_input_im2col.shape[2]))\n \n for i in range(output_shapei[0]):\n for j in range(X_input_im2col.shape[2]):\n Xi2ci = X_im2col_current_layer[i,:,:]\n idx = torch.argmax(Xi2ci[:,j]).item()\n value = imxi[i][(idx,j)]\n dz_dXinput[value[0],value[1],value[2]] += float(dz_dXoutput[i,j])\n\n# dz_dXinput = torch.reshape(dz_dXinput,output_shapei)\n \n X_prev_im2col = architecture['layer{}'.format(layer)][4]\n X_output_prev = architecture['layer{}'.format(layer)][1]\n X_output_prev = torch.reshape(X_output_prev,dz_dXinput.shape)\n X_input_prev = architecture['layer{}'.format(layer)][0]\n prev_bias = architecture['layer{}'.format(layer)][3]\n output_shape_prev = architecture['layer{}'.format(layer)][6]\n prev_operation = architecture['layer{}'.format(layer)][9]\n \n if prev_operation == 'conv_sigmoid' or prev_operation == 'conv_bn_sigmoid':\n dz_dXinput *= sigmoid(X_output_prev)*(1-sigmoid(X_output_prev)) # Taking the derivative of the sigmoid function\n else:\n dz_dXinput[X_output_prev <= 0] = 0\n \n if len(dz_dXinput.shape) == 3:\n dz_dXinput = 
torch.reshape(dz_dXinput,(-1,output_shape_prev[0]))\n \n dz_dbi = torch.reshape(dz_dXinput,prev_bias.shape)\n dz_dweightsi = X_prev_im2col.mm(dz_dXinput)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer)][0] = torch.Tensor(dz_dXinput) # ...\n \n if 'flatten_dense' in operationi:\n \n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n \n if operationi__1 == 'softmax':\n \n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n X_output = torch.reshape(X_output,(-1,1))\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if 'sigmoid' in operationi:\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi # Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi # Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # Can also set this to layer like in line ~800\n \n else:\n # Have to modify and test this before implementation -> Specifically\n # the backprop implementation is not consistent with the ones above\n #\n X_output = torch.reshape(X_output,(-1,1))\n weights__i = architecture['layer{}'.format(layer+2)][2]\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+2)][0]\n dz_dXoutput = torch.reshape(torch.Tensor(dz_dXoutput),X_output.shape)\n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n\n if 'relu' in operationi:\n dz_dXoutput[X_output<0] = 0\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n if 'sigmoid' in operationi:\n dz_dXoutput*= sigmoid(X_output)*(1-sigmoid(X_output))\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n else:\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n \n unflattened_Xinput = architecture['layer{}'.format(layer+1)][0]\n dz_dXinput = torch.reshape(dz_dXinput,unflattened_Xinput.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][0] = 
torch.Tensor(dz_dXinput)\n \n if gradient_layerwise['layer{}'.format(layer+1)][1] is not None:\n try:\n grad_weights['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][1]\n except:\n grad_weights['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][1])\n if gradient_layerwise['layer{}'.format(layer+1)][2] is not None:\n try:\n grad_bias['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][2]\n except:\n grad_bias['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][2])\n \n gc.collect()\n return", "def backward(self, inputs, grad_loss_input):\n raise NotImplementedError", "def backward_pass(self, loss):\n\n self.optimizer.zero_grad()\n self.optimizer.backward(loss)\n self.optimizer.step()", "def backward_D(self):\n self.loss_D.backward()", "def batchnorm_forward(x, gamma, beta, bn_param):\n\tmode = bn_param['mode']\n\teps = bn_param.get('eps', 1e-5)\n\tmomentum = bn_param.get('momentum', 0.9)\n\n\tN, D = x.shape\n\trunning_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n\trunning_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n\tout, cache = None, None\n\tif mode == 'train':\n\t\t# normalize data\n\t\tmu = np.mean(x, axis=0)\n\t\tvar = np.var(x, axis=0)\n\t\tnormalized = (x-mu)/np.sqrt(var+eps)\n\t\tout = gamma*normalized + beta\n\t\t# Update running mean and variance\n\t\trunning_mean = momentum*running_mean + (1 - momentum)*mu\n\t\trunning_var = momentum*running_var + (1 - momentum)*var\n\t\t# Cache for backwards pass\n\t\tcache = (x, normalized, gamma, beta, mu, var, eps)\n\telif mode == 'test':\n\t\t# normalize data using running mean and variance from training\n\t\tnormalized = (x - running_mean)/np.sqrt(running_var+eps)\n\t\tout = gamma*normalized + beta\n\telse:\n\t\traise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n\t# Store the updated running means back into bn_param\n\tbn_param['running_mean'] = running_mean\n\tbn_param['running_var'] = running_var\n\n\treturn out, cache", "def forward_backward_prop(data, labels, params, dimensions):\n\n ### Unpack network parameters (do not modify)\n ofs = 0\n Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])\n\n activation = []\n\n W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H))\n ofs += Dx * H\n b1 = np.reshape(params[ofs:ofs + H], (1, H))\n ofs += H\n W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))\n ofs += H * Dy\n b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))\n\n ### Forward propagation\n activation.append(data)\n\n # Hidden layer inputs: (N, Dx) * (Dx, H) -> N x H\n z = np.dot(activation[-1], W1) + b1 \n # Activations, inputs to the final layer. 
\n activation.append(sigmoid(z)) # output of the hidden layer, activation\n # Final layer outputs: ( N x H ) * ( H, Dy) -> (N, Dy)\n z = np.dot(activation[-1], W2) + b2\n activation.append( softmax(z) )\n\n # Cross-entropy cost\n\n y_p = activation[-1]\n activation = activation[:-1] # remove activation data (output)\n\n cost = -np.sum(labels * np.log(y_p))\n \n error = []\n \n ### backward propagation\n sigma = (y_p - labels)\n error.append(sigma)\n\n gradb2 = np.sum(error[-1], axis=0)\n gradW2 = np.dot(activation[-1].T, error[-1])\n\n #\n sigma = np.dot(W2, error[-1].T)\n sigma = sigma.T * sigmoid_grad(activation[-1])\n activation = activation[:-1] # remove activation data ( hidden layer )\n\n error.append(sigma)\n\n gradb1 = np.sum(error[-1], axis=0)\n gradW1 = np.dot(activation[-1].T, error[-1])\n\n\n ### Stack gradients (do not modify)\n grad = np.concatenate((gradW1.flatten(), gradb1.flatten(), \n gradW2.flatten(), gradb2.flatten()))\n \n return cost, grad", "def batch_normalization(input_var=None):\n\n # Hyperparameters\n hp = Hyperparameters()\n hp('batch_size', 30)\n hp('n_epochs', 1000)\n hp('learning_rate', 0.01)\n hp('l1_reg', 0.00)\n hp('l2_reg', 0.0001)\n hp('patience', 5000)\n\n # Create connected layers\n # Input layer\n l_in = InputLayer(input_shape=(hp.batch_size, 28 * 28), input_var=input_var, name='Input')\n # Batch Normalization\n l_bn1 = BatchNormalization(incoming=l_in, name='Batch Normalization 1')\n # Dense Layer\n l_hid1 = DenseLayer(incoming=l_bn1, n_units=500, W=glorot_uniform, l1=hp.l1_reg,\n l2=hp.l2_reg, activation=relu, name='Hidden layer 1')\n # Batch Normalization\n l_bn2 = BatchNormalization(incoming=l_hid1, name='Batch Normalization 2')\n # Dense Layer\n l_hid2 = DenseLayer(incoming=l_bn2, n_units=500, W=glorot_uniform, l1=hp.l1_reg,\n l2=hp.l2_reg, activation=relu, name='Hidden layer 2')\n # Batch Normalization\n l_bn3 = BatchNormalization(incoming=l_hid2, name='Batch Normalization 3')\n # Logistic regression Layer\n l_out = LogisticRegression(incoming=l_bn3, n_class=10, l1=hp.l1_reg,\n l2=hp.l2_reg, name='Logistic regression')\n\n # Create network and add layers\n net = Network('mlp with batch normalization')\n net.add(l_in)\n net.add(l_bn1)\n net.add(l_hid1)\n net.add(l_bn2)\n net.add(l_hid2)\n net.add(l_bn3)\n net.add(l_out)\n\n return net, hp", "def backward(self, input_train, input_train_label):\n batchSize = len(input_train) #liczba obrazow podawanych na wejscie w trakcie jednej iteracji\n weights = self.Weights\n biases = self.Biases\n delta_W = self.delta_W\n delta_B = self.delta_B\n poolParams = self.poolParams\n dW_list = []\n dB_list = []\n dW4 = np.zeros(weights[4].shape)\n dB4 = np.zeros(biases[4].shape)\n dW3 = np.zeros(weights[3].shape)\n dB3 = np.zeros(biases[3].shape)\n dW2 = np.zeros(weights[2].shape)\n dB2 = np.zeros(biases[2].shape)\n dW1 = np.zeros(weights[1].shape)\n dB1 = np.zeros(biases[1].shape)\n dW0 = np.zeros(weights[0].shape)\n dB0 = np.zeros(biases[0].shape)\n loss = 0\n for image in range(batchSize):\n\n X_data = input_train[image]\n X_label = input_train_label[image]\n output_forward, cache = self.forward(X_data) \n loss += -1*sum(X_label - np.log(output_forward)) #obliczenie wartosci funkcji straty [cross entropy]\n\n #Propagacja wsteczna gradientu\n dy = -1*(X_label - output_forward)/2\n #print(\"X_label = {} \\t layer7 = {} \\t dy = {}\".format(X_label, output_forward, dy))\n\n [dy, dW, dB ] = fullycon_b(cache[6], np.asarray([dy]).transpose() , weights[4])\n dW4 += dW\n dB4 += dB.flatten() #wektoryzacja macierzy\n dy = 
act.relu_b(dy.transpose(), cache[6])\n\n [dy, dW, dB ] = fullycon_b(cache[5][:,0], dy, weights[3])\n dW3 += dW\n dB3 += dB.flatten()\n dy = act.relu_b(dy.transpose(), cache[5][:,0]) \n \n [dy, dW, dB ] = convolution_b(cache[4], dy, weights[2])\n dW2 += dW\n dB2 += dB.flatten()\n \n dy = maxpool_b(cache[3], dy)\n dy = act.relu_b(dy, cache[3])\n\n [dy, dW, dB ] = convolution_b(cache[2], dy, weights[1])\n dW1 += dW\n dB1 += dB.flatten()\n \n dy = maxpool_b(cache[1], dy)\n dy = act.relu_b(dy, cache[1]) \n\n [dy, dW, dB ] = convolution_b(np.asarray([cache[0]]), dy, weights[0])\n dW0 += dW\n dB0 += dB.flatten()\n\t\t\t\n dW_list.append(dW4)\n dB_list.append(dB4)\n dW_list.append(dW3)\n dB_list.append(dB3)\n dW_list.append(dW2)\n dB_list.append(dB2)\n dW_list.append(dW1)\n dB_list.append(dB1)\n dW_list.append(dW0)\n dB_list.append(dB0)\n dW_list = dW_list[::-1]\n dB_list = dB_list[::-1]\n \n #Aktualizacja parametrow kazdej z warstw (o ile takie posiada)\n #uczenie z metoda momentum: learning rate = const; alpha = const\n for x in range(len(dW_list)):\n delta_W[x] = alpha*delta_W[x] - eta*dW_list[x]/batchSize\n weights[x] += delta_W[x]\n delta_B[x] = alpha*delta_B[x] - eta*dB_list[x]/batchSize\n biases[x] += delta_B[x]\n #przypisanie nowych wag po aktualiacji wszystkich parametrow\n self.Weights = weights\n self.Biases = biases\n\n #zwrocenie stosunku wartosci f-cji straty do rozmiaru batch'u\n return loss/batchSize", "def backward(self):\n self.loss_similarity = [LNCC(warped_img, self.batch_fixed, self.corr_kernel) for warped_img in self.warped_img_list]\n self.loss_similarity_mean = torch.mean(torch.stack(self.loss_similarity))\n self.loss_smooth = [GradNorm(disp_map) for disp_map in self.disp_list]\n self.loss_smooth_mean = torch.mean(torch.stack(self.loss_smooth))\n if len(self.strain_compensated_list) > 1:\n self.loss_consistency_strain = [LNCC(self.strain_compensated_list[t-1][:,:,143:-143,:], self.strain_compensated_list[t][:,:,143:-143,:], self.corr_kernel) for t in range(1, len(self.strain_compensated_list))]\n self.loss_consistency_strain_mean = torch.mean(torch.stack(self.loss_consistency_strain))\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha + (1 - self.loss_consistency_strain_mean) * self.beta\n else:\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha", "def backward_G(self):\n self.loss_G.backward()", "def forward(self, batch):\n self.output = np.dot(np.array(batch), self.weights) + self.biases", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def backward(self, d_out):\n # TODO: Implement backward pass\n # Compute both gradient with respect to input\n # and gradients with respect to W and B\n # Add gradients of W and B to their `grad` attribute\n\n # It should be pretty similar to linear classifier from\n # the previous assignment\n self.W.grad += np.dot(self.X.T, d_out)\n self.B.grad += np.sum(d_out, axis=0)[np.newaxis, :]\n return np.dot(d_out, self.W.value.T)", "def 
backward(ctx, grad_from_upstream):\n grad_inputX = grad_weight = grad_bias = None\n\n print('Performing custom backward of MyConv2d')\n nOutCh, nInCh, nKnRows, nKnCols, padding, stride = ctx.parameters\n # inX_nSamp_nL_nB, kn_nB_nOutCh = ctx.saved_tensors\n\n # grad_out = torch.ones(out.shape, dtype=torch.float64) / out.numel()\n\n grad_bias = grad_from_upstream.sum(dim=[0, 2, 3]) # done for grad_bias\n\n grad_out_nSamp_nOutCh_nR_nC = grad_from_upstream\n\n # for: out_nSamp_nOutCh_nR_nC = out_nSamp_nOutCh_nL.reshape(nSamples, outCh, nOutRows, nOutCols)\n grad_out_nSamp_nOutCh_nL = grad_out_nSamp_nOutCh_nR_nC.reshape(ctx.out_nSamp_nOutCh_nL_shape)\n\n # for: out_nSamp_nOutCh_nL = out_nSamp_nL_nOutCh.transpose(1, 2)\n grad_out_nSamp_nL_nOutCh = grad_out_nSamp_nOutCh_nL.transpose(1, 2)\n\n # for: out_nSamp_nL_nOutCh = inX_nSamp_nL_nB.matmul(kn_nB_nOutCh)\n grad_inX_nSamp_nL_nB = grad_out_nSamp_nL_nOutCh.matmul(ctx.kn_nB_nOutCh.t())\n\n # continue to finish calculation of the gradient w.r.t \"weight\", i.e. the convolution kernel\n grad_kn_nB_nOutCh = ctx.inX_nSamp_nL_nB.transpose(1, 2).matmul(grad_out_nSamp_nL_nOutCh)\n grad_kn_nB_nOutCh = grad_kn_nB_nOutCh.sum(dim=0)\n grad_kn_nOutCh_nB = grad_kn_nB_nOutCh.t()\n grad_weight = grad_kn_nOutCh_nB.view(nOutCh, nInCh, nKnRows, nKnCols) # done for grad_weight\n\n # for: inX_nSamp_nL_nB = inX_nSamp_nB_nL.transpose(1, 2)\n grad_inX_nSamp_nB_nL = grad_inX_nSamp_nL_nB.transpose(1, 2)\n\n # for: inX_nSamp_nB_nL = torch.nn.functional.unfold(inputX, (ctx.nKnRows, ctx.nKnCols))\n grad_inputX = torch.nn.functional.fold(grad_inX_nSamp_nB_nL, ctx.InImgSize, (nKnRows, nKnCols),\n padding=padding, stride=stride)\n\n return grad_inputX, grad_weight, grad_bias, None", "def forward(self, input):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n assert input.shape[1] == self.n_neurons, \"The shape of the input tensor is not correct.\"\n\n bn_fct = CustomBatchNormManualFunction()\n out = bn_fct.apply(input, self.gamma, self.beta, self.eps)\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def backward_G(self):\n # Calculate regularzation loss to make transformed feature and target image feature in the same latent space\n self.loss_reg_gen = self.loss_reg * self.opt.lambda_regularization\n\n # Calculate l1 loss \n loss_app_gen = self.L1loss(self.img_gen, self.input_P2)\n self.loss_app_gen = loss_app_gen * self.opt.lambda_rec \n \n # parsing loss\n label_P2 = self.label_P2.squeeze(1).long()\n #print(self.input_SPL2.min(), self.input_SPL2.max(), self.parsav.min(), self.parsav.max())\n self.loss_par = self.parLoss(self.parsav,label_P2)# * 20. \n self.loss_par1 = self.L1loss(self.parsav, self.input_SPL2) * 100 \n\n # Calculate GAN loss\n base_function._freeze(self.net_D)\n D_fake = self.net_D(self.img_gen)\n self.loss_ad_gen = self.GANloss(D_fake, True, False) * self.opt.lambda_g\n\n # Calculate perceptual loss\n loss_content_gen, loss_style_gen = self.Vggloss(self.img_gen, self.input_P2) \n self.loss_style_gen = loss_style_gen*self.opt.lambda_style\n self.loss_content_gen = loss_content_gen*self.opt.lambda_content\n\n total_loss = 0\n\n for name in self.loss_names:\n if name != 'dis_img_gen':\n #print(getattr(self, \"loss_\" + name))\n total_loss += getattr(self, \"loss_\" + name)\n total_loss.backward()", "def _backward(self, w=None):\n grad = self.w # Should be I * self.w . 
We keep a vector for simplicity\n\n # Left multiply input `w` with normalizer gradient\n return w * grad if w is not None else grad", "def spatial_batchnorm_forward(x, gamma, beta, bn_param):\n out, cache = None, None\n\n ###########################################################################\n # TODO: Implement the forward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should#\n # be very short; ours is less than five lines. #\n ###########################################################################\n reshaped = np.reshape(x,(-1,x.shape[1]))\n batch_norm,cache = batchnorm_forward(reshaped,gamma,beta,bn_param)\n out = np.reshape(batch_norm,x.shape)\n cache = (cache,x.shape)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return out, cache", "def backward(self, accum_grad):\n\n W = self.W\n\n grad_w = self.layer_input.T.dot(accum_grad)\n grad_b = np.sum(accum_grad, axis=0, keepdims=True)\n\n # Update the layer weights\n self.W = self.W_optimizer.update(self.W, grad_w)\n self.b = self.b_optimizer.update(self.b, grad_b)\n\n accum_grad = accum_grad.dot(W.T)\n return accum_grad", "def _bn_relu(self, input):\n depth = input.get_shape().as_list()[-1]\n\n # mean and variance calc on batch-height-width dimension\n mean, var = tf.nn.moments(input, axes=[0, 1, 2])\n beta = tf.Variable(tf.zeros([depth]), name='beta')\n gamma = self._get_weight_variable([depth], name='gamma')\n\n bn_out = tf.nn.batch_norm_with_global_normalization(input, mean, var, beta, gamma, 0.001,\n scale_after_normalization=True)\n\n out = tf.nn.relu(bn_out)\n\n return out", "def backward(ctx, grad_output):\n if PROFILE:\n batch_tic = time.time()\n tic = time.time()\n timings = defaultdict(float)\n\n feats1, feats2, xxyy, batch_grid_u, params, pow = ctx.saved_tensors\n\n \"\"\"We needed to store the integers as part of a tensor, so the\n unpacking code here is a little convoluted.\"\"\"\n B, C, H, W, stride, norm = [x.item() for x in params]\n h, w = H, W\n pow = pow.item()\n\n \"\"\"This is a pattern that is very convenient - at the top of backward\n unpack saved_tensors and initialize all gradients w.r.t. inputs to\n None. 
Thanks to the fact that additional trailing Nones are\n ignored, the return statement is simple even when the function has\n optional inputs.\"\"\"\n grad_feats1 = grad_feats2 = grad_xxyy = grad_batch_u = None\n grad_stride = grad_norm = grad_pow = None\n\n \"\"\"Returning gradients for inputs that don't require it is\n not an error.\"\"\"\n assert ctx.needs_input_grad[0], \"expected feats1 to need grad\"\n assert ctx.needs_input_grad[1], \"expected feats2 to need grad\"\n assert not ctx.needs_input_grad[2], \"expected xxyy does not need grad\"\n assert not ctx.needs_input_grad[3], \"expected batch_grid_u does not need grad\"\n assert not ctx.needs_input_grad[4], \"expected stride does not need grad\"\n\n if PROFILE:\n timings[\"back-init\"] = time.time() - tic\n tic = time.time()\n\n with torch.no_grad():\n\n if feats1.is_cuda:\n # TODO: clean up types here\n if feats1.dtype == torch.float32:\n grad_feats1 = torch.cuda.FloatTensor(B, C, H, W).fill_(0)\n grad_feats2 = torch.cuda.FloatTensor(B, C, h, w).fill_(0)\n elif feats1.dtype == torch.float16:\n grad_feats1 = torch.cuda.HalfTensor(B, C, H, W).fill_(0)\n grad_feats2 = torch.cuda.HalfTensor(B, C, h, w).fill_(0)\n else:\n grad_feats1 = torch.zeros((B, C, H, W), dtype=feats1.dtype)\n grad_feats2 = torch.zeros((B, C, h, w), dtype=feats2.dtype)\n\n grad_loss = grad_output / (H * W * B)\n\n if PROFILE:\n timings[\"data transfer\"] = time.time() - batch_tic\n\n for b in range(B):\n\n if PROFILE:\n tic = time.time()\n\n with torch.no_grad():\n diff = batch_grid_u[b, :, :, None, None, :] - \\\n xxyy[None, None, ::stride, ::stride, :]\n diff = (diff * diff).sum(4).sqrt()\n diff = diff.pow(pow)\n\n if PROFILE:\n timings[\"diff-grid\"] += time.time() - tic\n tic = time.time()\n\n # loss gradient for the current minibatch element (expand to tensor)\n grad_loss_b = grad_loss\n grad_smcorr2 = grad_loss_b * diff\n\n if LOCAL_CHECKS:\n ones = torch.ones(diff.shape, dtype=diff.dtype)\n grad_loss_b_ = ones * grad_loss\n smcorr_ = torch.randn(\n diff.shape,\n dtype=torch.double,\n requires_grad=True)\n with torch.autograd.enable_grad():\n L_ = diff * smcorr_\n d_smcorr = torch.autograd.grad(\n outputs=L_,\n inputs=smcorr_,\n grad_outputs=grad_loss_b_,\n )\n rel_diff(grad_smcorr2, d_smcorr[0], \"smax\")\n if torch.any(torch.isnan(grad_smcorr2[0])):\n import ipdb; ipdb.set_trace()\n\n\n if PROFILE:\n timings[\"scale-feats\"] += time.time() - tic\n tic = time.time()\n\n # Re-compute intermediate values\n grad_smcorr2 = grad_smcorr2.view(H, W, -1)\n f1_ = feats1[b].view(C, H * W)\n f2_ = feats2[b].view(C, h * w)\n fa_ = feats1[(b + 1) % B].reshape(C, h * w) # auxiliary\n\n if norm:\n f1_norm = F.normalize(f1_, p=2, dim=0) * JDT_FACTOR\n f2_norm = F.normalize(f2_, p=2, dim=0) * JDT_FACTOR\n fa_norm = F.normalize(fa_, p=2, dim=0) * JDT_FACTOR\n else:\n f1_norm = f1_.clone()\n f2_norm = f2_.clone()\n fa_norm = fa_.clone()\n\n if PROFILE:\n timings[\"fwd-norm\"] += time.time() - tic\n tic = time.time()\n\n # Match the source features against the auxiliaries\n corr = torch.matmul(f1_norm.t(), fa_norm)\n corr = corr.reshape(H, W, h, w)\n\n if PROFILE:\n timings[\"f1-aux-correlation\"] += time.time() - tic\n tic = time.time()\n\n smcorr = F.softmax(corr.view(H, W, -1), dim=2)\n smcorr = smcorr.view(corr.shape)\n if LOCAL_CHECKS:\n # cache a copy of the mega tensor for numerical checks\n smcorr_fa = smcorr[None, ...] 
* fa_norm.view(-1, 1, 1, h, w)\n f1_via_fa = smcorr_fa.sum((3, 4))\n else:\n \"\"\"This is one of the largest tensors.....\"\"\"\n f1_via_fa = (smcorr[None, ...] *\n fa_norm.view(-1, 1, 1, h, w)).sum((3, 4))\n\n f1_via_fa = f1_via_fa.view(C, H * W)\n\n # Main correlation computation\n corr2 = torch.matmul(f1_via_fa.t(), f2_norm).view(corr.shape)\n\n # Direct backward pass for second softmax\n smcorr2 = F.softmax(corr2.view(H, W, -1), dim=2)\n sum_term = torch.sum(grad_smcorr2 * smcorr2, dim=2, keepdim=True)\n grad_corr2 = smcorr2 * (grad_smcorr2 - sum_term)\n\n if not LOCAL_CHECKS:\n del smcorr2\n\n if PROFILE:\n timings[\"softmax\"] += time.time() - tic\n tic = time.time()\n\n # safety checks\n if LOCAL_CHECKS:\n with torch.enable_grad():\n corr2_num = corr2.clone().requires_grad_()\n corr2_num = corr2_num.reshape(H, W, -1)\n smcorr2_num = F.softmax(corr2_num, dim=2)\n grad_corr2_num = torch.autograd.grad(\n outputs=smcorr2_num,\n inputs=(corr2_num,),\n grad_outputs=grad_smcorr2,\n )\n rel_diff(grad_corr2, grad_corr2_num[0], \"smax-corr2\")\n\n \"\"\"Derivatives through the main correlation correlation\"\"\"\n grad_corr2 = grad_corr2.view(H * W, H * W)\n grad_f1_via_fa = torch.matmul(grad_corr2, f2_norm.t()).t()\n grad_f2_norm = torch.matmul(f1_via_fa, grad_corr2)\n\n if not LOCAL_CHECKS:\n del grad_corr2\n\n if PROFILE:\n timings[\"corr-back\"] += time.time() - tic\n tic = time.time()\n\n if LOCAL_CHECKS:\n with torch.enable_grad():\n f1_via_fa_num = f1_via_fa.clone().requires_grad_()\n f2_norm_num = f2_norm.clone().requires_grad_()\n corr2_num = torch.matmul(f1_via_fa_num.t(), f2_norm_num)\n grad_f1_via_fa_num, grad_f2_norm_num = torch.autograd.grad(\n outputs=corr2_num,\n inputs=(f1_via_fa_num, f2_norm_num),\n grad_outputs=grad_corr2,\n )\n rel_diff(grad_f1_via_fa, grad_f1_via_fa_num,\n \"corr-f1-via-fa\")\n rel_diff(grad_f2_norm, grad_f2_norm_num,\n \"corr->f2-norm\")\n\n if OLD_METHOD:\n # (may be able to collapse all this later)\n grad_f1_via_fa = grad_f1_via_fa.view(-1, H, W, 1, 1)\n\n # This tensor is crashing the GPU\n grad_smcorr_fa = grad_f1_via_fa.repeat(1, 1, 1, h, w)\n\n # safety checks over the summation\n if LOCAL_CHECKS:\n with torch.enable_grad():\n\n smcorr_fa_num = smcorr_fa.clone().requires_grad_()\n f1_via_fa_num = smcorr_fa_num.sum((3, 4))\n # f1_via_fa_num = f1_via_fa_num.view(C, H * W)\n\n grad_smcorr_fa_num = torch.autograd.grad(\n outputs=f1_via_fa_num,\n inputs=(smcorr_fa_num,),\n grad_outputs=grad_f1_via_fa.view(-1, H, w),\n )\n rel_diff(grad_smcorr_fa, grad_smcorr_fa_num[0],\n \"summation of grad_smcorr-fa\")\n\n # smcorr_fa = smcorr[None, ...] * fa_.view(-1, 1, 1, h, w)\n grad_smcorr = (grad_smcorr_fa * fa_norm.view(-1, 1, 1, h, w)).sum(0)\n grad_fa_ = (grad_smcorr_fa * smcorr[None, ...]).sum(1).sum(1)\n grad_fa_ = grad_fa_.reshape(C, h * w)\n\n # safety checks over the weighted sum\n if LOCAL_CHECKS:\n with torch.enable_grad():\n\n smcorr_num = smcorr.clone().requires_grad_()\n fa_norm_num = fa_norm.clone().requires_grad_()\n smcorr_fa_num = smcorr_num[None, ...] 
\\\n * fa_norm_num.view(-1, 1, 1, h, w)\n\n (grad_smcorr_num, grad_fa_num) = torch.autograd.grad(\n outputs=smcorr_fa_num,\n inputs=(smcorr_num, fa_norm_num),\n grad_outputs=grad_smcorr_fa,\n )\n rel_diff(grad_fa_, grad_fa_num,\n \"product of grad_fa_\")\n rel_diff(grad_smcorr, grad_smcorr_num,\n \"product of grad_smcor\")\n else:\n # -------------------------------------------------------\n # Collapsed summation method\n # -------------------------------------------------------\n # Fwd ops ->\n # smcorr_fa = smcorr[None, ...] * fa.reshape(-1, 1, 1, h, w)\n # f1_via_fa = smcorr_fa.sum((3, 4)).reshape(C, H * w)\n\n # Given gradient ->\n # (grad_f1_via_fa)\n\n # Desired gradients ->\n # (grad_fa_, grad_smcorr)\n\n grad_f1_via_fa = grad_f1_via_fa.view(-1, H, W, 1, 1)\n\n # safety checks over the summation\n if LOCAL_CHECKS:\n # This tensor is crashing the GPU, so should only be\n # used for numerical checks\n grad_smcorr_fa = grad_f1_via_fa.repeat(1, 1, 1, h, w)\n with torch.enable_grad():\n\n smcorr_fa_num = smcorr_fa.clone().requires_grad_()\n f1_via_fa_num = smcorr_fa_num.sum((3, 4))\n # f1_via_fa_num = f1_via_fa_num.view(C, H * W)\n\n grad_smcorr_fa_num = torch.autograd.grad(\n outputs=f1_via_fa_num,\n inputs=(smcorr_fa_num,),\n grad_outputs=grad_f1_via_fa.view(-1, H, w),\n )\n rel_diff(grad_smcorr_fa, grad_smcorr_fa_num[0],\n \"summation of grad_smcorr-fa\")\n\n # Use for-loop over EVC dimension to avoid memory issues\n if feats1.is_cuda:\n if grad_f1_via_fa.dtype == torch.float64:\n grad_smcorr = torch.cuda.DoubleTensor(H, W, h, w).fill_(0)\n grad_fa_ = torch.cuda.DoubleTensor(C, h, w).fill_(0)\n else:\n grad_smcorr = torch.cuda.FloatTensor(H, W, h, w).fill_(0)\n grad_fa_ = torch.cuda.FloatTensor(C, h, w).fill_(0)\n else:\n grad_smcorr = torch.zeros((H, W, h, w), dtype=feats1.dtype)\n grad_fa_ = torch.zeros((C, h, w), dtype=feats1.dtype)\n\n for cc in range(C):\n grad_smcorr += (grad_f1_via_fa[cc] * fa_norm[cc].view(1, 1, h, w))\n grad_fa_[cc] = (grad_f1_via_fa[cc] * smcorr).sum((0, 1))\n grad_fa_ = grad_fa_.reshape(C, h * w)\n\n # safety checks over the weighted sum\n if LOCAL_CHECKS:\n with torch.enable_grad():\n\n smcorr_num = smcorr.clone().requires_grad_()\n fa_norm_num = fa_norm.clone().requires_grad_()\n smcorr_fa_num = smcorr_num[None, ...] 
\\\n * fa_norm_num.view(-1, 1, 1, h, w)\n\n (grad_smcorr_num, grad_fa_num) = torch.autograd.grad(\n outputs=smcorr_fa_num,\n inputs=(smcorr_num, fa_norm_num),\n grad_outputs=grad_smcorr_fa,\n )\n rel_diff(grad_fa_, grad_fa_num,\n \"product of grad_fa_\")\n rel_diff(grad_smcorr, grad_smcorr_num,\n \"product of grad_smcor\")\n\n if PRINT_MEM:\n key = None\n val = None\n shape_mems = {}\n for key, val in locals().items():\n if hasattr(val, \"shape\"):\n shape_mems[key] = estimate_mem(val)\n\n sorted_mems = sorted(shape_mems.items(), key=lambda kv: -kv[1])\n for key, val in sorted_mems:\n print(\"{}: {:.4f} GiB\".format(key, val))\n\n # Direct backward pass for first softmax\n # smcorr = F.softmax(corr.view(H, W, -1), dim=2)\n grad_smcorr = grad_smcorr.view(H, W, -1)\n smcorr = smcorr.view(H, W, -1)\n sum_term = torch.sum(grad_smcorr * smcorr, dim=2, keepdim=True)\n grad_corr = smcorr * (grad_smcorr - sum_term)\n\n if not LOCAL_CHECKS:\n del grad_smcorr\n del grad_smcorr2\n del smcorr\n del corr\n\n if LOCAL_CHECKS:\n with torch.enable_grad():\n corr_num = corr.clone().requires_grad_()\n smcorr_num = F.softmax(corr_num.view(H, W, -1), dim=2)\n smcorr_num = smcorr_num.reshape(corr_num.shape)\n grad_corr_num = torch.autograd.grad(\n outputs=smcorr_num,\n inputs=(corr_num,),\n grad_outputs=grad_smcorr.view(H, W, h, w),\n )\n rel_diff(grad_corr, grad_corr_num[0].view(H, W, -1),\n \"smax-corr\")\n\n # Back through the first correlation\n # [Fwd op] -> `corr = torch.matmul(f1_norm.t(), fa_norm)`\n grad_corr = grad_corr.view(H * W, h * w)\n grad_f1_norm = torch.matmul(grad_corr, fa_norm.t()).t()\n grad_fa_norm = torch.matmul(f1_norm, grad_corr)\n\n if not LOCAL_CHECKS:\n del grad_corr\n\n\n if LOCAL_CHECKS:\n with torch.enable_grad():\n f1_norm_num = f1_norm.clone().requires_grad_()\n fa_norm_num = fa_norm.clone().requires_grad_()\n corr_num = torch.matmul(f1_norm_num.t(), fa_norm_num)\n grad_f1_norm_num, grad_fa_norm_num = torch.autograd.grad(\n outputs=corr_num,\n inputs=(f1_norm_num, fa_norm_num),\n grad_outputs=grad_corr,\n )\n rel_diff(grad_f1_norm, grad_f1_norm_num, \"corr->f1n-orm\")\n rel_diff(grad_fa_norm, grad_fa_norm_num, \"corr->fa-norm\")\n\n # Combine gradients for two ops using aux features\n grad_fa_norm = grad_fa_norm + grad_fa_\n\n # Back through the norms\n # [Fwd op] -> `f1_norm = F.normalize(f1_, p=2, dim=0) * JDT_FACTOR`\n # [Fwd op] -> `f2_norm = F.normalize(f2_, p=2, dim=0) * JDT_FACTOR`\n # [Fwd op] -> `fa_norm = F.normalize(fa_, p=2, dim=0) * JDT_FACTOR`\n # xNorm = sqrt(sum(x.*x, 3) + opts.epsilon) ;\n\n if norm:\n f1_norm_val = torch.norm(f1_, p=2, dim=0).clamp(min=EPS)\n f2_norm_val = torch.norm(f2_, p=2, dim=0).clamp(min=EPS)\n fa_norm_val = torch.norm(fa_, p=2, dim=0).clamp(min=EPS)\n\n max_val_f1 = torch.max(f1_norm_val)\n max_val_f2 = torch.max(f2_norm_val)\n max_val_fa = torch.max(fa_norm_val)\n if max_val_f1 + max_val_f2 + max_val_fa > 1E8:\n import ipdb; ipdb.set_trace()\n\n grad_f1_norm_ = grad_f1_norm / f1_norm_val\n grad_f1 = JDT_FACTOR * (grad_f1_norm_ -\n (grad_f1_norm_ * f1_).sum(0) * (f1_ / (f1_norm_val ** 2)))\n\n grad_f2_norm_ = grad_f2_norm / f2_norm_val\n grad_f2 = JDT_FACTOR * (grad_f2_norm_ -\n (grad_f2_norm_ * f2_).sum(0) * (f2_ / (f2_norm_val ** 2)))\n\n grad_fa_norm_ = grad_fa_norm / fa_norm_val\n grad_fa = JDT_FACTOR * (grad_fa_norm_ -\n (grad_fa_norm_ * fa_).sum(0) * (fa_ / (fa_norm_val ** 2)))\n\n if LOCAL_CHECKS:\n with torch.enable_grad():\n f1_num = f1_.clone().requires_grad_()\n f2_num = f2_.clone().requires_grad_()\n fa_num = 
fa_.clone().requires_grad_()\n\n f1_norm_num = F.normalize(f1_num, p=2, dim=0) * JDT_FACTOR\n f2_norm_num = F.normalize(f2_num, p=2, dim=0) * JDT_FACTOR\n fa_norm_num = F.normalize(fa_num, p=2, dim=0) * JDT_FACTOR\n\n grad_f1_num = torch.autograd.grad(\n outputs=f1_norm_num,\n inputs=(f1_num,),\n grad_outputs=grad_f1_norm,\n )\n grad_f2_num = torch.autograd.grad(\n outputs=f2_norm_num,\n inputs=(f2_num,),\n grad_outputs=grad_f2_norm,\n )\n grad_fa_num = torch.autograd.grad(\n outputs=fa_norm_num,\n inputs=(fa_num,),\n grad_outputs=grad_fa_norm,\n )\n rel_diff(grad_f1, grad_f1_num[0], \"norm-f1\")\n rel_diff(grad_f2, grad_f2_num[0], \"norm-f2\")\n rel_diff(grad_fa, grad_fa_num[0], \"norm-fa\")\n else:\n grad_f1 = grad_f1_norm\n grad_f2 = grad_f2_norm\n grad_fa = grad_fa_norm\n\n\n if PRINT_MEM:\n key = None\n val = None\n shape_mems = {}\n print(\"=======================\")\n for key, val in locals().items():\n if hasattr(val, \"shape\"):\n shape_mems[key] = estimate_mem(val)\n\n sorted_mems = sorted(shape_mems.items(), key=lambda kv: -kv[1])\n for key, val in sorted_mems:\n print(\"{}: {:.4f} GiB\".format(key, val))\n import ipdb; ipdb.set_trace()\n\n\n # safety checks over the whole inner loop\n if LOCAL_CHECKS:\n with torch.enable_grad():\n\n f1_num = feats1[b].clone().detach().requires_grad_().reshape(C, H * W)\n f2_num = feats2[b].clone().detach().requires_grad_().reshape(C, h * w)\n fa_num = feats1[(b + 1) % B].clone().detach().requires_grad_().reshape(C, h * w)\n\n if norm:\n f1_norm_num = F.normalize(f1_num, p=2, dim=0) * JDT_FACTOR\n f2_norm_num = F.normalize(f2_num, p=2, dim=0) * JDT_FACTOR\n fa_norm_num = F.normalize(fa_num, p=2, dim=0) * JDT_FACTOR\n else:\n f1_norm_num = f1_num\n f2_norm_num = f2_num\n fa_norm_num = fa_num\n\n # BLock 1 ------------------------------------------\n corr_num = torch.matmul(f1_norm_num.t(), fa_norm_num)\n corr_num = corr_num.reshape(H, W, H, W)\n smcorr_num = F.softmax(corr_num.reshape(H, W, -1), dim=2)\n smcorr_num = smcorr_num.reshape(corr_num.shape)\n # BLock 1 ------------------------------------------\n\n\n # BLock 2 ------------------------------------------\n smcorr_fa_num = smcorr_num[None, ...] 
* \\\n fa_norm_num.reshape(-1, 1, 1, h, w)\n # BLock 2 ------------------------------------------\n\n\n # BLock 3 ------------------------------------------\n f1_via_fa_num = smcorr_fa_num.sum((3, 4)).reshape(C, H * W)\n # BLock 3 ------------------------------------------\n\n # BLock 4 ------------------------------------------\n corr2_num = torch.matmul(f1_via_fa_num.t(), f2_norm_num)\n corr2_num = corr2_num.reshape(corr_num.shape)\n smcorr2_num = F.softmax(corr2_num.reshape(H, W, -1), dim=2)\n smcorr2_num = smcorr2_num.reshape(corr_num.shape)\n # BLock 4 ------------------------------------------\n\n grad_f1_num, grad_fa_num, grad_f2_num = torch.autograd.grad(\n outputs=(smcorr2_num,),\n inputs=(f1_num, fa_num, f2_num),\n grad_outputs=(grad_smcorr2.view(corr_num.shape)),\n )\n\n rel_diff(grad_f1, grad_f1_num, \"df1_\")\n rel_diff(grad_f2, grad_f2_num, \"df2_\")\n rel_diff(grad_fa, grad_fa_num, \"dfa_\")\n\n \"\"\"Distribute the gradients back among the input tensor\n features that require them.\"\"\"\n grad_feats1[b] += grad_f1.reshape((C, H, W))\n grad_feats1[(b + 1) % B] += grad_fa.reshape((C, h, w))\n grad_feats2[b] += grad_f2.reshape((C, h, w))\n\n if PROFILE:\n timings[\"feat-assign\"] += time.time() - tic\n\n\n if LOCAL_CHECKS_INNER_LOOP:\n with torch.enable_grad():\n loss = 0.\n grad_loss_ = grad_loss * (H * W * B) # unscale\n for b in range(B):\n f1 = feats1[b].reshape(C, H * W) # source\n f2 = feats2[b].reshape(C, h * w) # target\n fa = feats1[(b + 1) % B].reshape(C, h * w) # auxiliary\n\n if norm:\n f1 = F.normalize(f1, p=2, dim=0) * JDT_FACTOR\n f2 = F.normalize(f2, p=2, dim=0) * JDT_FACTOR\n fa = F.normalize(fa, p=2, dim=0) * JDT_FACTOR\n\n corr = torch.matmul(f1.t(), fa)\n corr = corr.reshape(H, W, h, w)\n smcorr = F.softmax(corr.reshape(H, W, -1), dim=2).reshape(corr.shape)\n smcorr_fa = smcorr[None, ...] 
* fa.reshape(-1, 1, 1, h, w)\n # del smcorr\n\n f1_via_fa = smcorr_fa.sum((3, 4)).reshape(C, H * w)\n # del smcorr_fa\n\n corr2 = torch.matmul(f1_via_fa.t(), f2).reshape(corr.shape)\n smcorr2 = F.softmax(corr2.reshape(H, W, -1), dim=2).reshape(corr.shape)\n # del corr2\n\n with torch.no_grad():\n diff = batch_grid_u[b, :, :, None, None, :] - \\\n xxyy[None, None, ::stride, ::stride, :]\n diff = (diff * diff).sum(4).sqrt()\n diff = diff.pow(pow)\n L = diff * smcorr2\n loss += L.float().sum()\n\n loss = loss / (H * W * B)\n grad_f1_num, grad_f2_num = torch.autograd.grad(\n outputs=loss,\n inputs=(feats1, feats2),\n grad_outputs=grad_loss_,\n )\n\n rel_diff(grad_feats1, grad_f1_num, \"full-loop f2\")\n rel_diff(grad_feats2, grad_f2_num, \"full-loop f2\")\n\n if PROFILE:\n tic = time.time()\n\n if PRINT_MEM:\n key = None\n val = None\n shape_mems = {}\n for key, val in locals().items():\n if hasattr(val, \"shape\"):\n shape_mems[key] = estimate_mem(val)\n\n sorted_mems = sorted(shape_mems.items(), key=lambda kv: -kv[1])\n for key, val in sorted_mems:\n print(\"{}: {:.4f} GiB\".format(key, val))\n\n if PROFILE:\n timings[\"cleanup\"] += time.time() - tic\n\n if PROFILE:\n timings[\"minibatch\"] = time.time() - batch_tic\n print(\"==============\")\n total_ratios = 0\n for key in timings:\n ratio = 100 * timings[key] / timings[\"minibatch\"]\n msg = \"{:.3f} ({:.2f}%) >>> {}\"\n print(msg.format(timings[key], ratio, key))\n total_ratios += ratio\n msg = \"{:.3f}s >>> ratio total {}\"\n print(msg.format(timings[\"minibatch\"], total_ratios - 100))\n print(\"==============\")\n\n return (grad_feats1, grad_feats2, grad_xxyy, grad_batch_u,\n grad_stride, grad_norm, grad_pow)", "def FoldBatchNorms(graph):\n _FoldFusedBatchNorms(graph)\n _FoldUnfusedBatchNorms(graph)", "def backward_and_step(self, loss):\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()", "def backward(self, gradient):\n raise NotImplementedError()", "def backward(self, gradient):\n #TODO\n pass", "def backward(self, gradient):\n #TODO\n pass", "def _AffBatchRelu_Backprop(self, dscores, cache):\n grads = {}\n loss = None\n #Last Softmax Layer\n ##Add L2 Regularization loss\n loss = 0.5 * self.reg * np.sum(self.params['W{0}'.format(self.num_layers)]**2)\n ##Calculate grads for last Affine\n dhid, grads['W{0}'.format(self.num_layers)], grads['b{0}'.format(self.num_layers)] =\\\n affine_backward(dscores, cache[-1])\n grads['W{0}'.format(self.num_layers)] += self.reg * self.params['W{0}'.format(self.num_layers)]\n\n for i in range(self.num_layers-1, 0, -1): #hidden layers\n ##L2 Reg. 
loss\n loss += 0.5 * self.reg * np.sum(self.params['W{0}'.format(i)]**2)\n ##Calculate grads for [{affine-Batchnorm-relu} X (L-1)]\n dhid = relu_backward(dhid, cache[i]['relu'])\n dhid, grads['gamma{0}'.format(i)], grads['beta{0}'.format(i)] = \\\n batchnorm_backward_alt(dhid, cache[i]['batchnorm'])\n dhid, grads['W{0}'.format(i)], grads['b{0}'.format(i)] = \\\n affine_backward(dhid, cache[i]['affine']) \n grads['W{0}'.format(i)] += self.reg * self.params['W{0}'.format(i)]\n\n return grads, loss", "def _FoldUnfusedBatchNorms(graph):\n input_to_ops_map = input_to_ops.InputToOps(graph)\n\n for bn in common.BatchNormGroups(graph):\n has_scaling = _HasScaling(graph, input_to_ops_map, bn)\n\n # The mangling code intimately depends on BatchNorm node's internals.\n original_op, folded_op = _CreateFoldedOp(graph, bn, has_scaling=has_scaling)\n\n activation = common.GetEndpointActivationOp(graph, bn)\n if activation:\n nodes_modified_count = graph_editor.reroute_ts([folded_op.outputs[0]],\n [original_op.outputs[0]],\n can_modify=[activation])\n if nodes_modified_count != 1:\n raise ValueError('Unexpected inputs to op: %s' % activation.name)\n continue\n\n # Treat consumer ops in bypass modules differently since they have Add\n # operations instead of Relu* above.\n add_bypass_ctx = re.search(r'^(.*)/([^/]+)', bn).group(1)\n add_bypass = graph.get_operation_by_name(add_bypass_ctx + '/Add')\n nodes_modified_count = graph_editor.reroute_ts([folded_op.outputs[0]],\n [original_op.outputs[0]],\n can_modify=[add_bypass])\n if nodes_modified_count != 1:\n raise ValueError('Unexpected inputs to op: %s' % add_bypass.name)", "def affine_batchnorm_relu_forward(x, w, b, gamma, beta, bn_param):\n fc_out, fc_cache = affine_forward(x, w, b)\n norm_out, norm_cache = batchnorm_forward(fc_out, gamma, beta, bn_param)\n out, relu_cache = relu_forward(norm_out)\n cache = (fc_cache, norm_cache, relu_cache)\n return out, cache", "def batch_norm(\n input,\n running_mean,\n running_var,\n weight,\n bias,\n training=False,\n momentum=0.1,\n eps=1e-5,\n):\n return FunctionLib.apply(\n 'BatchNorm', input.device,\n [input, weight, bias, running_mean, running_var],\n axis=1, epsilon=eps, use_stats=int(not training),\n momentum=1.0 - momentum)", "def _AffBatchReluDrop_Backprop(self, dscores, cache):\n grads = {}\n loss = None\n #Last Softmax Layer\n ##Add L2 Regularization loss\n loss = 0.5 * self.reg * np.sum(self.params['W{0}'.format(self.num_layers)]**2)\n ##Calculate grads for last Affine\n dhid, grads['W{0}'.format(self.num_layers)], grads['b{0}'.format(self.num_layers)] =\\\n affine_backward(dscores, cache[-1])\n grads['W{0}'.format(self.num_layers)] += self.reg * self.params['W{0}'.format(self.num_layers)]\n\n for i in range(self.num_layers-1, 0, -1): #hidden layers\n ##L2 Reg. 
loss\n loss += 0.5 * self.reg * np.sum(self.params['W{0}'.format(i)]**2)\n ##Calculate grads for [{affine-Batchnorm-relu-drop} X (L-1)]\n dhid = dropout_backward(dhid, cache[i]['drop'])\n dhid = relu_backward(dhid, cache[i]['relu'])\n dhid, grads['gamma{0}'.format(i)], grads['beta{0}'.format(i)] = \\\n batchnorm_backward_alt(dhid, cache[i]['batchnorm'])\n dhid, grads['W{0}'.format(i)], grads['b{0}'.format(i)] = \\\n affine_backward(dhid, cache[i]['affine']) \n grads['W{0}'.format(i)] += self.reg * self.params['W{0}'.format(i)]\n\n return grads, loss", "def _FoldFusedBatchNorms(graph):\n for match in _FindFusedBatchNorms(graph):\n scope, sep, _ = match.layer_op.name.rpartition('/')\n # Make sure new ops are added to `graph` and put on the same device as\n # `bn_op`. The '/' (i.e. `sep`) ensures that we reuse the existing scope\n # named `scope`. Otherwise, TF creates a unique scope whose name starts with\n # `scope`.\n with graph.as_default(), graph.name_scope(scope + sep), ops.device(\n match.bn_op.device):\n with graph.name_scope(scope + sep + 'BatchNorm_Fold' + sep):\n # new weights = old weights * gamma / sqrt(variance + epsilon)\n # new biases = -mean * gamma / sqrt(variance + epsilon) + beta\n multiplier_tensor = match.gamma_tensor * math_ops.rsqrt(\n match.variance_tensor + match.bn_op.get_attr('epsilon'))\n bias_tensor = math_ops.subtract(\n match.beta_tensor,\n match.mean_tensor * multiplier_tensor,\n name='bias')\n\n # The shape of depthwise weights is different, so we need to reshape the\n # multiplier_tensor to ensure that the scaled_weight_tensor has the\n # expected shape.\n if match.layer_op.type == 'DepthwiseConv2dNative':\n new_shape = [\n match.weight_tensor.get_shape().as_list()[2],\n match.weight_tensor.get_shape().as_list()[3]\n ]\n multiplier_tensor = array_ops.reshape(\n multiplier_tensor, new_shape, name='scale_reshape')\n\n # TODO(suharshs): This naming of the following ops needs to carefully\n # follow the naming expected by quantize.py. 
Generalize the quantize code\n # to not require these delicate naming conventions.\n scaled_weight_tensor = math_ops.multiply(\n match.weight_tensor, multiplier_tensor, name='mul_fold')\n\n new_layer_tensor = _CloneWithNewOperands(\n match.layer_op, match.input_tensor, scaled_weight_tensor)\n\n bias_add_tensor = math_ops.add(\n new_layer_tensor, bias_tensor, name='add_fold')\n\n nodes_modified_count = graph_editor.reroute_ts(bias_add_tensor,\n match.output_tensor)\n if nodes_modified_count != 1:\n raise ValueError(\n 'Unexpected inputs to op: %s' % match.output_tensor.name)", "def fit_batch(self, batch):\n\n predictions = self.compute_forward(batch, sb.Stage.TRAIN)\n loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)\n\n # normalize the loss by gradient_accumulation step\n (loss / self.hparams.gradient_accumulation).backward()\n\n if self.step % self.hparams.gradient_accumulation == 0:\n # gradient clipping & early stop if loss is not finite\n self.check_gradients(loss)\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n return loss.detach()", "def backward(ctx, grad_output):\n batch_size, n_dim = grad_output.shape\n sign_z, = ctx.saved_tensors\n device = grad_output.device\n S = sign_z != 0\n S[:, 0] = True\n sign_z[:, 0] = 0\n # XXX do clever computations\n L = torch.triu(torch.ones((n_dim, n_dim), dtype=torch.float64,\n device=device))\n\n grad_x, grad_lbda = [], []\n for i in range(batch_size):\n L_S = L[:, S[i]] # n_dim x |S|\n grad_u = grad_output[i].matmul(L_S) # 1 x |S|\n H_S = torch.inverse(L_S.t().matmul(L_S))\n grad_x.append(grad_u.matmul(H_S.matmul(L_S.t())))\n grad_lbda.append(grad_u.matmul(H_S.matmul(-sign_z[i][S[i]])))\n grad_x = torch.stack(grad_x)\n grad_lbda = torch.stack(grad_lbda)\n return (grad_x, grad_lbda)", "def batch_norm(x: tf.Tensor) -> tf.Tensor:\n return slim.batch_norm(x, activation_fn=tf.nn.relu, scope='postnorm')", "def convert_layer_norm(g, op, block):\n\n begin_norm_axis = op.attr(\"begin_norm_axis\")\n epsilon = op.attr(\"epsilon\")\n x = g.get_node(op.input(\"X\")[0])\n bias_input = op.input(\"Bias\")\n scale_input = op.input(\"Scale\")\n\n x_shape = infer_shape(x)\n assert begin_norm_axis in (\n len(x_shape) - 1,\n -1,\n ), \"Support only normalization over last one dimension.\"\n\n if bias_input:\n bias = g.get_node(bias_input[0])\n else:\n bias = _expr.const(np.zeros(x_shape[begin_norm_axis]))\n\n if scale_input:\n scale = g.get_node(scale_input[0])\n else:\n scale = _expr.const(np.ones(x_shape[begin_norm_axis]))\n\n out = _op.nn.layer_norm(\n x, gamma=scale, beta=bias, axis=begin_norm_axis, epsilon=epsilon, center=True, scale=True\n )\n g.add_node(op.output(\"Y\")[0], out)", "def affine_bn_relu_backward(dout, cache):\n fc_cache, bn_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dan, dgamma, dbeta = batchnorm_backward(da, bn_cache)\n dx, dw, db = affine_backward(dan, fc_cache)\n return dx, dw, db, dgamma, dbeta", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n sample_mean = np.mean(x, axis = 0)\n sample_var = np.var(x , axis = 0)\n x_hat = (x - sample_mean) / (np.sqrt(sample_var + eps))\n out = gamma * x_hat + beta\n cache = (gamma, x, sample_mean, sample_var, eps, x_hat)\n running_mean = 
momentum * running_mean + (1 - momentum) * sample_mean\n running_var = momentum * running_var + (1 - momentum) * sample_var\n elif mode == 'test':\n scale = gamma / (np.sqrt(running_var + eps))\n out = x * scale + (beta - running_mean * scale)\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache", "def apply_batch_normalization(self, layer):\n if type(layer) is not BatchNormalization:\n raise ValueError('The `layer` must be neoml.Dnn.BatchNormalization.')\n\n self._internal.apply_batch_normalization(layer._internal)", "def backward(self, inputs): \n self.error = self.error * sigmoid(self.output, der=True) # because the activation function of last layer must be sigmoid\n delta3_weights = np.dot(self.z2.T, self.error)\n\n self.error = np.dot(self.error, self.output3_weights.T) * self.af(self.z2, der=True) \n delta2_weights = np.dot(self.z1.T, self.error)\n\n self.error = np.dot(self.error, self.hidden2_weights.T) * self.af(self.z1, der=True)\n delta1_weights = np.dot(inputs.T, self.error)\n\n self.hidden1_weights -= self.lr * delta1_weights\n self.hidden2_weights -= self.lr * delta2_weights\n self.output3_weights -= self.lr * delta3_weights", "def backward(self, grad):\n self.grads[\"w\"] = np.matmul(self.input_data.T, grad)\n self.grads[\"b\"] = np.sum(grad, axis=0)\n return np.matmul(self.input_data.T, grad)", "def convert_batchnorm(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n momentum = float(attrs.get(\"momentum\", 0.9))\n eps = float(attrs.get(\"eps\", 0.001))\n\n bn_node = onnx.helper.make_node(\n \"BatchNormalization\",\n input_nodes,\n [name],\n name=name,\n epsilon=eps,\n momentum=momentum,\n # MXNet computes mean and variance per channel for batchnorm.\n # Default for onnx is across all spatial features. Relying on default\n # ONNX behavior of spatial=1 for ONNX opset 8 and below. 
As the spatial\n # attribute is deprecated in opset 9 and above, not explicitly encoding it.\n )\n return [bn_node]", "def forward_backward_prop(data, labels, params, dimensions):\n\n ### Unpack network parameters (do not modify)\n ofs = 0\n Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])\n\n W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H))\n ofs += Dx * H\n b1 = np.reshape(params[ofs:ofs + H], (1, H))\n ofs += H\n W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))\n ofs += H * Dy\n b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))\n\n ### YOUR CODE HERE: forward propagation\n affine_1 = np.dot(data, W1) + b1\n sigmoid_1 = sigmoid(affine_1)\n affine_2 = np.dot(sigmoid_1, W2) + b2\n scores = sigmoid(affine_2)\n cost = - np.sum(np.multiply(np.log(softmax(scores)), labels))\n ### END YOUR CODE\n \n ### YOUR CODE HERE: backward propagation\n cross_entropy_grad_ = cross_entropy_grad(scores, labels)\n sigmoid_2_grads = sigmoid_input_grad(cross_entropy_grad_, scores)\n x_2_grad, gradW2, gradb2 = affine_grads(sigmoid_2_grads, sigmoid_1, W2, b2)\n sigmoid_1_grads = sigmoid_input_grad(x_2_grad, sigmoid_1)\n x_1_grad, gradW1, gradb1 = affine_grads(sigmoid_1_grads, data, W1, b1)\n ### END YOUR CODE\n \n ### Stack gradients (do not modify)\n grad = np.concatenate((gradW1.flatten(), gradb1.flatten(), \n gradW2.flatten(), gradb2.flatten()))\n return cost, grad", "def _batch_gradient_descent(self, X, y, lr, epochs):\n\n # Initialize the bias and weights.\n _, n = X.shape\n self.bias = 0\n self.weights = np.random.normal(size=n)\n\n for i in range(epochs):\n # Calculate and sum the gradient delta of each sample\n grad_bias, grad_weights = self._get_gradient(X, y)\n\n # Show the gradient of each epoch.\n grad = (grad_bias + grad_weights.mean()) / 2\n print(\"Epochs %d gradient %.3f\" % (i + 1, grad), flush=True)\n\n # Update the bias and weight by gradient of current epoch\n self.bias += lr * grad_bias\n self.weights += lr * grad_weights", "def backward(self):\n #initiate the gradients\n #print('')\n \n #print('node {} grad {}'.format(self.id, self.gradient))\n #print('node {} times visited : {}/{}'.format(self.id, self.times_visited, self.times_used))\n\n if self.gradient is None:\n self.gradient=np.eye(self.output_dim)\n self.times_visited+=1\n\n \n \n if self.childrens==[]:\n return(self.gradient)\n else:\n self.backward()\n \n else: \n if self.childrens!=[]:\n #we can still going deeper in backprop\n #print(len(self.childrens), ' childrens', str([self.childrens[i]['node'].id for i in range(len(self.childrens))]))\n for child in self.childrens:\n node,jacobian=child['node'], child['jacobian']\n \n new_grad = np.dot(self.gradient, jacobian)\n #print(node.gradient)\n #print(new_grad)\n \n if node.gradient is None:\n node.gradient = new_grad\n else: \n node.gradient += new_grad\n \n node.times_visited+=1\n #print('looking at node {} \\ngradient {}'.format(node.id, node.gradient))\n\n \n if node.times_used ==node.times_visited: \n #print(node.gradient)\n node.backward() \n else:\n #still some computations to perform upwards before going deeped\n #print('node {} visits : {}/{}'.format(node.id, node.times_visited, node.times_used))\n pass", "def backward(self, out_grad, input):\n raise NotImplementedError", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = \"test\" if y is None else \"train\"\n\n # Set train/test mode for batchnorm params and dropout param since they\n # behave differently during training and testing.\n if self.use_dropout:\n self.dropout_param[\"mode\"] = mode\n if 
self.normalization == \"batchnorm\":\n for bn_param in self.bn_params:\n bn_param[\"mode\"] = mode\n scores = None\n\n cache_affine = []\n cache_bn = []\n cache_ln = []\n cache_relu = []\n cache_dropout = []\n \n # Forward Pass\n out = X\n for i in range(self.num_layers - 1):\n # Affine\n W, b = self.params['W' + str(i+1)], self.params['b' + str(i+1)]\n out, cache = affine_forward(out, W, b)\n cache_affine.append(cache)\n # BN\n if self.normalization=='batchnorm':\n gamma, beta = self.params['gamma' + str(i+1)], self. params['beta' + str(i+1)]\n out, cache = batchnorm_forward(out, gamma, beta, self.bn_params[i])\n cache_bn.append(cache)\n if self.normalization=='layernorm':\n gamma, beta = self.params['gamma' + str(i+1)], self.params['beta' + str(i+1)]\n out, cache = layernorm_forward(out, gamma, beta, self.bn_params[i])\n cache_ln.append(cache)\n # ReLU\n out, cache = relu_forward(out)\n cache_relu.append(cache)\n # Dropout\n if self.use_dropout:\n out, cache = dropout_forward(out, self.dropout_param)\n cache_dropout.append(cache)\n # Input update\n x = out\n \n # Last Layer\n W, b = self.params['W' + str(self.num_layers)], self.params['b' + str(self.num_layers)]\n scores, cache = affine_forward(x, W, b)\n cache_affine.append(cache)\n\n # If test mode return early\n if mode == \"test\":\n return scores\n\n loss, grads = 0.0, {}\n\n N = X.shape[0]\n\n weight_name = 'W' + str(self.num_layers) \n bias_name = 'b' + str(self.num_layers)\n\n # Loss calculation\n loss, dx = softmax_loss(scores, y)\n # Last layer backwards\n dout, grads[weight_name], grads[bias_name] = affine_backward(dx, cache_affine.pop())\n # Last layer regularization\n loss += 0.5 * self.reg * np.sum(np.square(self.params[weight_name]))\n #grads[weight_name] /= N\n grads[weight_name] += self.reg * self.params[weight_name]\n # Layers: self.num_layer - 1 -> 1\n i = self.num_layers - 2\n while i >= 0:\n # Dropout\n if self.use_dropout:\n dout = dropout_backward(dout, cache_dropout.pop())\n # ReLU\n dout = relu_backward(dout, cache_relu.pop())\n # BN\n if self.normalization=='batchnorm':\n dout, grads['gamma' + str(i+1)], grads['beta' + str(i+1)] = batchnorm_backward(dout, cache_bn.pop())\n #LN\n if self.normalization=='layernorm':\n dout, grads['gamma' + str(i+1)], grads['beta' + str(i+1)] = layernorm_backward(dout, cache_ln.pop())\n # Affine\n weight_name = 'W' + str(i+1) \n bias_name = 'b' + str(i+1)\n dout, grads[weight_name], grads[bias_name] = affine_backward(dout, cache_affine.pop())\n # Regularization\n loss += 0.5 * self.reg * np.sum(np.square(self.params[weight_name]))\n #grads[weight_name] /= N\n grads[weight_name] += self.reg * self.params[weight_name]\n i -= 1\n\n return loss, grads", "def back_propagation(self,\n X,\n Y,\n X_test=None,\n Y_test=None,\n batch_size=128,\n num_epochs=100,\n lr=0.1,\n verbose=True,\n no_tqdm=False):\n if len(X.shape) != 2 or X.shape[1] != self.input_size:\n raise ValueError(\n \"Input dimensions must be (n_samples, RBM.input_size)\")\n\n\n if verbose:\n pretrained_str = \" pretrained\" if self.pretrained else \"\"\n print(\n f\"################## Training{pretrained_str} DNN ##################\")\n print(\n f\"## Num layers : {len(self.layers)}\\n## Num neurons : {self.layers[0].output_size}\\n## Num training samples : {X.shape[0]}\\n## Classification layer of size : {self.classif_RBM.output_size}\\n\")\n \n n_samples = X.shape[0]\n train_total_score = []\n train_total_loss = []\n test_total_score = []\n test_total_loss = []\n\n tq_epochs = tqdm(range(num_epochs), leave=False, 
position=0, disable=no_tqdm)\n for e in tq_epochs:\n\n # shuffle data\n indices = np.random.permutation(n_samples)\n X = X[indices, :]\n Y = Y[indices, :]\n epoch_score = []\n epoch_loss = []\n for b in range(int(np.ceil(n_samples / batch_size))):\n # batch borders\n beg = b * batch_size\n end = min(beg + batch_size, n_samples)\n n = end - beg\n batch = X[beg:end, :]\n targets = Y[beg:end, :]\n layer_wise_output = self.input_output_network(batch)\n predictions = layer_wise_output[-1]\n # cross entropy & score computation\n epoch_score.append(accuracy_score(predictions, targets))\n epoch_loss.append(cross_entropy(predictions, targets))\n # gradients computations\n grads_W = []\n grads_b = []\n # note : layer_wise_output have one more element than self.layers because of self.classif_RBM\n # -- last layer\n C = predictions - targets\n grads_W.append((layer_wise_output[-2].T @ C) / n)\n grads_b.append(np.sum(C) / n)\n # -- over layers\n for layer_idx in reversed(range(len(self.layers))):\n # layers in reverse order\n if layer_idx == len(self.layers) - 1:\n C = C @ self.classif_RBM.W.T * (\n layer_wise_output[layer_idx] *\n (1 - layer_wise_output[layer_idx]))\n else:\n C = C @ self.layers[layer_idx + 1].W.T * (\n layer_wise_output[layer_idx] *\n (1 - layer_wise_output[layer_idx]))\n\n if layer_idx == 0:\n grads_W.append(batch.T @ C)\n else:\n grads_W.append(layer_wise_output[layer_idx - 1].T @ C)\n grads_b.append(np.sum(C))\n # gradients updates\n for layer_idx in range(len(self.layers)):\n self.layers[layer_idx].W -= grads_W[-layer_idx -\n 1] * lr / n\n self.layers[layer_idx].b -= grads_b[-layer_idx -\n 1] * lr / n\n self.classif_RBM.W -= grads_W[0] * lr / n\n self.classif_RBM.b -= grads_b[0] * lr / n\n\n # train loss & score\n train_total_score.append(sum(epoch_score) / len(epoch_score))\n train_total_loss.append(sum(epoch_loss) / len(epoch_loss))\n # test loss & score\n if X_test is not None:\n layer_wise_output = self.input_output_network(X_test)\n predictions = layer_wise_output[-1]\n test_total_score.append(accuracy_score(predictions, Y_test))\n test_total_loss.append(cross_entropy(predictions, Y_test))\n\n if X_test is not None:\n m = \"epoch: {0:d} (Loss: train {1:.2f}, test {2:.2f}) (Accuracy: train {3:.2f}, test {4:.2f})\".format(e, train_total_loss[-1], test_total_loss[-1],\n train_total_score[-1], test_total_score[-1])\n else:\n m = \"epoch: {0:d} (Loss: train {1:.2f}) (Accuracy: train {2:.2f})\".format(e, train_total_loss[-1],\n train_total_score[-1])\n tq_epochs.set_description(m)\n\n if verbose:\n print(m)\n print(\"DONE.\\n\")\n self.trained = True\n\n if X_test is not None:\n return train_total_loss, test_total_loss, train_total_score, test_total_score\n else:\n return train_total_loss, train_total_score", "def back_propagate(self, inputs, hidden, output, errors):\n d_output = self._da(output) * errors\n d_hidden = self._da(hidden) * dot(d_output, self.W_output[:-1].T)\n\n n_samples = inputs.shape[0]\n bias = ones((n_samples, 1))\n # Update momentum and weights\n self.V_output = self.output_learning_rate * dot(c_[hidden, bias].T, d_output) / n_samples\n self.W_output+= self.V_output\n\n self.V_hidden = self.hidden_learning_rate * dot(c_[inputs, bias].T, d_hidden) / n_samples\n self.W_hidden+= self.V_hidden", "def forward_backward_prop(data, labels, params, dimensions):\n\n ### Unpack network parameters (do not modify)\n ofs = 0\n Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])\n\n W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H))\n ofs += Dx * H\n b1 = 
np.reshape(params[ofs:ofs + H], (1, H))\n ofs += H\n W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))\n ofs += H * Dy\n b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))\n\n #print W2.shape, b2.shape\n\n #print len(W1), data[0], b1, len(data)\n\n ### YOUR CODE HERE: forward propagation\n #Eg, find the cost function. Save some intermediate stuff though, seems like it'd be useful\n #h = sigmoid(x * w1 + b1)\n # y = (softmax( h * w2 + b2)\n # hence the cost function will be labels * log(y) and then sum it all up\n\n z_1 = np.matrix(data) * W1 + b1\n h = sigmoid(z_1)\n y_prime = softmax(h * W2 + b2)\n logs = np.log(y_prime)\n\n #print y_prime.shape\n\n #print np.array(logs) * labels\n\n cost = - np.sum(np.array(logs) * labels, axis = 1)\n cost = np.sum(cost) # lets add up each instance fo the cost for now and see what happens\n\n # My question is then do we just sum up the costs of each function\n #print cost #somethign is printing so I'm gonan say i'm a genius right here duh\n\n #Cost(y, y') = -sum of (y * log Y')\n ### END YOUR CODE\n\n ### YOUR CODE HERE: backward propagation\n\n # you'll need gradients for each parameter except for the input vectors. Right now this isn't even a word2vec\n delta_1 = y_prime - labels\n delta_2 = delta_1 * W2.T\n #print sigmoid_grad(h).shape\n delta_3 = np.array(delta_2) * sigmoid_grad(h)\n\n gradW2 = np.array(h.T * delta_1) # i dunno or its reverse OMG I HASTE EVERYONE why is it that np.array fixes everything. Sigh\n gradb2 = np.array(np.sum(delta_1, axis=0)) # main issue is that this is a 20 x 5 vector when it should be a 1 x 5\n gradW1 = data.T.dot(delta_3)\n gradb1 = np.sum(delta_3, axis=0) # this should be 1 x10 not 20 x 5\n\n\n\n ### END YOUR CODE\n\n #print gradW1, gradW1.flatten()\n # print 'jee'\n\n ### Stack gradients (do not modify)\n grad = np.concatenate((\n gradW1.flatten(),\n gradb1.flatten(),\n gradW2.flatten(),\n gradb2.flatten())\n )\n #print grad\n #print cost\n return cost, grad", "def scale_loss_and_gradients(loss: torch.Tensor, optimizer, model, args) -> torch.Tensor:\n\tdataset_names = list(args.dataset_gpu_mapping.keys())\n\tloss_i_tensor_list = all_gather_create_tensor_list(tensor=loss, ngpus_per_node=args.ngpus_per_node)\n\tdataset_loss_dict = reduce_to_dict_per_dataset(loss_i_tensor_list, args.dataset_gpu_mapping)\n\n\toptimizer.zero_grad()\n\t# Independent: each process will only have gradients with respect to its own subset of the minibatch\n\n\t# Under ddp.no_sync() context, this is doing an independent backward op\n\tassert not model.require_backward_grad_sync\n\tloss.backward()\n\n\tper_dataset_per_param_dict = {}\n\t# list of all gradients, per each dataset\n\tdataset_allgrads = defaultdict(list)\n\t# accumulate the gradients per each task\n\n######################################## print out unsynced gradients\n\t# for p_name, param in model.named_parameters():\n\t# \tif param.grad is not None:\n\t# \t\t# grad_i_tensor_list = all_gather_create_tensor_list(tensor=param.grad, ngpus_per_node=args.ngpus_per_node)\n\t# \t\t#print(f'grad_i_tensor_list for {p_name}: ', grad_i_tensor_list)\n\t# \t\t# dataset_grad_p_dict = reduce_to_dict_per_dataset(grad_i_tensor_list, args.dataset_gpu_mapping)\n\t# \t\t# per_dataset_per_param_dict[p_name] = dataset_grad_p_dict\n\t# \t\tfor dname in dataset_names:\n\t# \t\t\tdataset_allgrads[dname] += [param.grad.clone().flatten()] # TODO: remove the flatten??\n\t# for dname in dataset_names:\n\t# \tdataset_allgrads[dname] = torch.cat(dataset_allgrads[dname])\n\n\t# for dname in dataset_names:\n\t# 
\tnorm = torch.norm(dataset_allgrads[dname]).item()\n\t# \targs.logger.info(f'rank: {args.rank}, {dname}: norm {norm}')\n\t# no need to sort these now, names are unique\n##########################################\n\tdataset_allgrads = defaultdict(list)\n\tfor p_name, param in model.named_parameters():\n\t\tif param.grad is not None:\n\t\t\tgrad_i_tensor_list = all_gather_create_tensor_list(tensor=param.grad, ngpus_per_node=args.ngpus_per_node)\n\t\t\t#print(f'grad_i_tensor_list for {p_name}: ', grad_i_tensor_list)\n\t\t\tdataset_grad_p_dict = reduce_to_dict_per_dataset(grad_i_tensor_list, args.dataset_gpu_mapping)\n\t\t\tper_dataset_per_param_dict[p_name] = dataset_grad_p_dict\n\n\t\t\tfor dname in dataset_names:\n\t\t\t\tdataset_allgrads[dname] += [dataset_grad_p_dict[dname].clone().flatten()] # TODO: remove the flatten??\n\t\n\tcurrent_ns_time = lambda: int(round(time.time() * 1e9))\n\n\tscales = {}\n\n\t# sol, min_norm = MinNormSolver.find_min_norm_element([dataset_allgrads[d] for d in dataset_names])\n\t# for i, d in enumerate(dataset_names):\n\t# \tscales[d] = float(sol[i])\n\t\t# args.logger.info(f'{d}, {scales[d]}')\n\n\tfor dname in dataset_names:\n\t\tdataset_allgrads[dname] = torch.cat(dataset_allgrads[dname])\n\n\t# Optionally, could normalize all gradients here.\n\tfor dname, grad_list in dataset_allgrads.items():\n\t\t_, grad_norm = normalize_tensor_list(grad_list) # dataset_allgrads[dname]\n\t\tif dist.get_rank() == 0:\n\t\t\tprint(f'Gradient norms: {dname}: $ {grad_norm:.2f} $, ns = $ {current_ns_time()} $')\n\n\t# args.logger.info(dataset_names)\n\t# args.logger.info(dataset_allgrads.keys())\n\n\n\tsol, min_norm = MinNormSolverNew.find_min_norm_element([dataset_allgrads[d] for d in dataset_names])\n\tfor i, d in enumerate(dataset_names):\n\t\tscales[d] = float(sol[i])\n\n\t# args.logger.info(f'{scales}')\n\n\t# Scaled back-propagation, we must preserve gradients so we will not call optimizer.zero_grad() again\n\tfor p_name, param in model.named_parameters():\n\t\tif param.grad is not None:\n\t\t\t# Instead of a second backward pass, just use the results of the original backward pass\n\t\t\tparam.grad = scaled_reduce_dict_to_tensor(per_dataset_per_param_dict[p_name], dataset_names, scales)\n\n\t# Multi-task loss -- adding each dataset's scaled loss.\n\tloss = scaled_reduce_dict_to_tensor(dataset_loss_dict, dataset_names, scales)\n\treturn loss, scales", "def backward(ctx, grad_output):\n loss, reg, u, lbda = ctx.saved_tensors\n\n device = u.device\n\n # do clever computations\n eps = 1e-10\n grad, = torch.autograd.grad(loss, u, only_inputs=True,\n retain_graph=True)\n x = (u - eps * grad).data\n lbda = lbda.data\n\n prox_x = check_tensor(\n np.array([prox_tv.tv1_1d(xx, eps * lbda) for xx in x]),\n device=device,\n )\n grad_u = (u - prox_x) / eps\n grad_lbda = reg.clone()\n return (torch.ones(0), grad_u, grad_lbda)", "def affine_batchnorm_relu_forward(x, w, b, gamma, beta, bn_params):\n af_out, af_cache = affine_forward(x, w, b)\n bf_out, bf_cache = batchnorm_forward(af_out, gamma, beta, bn_params)\n out, relu_cache = relu_forward(bf_out)\n \n cache = (af_cache, bf_cache, relu_cache)\n return out, cache", "def backward(ctx, grad_output):\n\n # This is a pattern that is very convenient - at the top of backward\n # unpack saved_tensors and initialize all gradients w.r.t. inputs to\n # None. 
Thanks to the fact that additional trailing Nones are\n # ignored, the return statement is simple even when the function has\n # optional inputs.\n # input, weight, bias = ctx.saved_variables\n\n return grad_output", "def forward_propagation(X, parameters):\n L= len(parameters)//2\n AL = X\n for i in range(1,L):\n A_Prev = AL\n z = tf.add(tf.matmul(parameters[\"W\"+str(i)], A_Prev),parameters[\"b\"+str(i)]) \n z= tf.layers.batch_normalization (z,axis =0, center =True, scale = True, training= True)\n #z=BatchNormalization(z, is_training = True)\n AL = tf.nn.relu(z)\n z = tf.add(tf.matmul(parameters[\"W\" +str(L)],AL),parameters[\"b\"+str(L)])\n #z=BatchNormalization(z, is_training = True)\n z= tf.layers.batch_normalization (z, axis=0,center =True, scale = True, training= True)\n return z", "def backward(self, grad_out):\n\n # *********************************************\n # check this with torch.autograd.gradcheck !!!!\n # *********************************************\n\n k, a, m, y, targets = self.saved_tensors\n b = 1.0 - a\n\n features = y.numpy()\n labels = targets.numpy()\n\n loss, counts, centers, l_intra, inter_indices, l_inter, d = self.compute_loss(features, labels, k.numpy(), a.numpy(), b.numpy(), m.numpy())\n\n grad_inter = torch.FloatTensor(y.size())\n grad_intra = torch.FloatTensor(y.size())\n\n idx1 = inter_indices[0]\n idx2 = inter_indices[1]\n grad_inter[idx1] = torch.from_numpy(0.5 / (counts[idx1]) * np.abs(centers[idx1] - centers[idx2]))\n grad_inter[idx2] = torch.from_numpy(0.5 / (counts[idx2]) * np.abs(centers[idx2] - centers[idx1]))\n\n # compute intra class gradients with respect to xi, xj\n # only nonzero for these two values\n\n # *********************************************************\n # HOW TO COMPUTE GRADIENTS WITH RESPECT TO MULTIPLE SAMPLES\n # WHEN LOSS IS JUST COMPUTED OVERALL????\n # *********************************************************\n\n for idx in range(y.size()[1]):\n denom = np.array([np.power(d[idx,0]*np.sum(d[idx,:]),2)])\n grad = 2*k.double() / torch.from_numpy(denom)\n for entry in range(y.size()[0]):\n grad_intra[entry, idx] = grad[0]\n\n # compute inter class gradients with respect to xq, xr\n # only nonzero for these two values\n\n # ****************************************\n # SOMEHOW THE GRADIENT IS WAY TOO BIG ****\n # ****************************************\n grad_in = a*grad_intra + b*grad_inter\n print(grad_in)\n return grad_in, torch.DoubleTensor([0]), torch.DoubleTensor([0]), torch.DoubleTensor([0]), torch.DoubleTensor([0])", "def grad_step(self, data, optimizer):\n\n running_loss = 0\n \n for i, batch in enumerate(data):\n optimizer.zero_grad()\n \n # print(batch)\n\n nums, labels = batch\n nums = nums.transpose(0,1)\n # nums = nums.squeeze(1).reshape((-1, 784))\n hidden = self.init_hidden(nums.size(1))\n # print(nums.shape)\n # # forward\n out, _ = self(nums, hidden)\n # loss = -self.obs.distr(out).log_prob(labels).mean()\n # print(out.shape)\n # print(labels.shape)\n loss = nn.BCEWithLogitsLoss()(out.squeeze(), labels.squeeze())\n # loss = -self.obs.distr(out).log_prob(labels).sum()\n \n # loss = -loglihood(self, nums, labels)\n # foo = self(nums)[0]\n # loss = nn.NLLLoss(reduction='sum')(foo, labels)\n\n # optimise\n loss.backward()\n optimizer.step()\n \n running_loss += loss.item()\n \n return running_loss/(i+1)", "def on_backward_end(self, batch):\n if self.updater == \"backward\":\n grads = OrderedDict((name, param.grad.data.cpu(\n )) for name, param in self.model.model.named_parameters() if param.grad is not None)\n 
try:\n self.update(grads)\n except KeyboardInterrupt:\n raise\n except:\n pass", "def Batchnorm(name, axes, inputs, is_training=None, stats_iter=None, update_moving_stats=True, fused=True, labels=None, n_labels=None):\n if axes != [0,2,3]:\n raise Exception('unsupported')\n batch_mean, batch_var = tf.nn.moments(inputs, axes, keep_dims=True)\n shape = batch_mean.get_shape().as_list() # shape is [1,n,1,1]\n offset_m = lib.param(name+'.offset', np.zeros([n_labels,shape[1]], dtype='float32'))\n scale_m = lib.param(name+'.scale', np.ones([n_labels,shape[1]], dtype='float32'))\n offset = tf.nn.embedding_lookup(offset_m, labels)\n # offset = tf.Print(offset,['offset',offset])\n scale = tf.nn.embedding_lookup(scale_m, labels)\n # scale = tf.Print(scale,['scale',scale])\n\n moving_mean = lib.param(name + '.moving_mean', np.zeros(batch_mean.get_shape(), dtype='float32'), trainable=False)\n moving_variance = lib.param(name + '.moving_variance', np.ones(batch_var.get_shape(), dtype='float32'),trainable=False)\n\n def _batch_norm_training():\n return tf.nn.batch_normalization(inputs, batch_mean, batch_var, offset[:,:,None,None], scale[:,:,None,None], 1e-5)\n\n def _batch_norm_inference():\n # Version which blends in the current item's statistics\n mean = moving_mean[None, :, None, None]\n var = moving_variance[None, :, None, None]\n '''\n batch_size = tf.cast(tf.shape(inputs)[0], 'float32')\n mean, var = tf.nn.moments(inputs, [2,3], keep_dims=True)\n mean = ((1./batch_size)*mean) + (((batch_size-1.)/batch_size)*moving_mean)[None,:,None,None]\n var = ((1./batch_size)*var) + (((batch_size-1.)/batch_size)*moving_variance)[None,:,None,None]\n '''\n return tf.nn.batch_normalization(inputs, mean, var, offset[:,:,None,None], scale[:,:,None,None],\n 1e-5), mean, var\n\n if is_training is None:\n outputs = _batch_norm_training()\n else:\n if is_training:\n outputs = _batch_norm_training()\n else:\n outputs = _batch_norm_inference()\n\n if update_moving_stats:\n no_updates = lambda: outputs\n\n def _force_updates():\n \"\"\"Internal function forces updates moving_vars if is_training.\"\"\"\n float_stats_iter = tf.cast(stats_iter, tf.float32)\n update_moving_mean = tf.assign(moving_mean,\n ((float_stats_iter / (float_stats_iter + 1)) * moving_mean) + (\n (1 / (float_stats_iter + 1)) * batch_mean))\n update_moving_variance = tf.assign(moving_variance,\n ((float_stats_iter / (float_stats_iter + 1)) * moving_variance) + (\n (1 / (float_stats_iter + 1)) * batch_var))\n with tf.control_dependencies([update_moving_mean, update_moving_variance]):\n return tf.identity(outputs)\n\n if is_training:\n outputs = _force_updates()\n else:\n outputs = no_updates()\n\n return outputs" ]
[ "0.71418196", "0.6997701", "0.6994699", "0.69764066", "0.69122756", "0.6845697", "0.68414086", "0.67535686", "0.6739037", "0.65906703", "0.6587759", "0.6582729", "0.65654385", "0.65509117", "0.65476483", "0.6520883", "0.65095323", "0.65002865", "0.647348", "0.6422956", "0.64102554", "0.6389929", "0.6352189", "0.634879", "0.63341", "0.6311153", "0.63050455", "0.62979656", "0.62898207", "0.624764", "0.62397516", "0.6233976", "0.6218258", "0.6202396", "0.6201745", "0.62012345", "0.6193141", "0.6183942", "0.617889", "0.6168085", "0.6167142", "0.61660206", "0.61612767", "0.6150971", "0.61476487", "0.61438805", "0.61389834", "0.6134754", "0.6134615", "0.6106106", "0.6080688", "0.6066687", "0.6066687", "0.60631496", "0.60453236", "0.6040902", "0.6037798", "0.6034209", "0.603191", "0.60306656", "0.6025202", "0.60217255", "0.6021257", "0.60182786", "0.6018075", "0.60155743", "0.60155743", "0.5993121", "0.59893453", "0.59770924", "0.5947299", "0.59060574", "0.5887426", "0.588581", "0.5879947", "0.58722335", "0.5872226", "0.5864531", "0.58619994", "0.58554333", "0.5848927", "0.5844464", "0.5843124", "0.5841572", "0.58402115", "0.5839327", "0.58269113", "0.5821866", "0.5819869", "0.58186245", "0.58086264", "0.5795249", "0.5790446", "0.57892495", "0.5785826", "0.57857114", "0.57828206", "0.57824904", "0.5782433", "0.57718223" ]
0.6634307
9
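One of the code strings in the row that closes above computes the cost and gradients of a one-hidden-layer network (sigmoid hidden layer, softmax output) with debugging comments left in. A minimal cleaned-up sketch of that forward/backward computation, assuming NumPy, one-hot labels, and standard sigmoid and softmax helpers; every name below is illustrative rather than taken from the dataset:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def softmax(z):
    z = z - z.max(axis=1, keepdims=True)  # shift for numerical stability
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)

def two_layer_cost_and_grads(data, labels, W1, b1, W2, b2):
    # Forward pass: data is (N, Dx), labels is (N, Dy) one-hot.
    z1 = data.dot(W1) + b1             # (N, H)
    h = sigmoid(z1)                    # (N, H)
    y_hat = softmax(h.dot(W2) + b2)    # (N, Dy)
    cost = -np.sum(labels * np.log(y_hat))

    # Backward pass: cross-entropy on top of softmax gives y_hat - labels.
    delta1 = y_hat - labels                      # (N, Dy)
    gradW2 = h.T.dot(delta1)                     # (H, Dy)
    gradb2 = delta1.sum(axis=0, keepdims=True)   # (1, Dy)
    delta2 = delta1.dot(W2.T) * h * (1.0 - h)    # back through the sigmoid, (N, H)
    gradW1 = data.T.dot(delta2)                  # (Dx, H)
    gradb1 = delta2.sum(axis=0, keepdims=True)   # (1, H)
    return cost, (gradW1, gradb1, gradW2, gradb2)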
Performs the forward pass for dropout.
def dropout_forward(x, dropout_param): p, mode = dropout_param['p'], dropout_param['mode'] if 'seed' in dropout_param: np.random.seed(dropout_param['seed']) mask = None out = None if mode == 'train': ####################################################################### # TODO: Implement training phase forward pass for inverted dropout. # # Store the dropout mask in the mask variable. # ####################################################################### mask = np.random.random_sample(x.shape) mask = mask < p out = x * mask ####################################################################### # END OF YOUR CODE # ####################################################################### elif mode == 'test': ####################################################################### # TODO: Implement the test phase forward pass for inverted dropout. # ####################################################################### out = np.empty_like(x) np.copyto(out,x) ####################################################################### # END OF YOUR CODE # ####################################################################### cache = (dropout_param, mask) out = out.astype(x.dtype, copy=False) return out, cache
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward( self, x ):\n x = x + self.pe[ :x.size(0), : ]\n return self.dropout( x )", "def forward(self, x):\n x = x + self.pe[: x.size(0), :]\n return self.dropout(x)", "def forward(self, x):\n\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)", "def forward(self, x):\n\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)", "def forward(self, x):\n\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)", "def forward(self, x):\n\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)", "def dropout_forward(x, dropout_param):\n p, mode = dropout_param['p'], dropout_param['mode']\n if 'seed' in dropout_param:\n np.random.seed(dropout_param['seed'])\n\n mask = None\n out = None\n\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase forward pass for inverted dropout. #\n # Store the dropout mask in the mask variable. #\n #######################################################################\n mask = np.random.uniform(0,1,x.shape)# / p\n mask[mask<=p]=1\n mask[mask<1]=0\n out = x * mask\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test phase forward pass for inverted dropout. #\n #######################################################################\n out = x*p\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache", "def forward(self):\n pass", "def forward(self):\n pass", "def dropout_forward(x, dropout_param):\n p, mode = dropout_param['p'], dropout_param['mode']\n if 'seed' in dropout_param:\n np.random.seed(dropout_param['seed'])\n\n mask = None\n out = None\n\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase forward pass for inverted dropout. #\n # Store the dropout mask in the mask variable. #\n #######################################################################\n mask = (np.random.rand(*x.shape) < p).astype(int)\n out = mask * x\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test phase forward pass for inverted dropout. #\n #######################################################################\n out = x * p\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache", "def dropout_forward(x, dropout_param):\n p, mode = dropout_param['p'], dropout_param['mode']\n if 'seed' in dropout_param:\n np.random.seed(dropout_param['seed'])\n\n mask = None\n out = None\n\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase forward pass for inverted dropout. #\n # Store the dropout mask in the mask variable. 
#\n #######################################################################\n mask = np.random.binomial([np.ones(x.shape)], p)[0] == 0\n out = (x * mask)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test phase forward pass for inverted dropout. #\n #######################################################################\n out = x\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache", "def forward(self, output, target):\n raise NotImplementedError", "def forward(self, x):\n\n # x = [batch size, seq len, hid dim]\n\n x = self.dropout(torch.relu(self.fc_1(x)))\n\n # x = [batch size, seq len, pf dim]\n\n x = self.fc_2(x)\n\n # x = [batch size, seq len, hid dim]\n\n return x", "def forward_pass(self):", "def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = x + self.pe[: x.size(0)]\n return self.dropout(x)", "def forward(self, x): \n out = self.layer1(x)\n out = self.layer2(out)\n\n out = out.reshape(out.size(0), -1)\n \n out = self.dropout(out)\n out = self.fc1(out)\n out = self.fc2(out)\n \n return out", "def forward(self, x):\n pass", "def forward(self):\n raise NotImplemented", "def forward(self):\n raise NotImplemented", "def forward(self):\n raise NotImplemented", "def forward(self, x):\n x, self.hidden = self.gru(x, self.hidden)\n self.detach_hidden()\n x = self.dropout(x)\n x = self.out(x)\n return x", "def forward(self, x):\n if len(self.convs) == 0:\n return x\n x = x.contiguous()\n for c, n in zip(self.convs, self.norms):\n x = c(x.permute(0, 2, 1)) # (B, C, T)\n x = n(x.permute(0, 2, 1)) # (B, T, C)\n d = torch.nn.functional.dropout(x, p=self.dropout, training=self.training)\n x = torch.relu(d)\n return d", "def forward(self)->None:", "def forward(self, *args, **kwargs):\n pass", "def forward(self, x, dropout=False):\n\n # Performs forward propagation with the encoder.\n x = self.encoder.extract_features(x)\n x = self.encoder_pool(x)\n x = x.view(x.shape[0], -1)\n\n # Applies dropout to the model is selected.\n if dropout:\n x = F.dropout(x, self.drop_rate)\n\n # Performs forward propagation with the hidden layer.\n x = self.hidden(x)\n\n # Applies dropout to the model is selected.\n if dropout:\n x = F.dropout(x, self.drop_rate)\n\n # Gets the output logits from the output layer.\n return self.classifier(x)", "def forward(self, x, dropout=False):\n\n # Performs forward propagation with the EfficientNet encoder.\n x = self.encoder.extract_features(x)\n x = self.encoder_pool(x)\n x = x.view(x.shape[0], -1)\n\n # Applies dropout if selected.\n if dropout:\n x = F.dropout(x, self.drop_rate)\n\n # Uses the hidden layer.\n x = self.hidden(x)\n\n # Applies dropout if selected.\n if dropout:\n x = F.dropout(x, self.drop_rate)\n\n # Gets the predictive output of the model.\n x1 = self.classifier(x)\n\n # Gets the selective output of the model.\n x2 = F.relu(self.selective_hidden(x))\n x2 = self.selective_batch_norm(x2)\n x2 = torch.sigmoid(self.selective_regression(x2))\n\n # Gets the auxiliary output of the model.\n x3 = self.auxiliary_output(x)\n\n # Returns the outputs of the model.\n return x1, x2, x3", "def dropout_forward(x, 
dropout_param):\r\n p, mode = dropout_param['p'], dropout_param['mode']\r\n if 'seed' in dropout_param:\r\n np.random.seed(dropout_param['seed'])\r\n\r\n mask = None\r\n\r\n if mode == 'train':\r\n mask = (np.random.rand(*x.shape) < (1 - p)) / (1 - p)\r\n out = x * mask\r\n elif mode == 'test':\r\n out = x\r\n\r\n cache = (dropout_param, mask)\r\n out = out.astype(x.dtype, copy=False)\r\n\r\n return out, cache", "def _forward(self, z):\n raise NotImplementedError(\"Forward shouldn't be called!\")", "def forward(self, state):\n x = self.nonlin(self.fc1(self.in_fn(state)))\n x = self.drop_layer(x)\n x = self.nonlin(self.fc2(x))\n x = self.drop_layer(x)\n return self.fc3(x)", "def forward(self, input):\n raise NotImplementedError", "def forward(self, input):\n raise NotImplementedError", "def forward(self, obs):\n\t\tpass", "def forward(self, x, **kwargs):\n pass", "def forward_batch(self,batcher, phase=0):\n pass", "def forward(self, x):\n self.save_net()\n self.perturb_tensors()\n out = self.net.forward(x)\n return out", "def _dropout(self,components,dropout=None):\r\n \r\n if dropout is not None:\r\n components.append(nn.Dropout(dropout))", "def forward(\n self,\n input: Tensor, # [batch, input_size],\n hx: Optional[Tensor] = None, # [batch, hidden_size]\n ) -> Tensor: # [batch, hidden_size]\n if hx is None:\n # Imitate behaviour of parent class: hx is assumed zero if not given.\n hx = torch.zeros(\n input.size(0), self.hidden_size, dtype=input.dtype, device=input.device\n )\n dropped_input = nn.functional.dropout(\n input=input, p=self._dropout, training=self.training\n )\n dropped_hidden = nn.functional.dropout(\n input=hx, p=self._recurrent_dropout, training=self.training\n )\n return super().forward(dropped_input, dropped_hidden)", "def forward(self, input):\n raise NotImplementedError()", "def forward(self, input):\n sl, bs = input.size()\n if bs != self.bs:\n self.bs = bs\n self.reset()\n with set_grad_enabled(self.training):\n emb = self.encoder_with_dropout(input, dropout=self.dropoute if self.training else 0)\n emb = self.dropouti(emb)\n raw_output = emb\n new_hidden,raw_outputs,outputs = [],[],[]\n for l, (rnn,drop) in enumerate(zip(self.rnns, self.dropouths)):\n current_input = raw_output\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n raw_output, new_h = rnn(raw_output, self.hidden[l])\n new_hidden.append(new_h)\n raw_outputs.append(raw_output)\n if l != self.n_layers - 1: raw_output = drop(raw_output)\n outputs.append(raw_output)\n\n self.hidden = repackage_var(new_hidden)\n return raw_outputs, outputs", "def forward(self, distance):\n self.logger.debug(\"forward \" + str(distance))", "def forward(self, *args):\n raise NotImplementedError", "def forward(self, *args):\n raise NotImplementedError", "def forward(self, x, mask):\n context_vector, attn_weights = self.self_mha(x, x, x, mask)\n x = self.layer_norm1(\n F.dropout(x + context_vector, self.dropout, training=self.training))\n\n x = self.layer_norm2(\n F.dropout(x + self.ffn(x), self.dropout, training=self.training))\n return x, attn_weights", "def forward(self, x_in):\r\n # x_out = torch.zeros_like(x_in)\r\n\r\n for layer in self.layers: #Call forward function of each layer in order\r\n x_out = layer.forward(x_in)\r\n # print(\"Forward pass Seq: \", layer, x_in, x_out)\r\n x_in = x_out # output of the layer is passed as input to the next layer\r\n self.temp = x_in\r\n return x_out", "def forward(self, x):\r\n out = x + self.conv_block(x) # add skip connections\r\n return out", "def forward(self, 
*args, **kwargs):\n raise NotImplementedError", "def forward(self, input):\n sl, bs = input.size()\n if bs != self.bs:\n self.bs = bs\n self.reset()\n with set_grad_enabled(self.training):\n embedding = self.encoder_with_dropout(input, \n dropout=self.drop_e if self.training else 0)\n embedding = self.drop_i(embedding)\n raw_output = embedding\n new_hidden, raw_outputs, outputs = [], [], []\n \n for l, (rnn, drop) in enumerate(zip(self.rnns, self.drop_hs)):\n current_input = raw_output\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n raw_output, new_h = rnn(raw_output, self.hidden[l])\n new_hidden.append(new_h)\n raw_outputs.append(raw_output)\n if l != self.nl - 1: raw_output = drop(raw_output)\n outputs.append(raw_output)\n\n self.hidden = repackage_var(new_hidden)\n return raw_outputs, outputs", "def forward(self, x):\n y = self.Dropout(self.PRelu(self.bn(self.conv(x))))\n return y", "def forward(self, unprojected_outs, src_tokens=None, input_tokens=None, possible_translation_tokens=None, select_single=None):\n raise NotImplementedError()", "def forward(self, x):\n out = x + self.conv_block(x) # add skip connections\n return out", "def forward(self, x):\n out = x + self.conv_block(x) # add skip connections\n return out", "def forward(self, x):\n out = x + self.conv_block(x) # add skip connections\n return out", "def forward(self, input):\n sl, bs = input.size()\n if bs != self.bs:\n self.bs = bs\n self.reset()\n with set_grad_enabled(self.training):\n embedding = self.encoder_with_dropout(input, \n dropout=self.drop_e if self.training else 0)\n embedding = self.drop_i(embedding)\n raw_output = embedding\n new_hidden, raw_outputs, outputs = [], [], []\n \n for l, (rnn, drop) in enumerate(zip(self.rnns, self.drop_hs)):\n current_input = raw_output\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n raw_output, new_h = rnn(raw_output, self.hidden[l])\n new_hidden.append(new_h)\n raw_outputs.append(raw_output)\n if l != self.nl - 1: raw_output = drop(raw_output)\n outputs.append(raw_output)\n\n self.hidden = repackage_var(new_hidden)\n return raw_outputs, outputs", "def forward(self, x):\n x = self.pool(x)\n x = self.conv(x)\n x = x.reshape(x.shape[0], -1)\n x = self.relu(self.fc1(x))\n x = self.dropout1(x)\n x = self.fc2(x)\n x = self.dropout2(x)\n x = self.fc3(x)\n x = self.dropout3(x)\n x = self.fc4(x)\n\n return x", "def forward(self, x):\n raise NotImplementedError", "def forward(self, x):\n raise NotImplementedError", "def forward(self, inputs):\r\n #print (len(inputs))\r\n out = self.fc1(inputs)\r\n out = self.fc2(out)\r\n self.out = out\r\n return out\r\n #raise NotImplementedError('Implement the forward method of the model')\r", "def forward(self, forward):\n\n self._forward = forward", "def step_forward(self):", "def forward(self, x: torch.Tensor) -> torch.Tensor:\n out: torch.Tensor = self.ff2(self.drop(self.activation(self.ff1(x))))\n return out", "def forward( self ):\n self._has_change = True\n print( \"Forward\" )", "def forward(self, states):\n raise NotImplementedError()", "def dropout_backward(dout, cache):\n dropout_param, mask = cache\n mode = dropout_param['mode']\n p = dropout_param['p']\n dx = None\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase backward pass for inverted dropout #\n #######################################################################\n dx = dout * mask\n #######################################################################\n 
# END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n dx = dout\n return dx", "def forward(self, obs):\n raise NotImplementedError", "def reduce_dropout(self):\n def reduce_p(layer):\n if isinstance(layer, nn.Dropout):\n layer.p = 0\n self.apply(reduce_p)", "def forward(self, observation: Tensor) -> Tensor:\n pass", "def _forward(self, X, **kwargs):\n raise NotImplementedError()", "def forward(self, *args, **kwargs):\n\n raise NotImplementedError()", "def forward(self, in_tensors: List[Tensor], out_tensors: List[Tensor]):\n pass", "def forward(self, in_tensors: List[Tensor], out_tensors: List[Tensor]):\n pass", "def forward(self, inputs):\n raise NotImplementedError", "def forward(self, input=None):\n if (input is not None) and (self.result is None):\n self.result = self.act(self.drop(self.node(input.view(*self.G.d_in))))\n\n # Pull the input from previous network layers\n elif self.result is None:\n in_result = []\n for n in self.input:\n in_result.append( n() )\n\n # Concatenate input along the last dim\n self.result = self.act(self.drop(self.node(torch.cat(in_result, in_result[0].dim() - 1))))\n\n return self.result.view(*self.G.d_out)", "def forward(self, *inputs):\n raise NotImplementedError", "def forward(self, batch):\n raise NotImplementedError", "def forward(self, batch):\n raise NotImplementedError", "def forward_pass(self, inputs):\n self._rbf_forward(inputs)\n self._slp_forward()\n return self.slp_outputs", "def forward(self, x: Tensor, mask: Tensor) -> Tensor:\n x_norm = self.layer_norm(x)\n h = self.src_src_att(x_norm, x_norm, x_norm, mask)\n h = self.dropout(h) + x\n o = self.feed_forward(h)\n return o", "def forward(self, features=None):\n\n assert (features is None) == (self.in_features is None), \\\n \"Layer has not been properly configured to take in features!\"\n\n in_dim = self.in_features if self.in_features is not None else self.num_nodes\n triples = self.triples\n out_dim = self.out_features\n edge_dropout = self.edge_dropout\n weight_decomp = self.weight_decomp\n num_nodes = self.num_nodes\n num_relations = self.num_relations\n vertical_stacking = self.vertical_stacking\n general_edge_count = int((triples.size(0) - num_nodes)/2)\n self_edge_count = num_nodes\n\n # Apply edge dropout TODO Remove edge dropout from here - Not correct to apply dropout here\n if edge_dropout is not None and self.training:\n assert 'general' in edge_dropout and 'self_loop' in edge_dropout, \\\n 'General and self-loop edge dropouts must be specified!'\n assert type(edge_dropout['general']) is float and 0.0 <= edge_dropout['general'] <= 1.0, \\\n \"Edge dropout rates must between 0.0 and 1.0!\"\n general_edo = edge_dropout['general']\n self_loop_edo = edge_dropout['self_loop']\n triples = drop_edges(triples, num_nodes, general_edo, self_loop_edo)\n\n # Choose weights\n if weight_decomp is None:\n weights = self.weights\n elif weight_decomp == 'basis':\n weights = torch.einsum('rb, bio -> rio', self.comps, self.bases)\n elif weight_decomp == 'block':\n weights = block_diag(self.blocks)\n else:\n raise NotImplementedError(f'{weight_decomp} decomposition has not been implemented')\n\n # Determine whether to use cuda or not\n if weights.is_cuda:\n device = 'cuda'\n else:\n device = 'cpu'\n\n # Stack adjacency matrices (vertically/horizontally)\n adj_indices, adj_size = stack_matrices(\n triples,\n num_nodes,\n num_relations,\n vertical_stacking=vertical_stacking,\n device=device\n )\n\n num_triples = adj_indices.size(0)\n vals = 
torch.ones(num_triples, dtype=torch.float, device=device)\n\n # Apply normalisation (vertical-stacking -> row-wise rum & horizontal-stacking -> column-wise sum)\n sums = sum_sparse(adj_indices, vals, adj_size, row_normalisation=vertical_stacking, device=device)\n if not vertical_stacking:\n # Rearrange column-wise normalised value to reflect original order (because of transpose-trick)\n n = general_edge_count\n i = self_edge_count\n sums = torch.cat([sums[n:2 * n], sums[:n], sums[-i:]], dim=0)\n\n vals = vals / sums\n\n # Construct adjacency matrix\n if device == 'cuda':\n adj = torch.cuda.sparse.FloatTensor(indices=adj_indices.t(), values=vals, size=adj_size)\n else:\n adj = torch.sparse.FloatTensor(indices=adj_indices.t(), values=vals, size=adj_size)\n\n if self.no_hidden:\n assert weights.size() == (num_relations, in_dim)\n else:\n assert weights.size() == (num_relations, in_dim, out_dim)\n\n if self.in_features is None:\n # Featureless\n output = torch.mm(adj, weights.view(num_relations * in_dim, out_dim))\n elif self.no_hidden:\n fw = torch.einsum('ij,kj->kij', features, weights)\n fw = torch.reshape(fw, (self.num_relations * self.num_nodes, in_dim))\n output = torch.mm(adj, fw)\n elif self.vertical_stacking:\n # Adjacency matrix vertically stacked\n af = torch.spmm(adj, features)\n af = af.view(self.num_relations, self.num_nodes, in_dim)\n output = torch.einsum('rio, rni -> no', weights, af)\n else:\n # Adjacency matrix horizontally stacked\n fw = torch.einsum('ni, rio -> rno', features, weights).contiguous()\n output = torch.mm(adj, fw.view(self.num_relations * self.num_nodes, out_dim))\n\n assert output.size() == (self.num_nodes, out_dim)\n \n if self.bias is not None:\n output = torch.add(output, self.bias)\n \n return output", "def forward_pass_on_convolutions(x, target_layer):\n net.features[-1].register_forward_hook(save_target_output)", "def _poputil_remap_deduce_layer_backward(op, grads):\n return grads", "def _post_forward(\n self,\n handles: List[FlatParamHandle],\n reshard_fn: Optional[Callable],\n module: nn.Module,\n input: Any,\n output: Any,\n ) -> Any:\n self._exec_order_data.record_post_forward(handles)\n if reshard_fn is not None:\n reshard_fn()\n # Register pre-backward hooks to unshard the flattened parameters\n # for the gradient computation (if needed)\n output = self._register_pre_backward_hooks(output, handles)\n self.training_state = TrainingState_.IDLE\n for handle in handles:\n handle._training_state = HandleTrainingState.IDLE\n return output", "def forward(self):\n self.img_gen, self.loss_reg, self.parsav = self.net_G(self.input_P1, self.input_P2, self.input_BP1, self.input_BP2, self.input_SPL1, self.input_SPL2)", "def dropout_backward(dout, cache):\n dropout_param, mask = cache\n mode = dropout_param['mode']\n\n dx = None\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase backward pass for inverted dropout #\n #######################################################################\n dx = mask*dout\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n dx = dout\n return dx", "def dropout_backward(dout, cache):\n dropout_param, mask = cache\n mode = dropout_param['mode']\n\n dx = None\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase backward pass for inverted 
dropout #\n #######################################################################\n dx = dout * mask\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n dx = dout\n return dx", "def dropout_backward(dout, cache):\n dropout_param, mask = cache\n mode = dropout_param['mode']\n\n dx = None\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase backward pass for inverted dropout #\n #######################################################################\n dx = dout * mask\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n dx = dout\n return dx", "def forward(self, input, context, state):\n raise NotImplementedError", "def dropout_backward(dout, cache):\r\n dropout_param, mask = cache\r\n mode = dropout_param['mode']\r\n\r\n if mode == 'train':\r\n dx = dout * mask\r\n elif mode == 'test':\r\n dx = dout\r\n return dx", "def forward(self, inputs):\n\n down0 = self.layer_0(inputs=inputs)\n down1 = self.layer_1(inputs=down0)\n down2 = self.layer_2(inputs=down1)\n down3 = self.layer_3(inputs=down2)\n down4 = self.layer_4(inputs=down3)\n\n up1 = self.layer_7(down4, down3)\n\n up2 = self.layer_8(up1, down2)\n\n up3 = self.layer_9(up2, down1)\n\n up4 = self.layer_10(up3, down0)\n\n up5 = self.layer_11(up4)\n return up5", "def optimize(self):\n self.output = self.net.forward(Variable(self.source))\n self.optimizer.zero_grad()\n self.loss = self.loss_function(self.output, Variable(self.target))\n self.loss.backward()\n self.optimizer.step()", "def on_iter_backward(self, runner):\n runner.optimizer.zero_grad()\n runner.loss.backward()\n runner.optimizer.step()", "def forward(self, x):\n x = self._activation(self.fully_connected_1(x))\n x = self._activation(self.fully_connected_2(x))\n x = self.dropout(x)\n x = self._activation(self.fully_connected_3(x))\n x = self._activation(self.fully_connected_4(x))\n x = self.dropout(x)\n x = self._activation(self.fully_connected_5(x))\n return self.fully_connected_out(x)", "def base_forward(self, x):\r\n pass", "def on_iter_forward(self, runner):\n # unpack features into features and targets\n *features, target = runner.batch\n # Forward features\n runner.output = runner.model(*features)\n # Ensure `targetL` and `outputL` are always in a list format.\n targetL = [target] if not isinstance(target, (list, tuple)) else target\n outputL = [runner.output] if not isinstance(runner.output, (list, tuple)) else runner.output\n # Compute loss\n runner.loss = runner.criterion(*outputL, *targetL)\n runner.target = target", "def test(self):\n self.output = self.net.forward(Variable(self.source, volatile=True))\n self.loss = self.loss_function(self.output,\n Variable(self.target, volatile=True))", "def test(self):\n with torch.no_grad():\n self.forward()", "def forward(self, input, target):\n\n #return self.bce(input_, target)\n return self.bce(input, target)", "def post_backward_generator(self):\n pass", "def forward(self, x):\n # define feedforward behavior, applying activations as necessary\n out = self.leaky_relu(self.conv1(x))\n out = self.leaky_relu(self.conv2(out))\n out = self.leaky_relu(self.conv3(out))\n out = self.leaky_relu(self.conv4(out))\n\n out = self.res_blocks(out)\n\n out = 
self.leaky_relu(self.deconv1(out))\n out = self.leaky_relu(self.deconv2(out))\n out = self.leaky_relu(self.deconv3(out))\n\n # tanh applied to last layer\n out = F.tanh(self.out_layer(out))\n out = torch.clamp(out, min=-0.5, max=0.5)\n\n return out", "def Naive_forwardpass(self):\n\n for filter_k in range(0, self.n_filters):\n filter_col = self.im2col(self.filter_map[filter_k].data_mtx)\n for hgt_indx in range(0, self.Output_Height):\n for wdth_indx in range(0, self.Output_Width):\n wdth_start_index = wdth_indx * self.stride_len\n wdth_end_index= wdth_start_index + self.filter_size\n hgt_start_index = hgt_indx * self.stride_len\n hgt_end_index = hgt_start_index + self.filter_size\n trn_img_area = self.input_vol.padded_mtx[:, wdth_start_index:wdth_end_index,\n hgt_start_index:hgt_end_index]\n trn_img_col = self.im2col(trn_img_area)\n self.output_Tensor.data_mtx[filter_k,wdth_indx , hgt_indx] = self.convolution_op(trn_img_col,\n filter_col) + np.sum(self.bias_vol[filter_k].data_mtx)\n return self.output_Tensor", "def forward(self, x):\n sources = list()\n tcb_source = list()\n odm_loc = list()\n odm_conf = list()\n if self.phase == 'test':\n feat_sizes = list()\n\n # apply vgg up to conv4_3 relu and conv5_3 relu\n for k in range(self.conv5_3_layer):\n x = self.vgg[k](x)\n if self.size != 512 and self.size != 320 and self.conv3_3_layer - 1 == k:\n s = self.conv3_3_L2Norm(x)\n sources.append(s)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n if self.conv4_3_layer - 1 == k:\n s = self.conv4_3_L2Norm(x)\n sources.append(s)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n elif self.conv5_3_layer - 1 == k:\n s = self.conv5_3_L2Norm(x)\n sources.append(s)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n\n # apply vgg up to fc7\n for k in range(self.conv5_3_layer, len(self.vgg)):\n x = self.vgg[k](x)\n sources.append(x)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n\n # apply extra layers and cache source layer outputs\n for k in range(len(self.extras)):\n x = self.extras[k](x)\n if self.extra_1_layer - 1 == k:\n sources.append(x)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n if (self.size == 640 or self.size == 5126) and self.extra_2_layer - 1 == k:\n sources.append(x)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n\n # calculate TCB features\n p = None\n for k, v in enumerate(sources[::-1]):\n s = v\n for i in range(3):\n s = self.tcb0[(self.step-k)*3 + i](s)\n if k != 0:\n u = p\n u = self.tcb1[self.step-k](u)\n s += u\n for i in range(3):\n s = self.tcb2[(self.step-k)*3 + i](s)\n p = s\n tcb_source.append(s)\n tcb_source.reverse()\n\n # apply ODM to source layers\n for (x, l, c) in zip(tcb_source, self.odm_loc, self.odm_conf):\n odm_loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n odm_conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n odm_loc = torch.cat([o.view(o.size(0), -1) for o in odm_loc], 1)\n odm_conf = torch.cat([o.view(o.size(0), -1) for o in odm_conf], 1)\n\n if self.phase == \"test\":\n output = (\n odm_loc.view(odm_loc.size(0), -1, 4), # odm loc preds\n self.softmax(odm_conf.view(odm_conf.size(0), -1,\n self.num_classes)), # odm conf preds\n feat_sizes\n )\n else:\n output = (\n odm_loc.view(odm_loc.size(0), -1, 4),\n odm_conf.view(odm_conf.size(0), -1, self.num_classes),\n )\n return output" ]
[ "0.6779948", "0.66298467", "0.6543176", "0.6543176", "0.6543176", "0.6543176", "0.65413773", "0.64989567", "0.64989567", "0.6470937", "0.64692926", "0.64219373", "0.6366126", "0.635455", "0.6328682", "0.6312325", "0.6288312", "0.623391", "0.623391", "0.623391", "0.6223676", "0.62042516", "0.6201141", "0.6183197", "0.6161058", "0.61449677", "0.61304635", "0.6115778", "0.6093804", "0.60661757", "0.60661757", "0.6050194", "0.604874", "0.60140574", "0.6004076", "0.59908557", "0.59833044", "0.5967765", "0.594874", "0.5944036", "0.5942066", "0.5942066", "0.5922498", "0.5903472", "0.5891824", "0.58817816", "0.5859115", "0.5857385", "0.5854062", "0.58520985", "0.58520985", "0.58520985", "0.58452404", "0.58421576", "0.58410263", "0.58410263", "0.5826035", "0.5817095", "0.58047247", "0.5800806", "0.5793757", "0.5790951", "0.5789464", "0.5782558", "0.5774758", "0.5768901", "0.57675725", "0.5764455", "0.5754186", "0.5754186", "0.5750042", "0.5749718", "0.5742305", "0.5733905", "0.5733905", "0.5732946", "0.5729493", "0.5723186", "0.57042223", "0.5702866", "0.5701595", "0.5690375", "0.5687805", "0.56850314", "0.56850314", "0.5677529", "0.5661764", "0.56599915", "0.5656346", "0.5639863", "0.5635013", "0.5630908", "0.5618481", "0.5617762", "0.56130403", "0.5609703", "0.56072956", "0.56046456", "0.55949336", "0.55823725" ]
0.6543027
6
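The row above ends a query/document pair about the dropout forward pass, and the next row covers the backward pass. A minimal standalone sketch of the inverted-dropout convention (rescale by 1/p at train time so the test-time pass is the identity), assuming NumPy and treating p as the keep probability; this is the common convention, not necessarily the one every snippet in these rows follows, and all names are illustrative:

import numpy as np

def inverted_dropout_forward(x, p, train=True, seed=None):
    # Keep each activation with probability p and rescale by 1/p,
    # so the expected output matches x and no scaling is needed at test time.
    if seed is not None:
        np.random.seed(seed)
    if train:
        mask = (np.random.rand(*x.shape) < p) / p
        return x * mask, mask
    return x, None  # test-time pass is the identity

def inverted_dropout_backward(dout, mask):
    # Gradient flows only through the kept units, rescaled by the
    # same 1/p factor already stored in the mask.
    return dout * mask

Usage: out, mask = inverted_dropout_forward(np.ones((2, 4)), p=0.5, train=True, seed=0), then dx = inverted_dropout_backward(np.ones_like(out), mask).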
Perform the backward pass for dropout.
def dropout_backward(dout, cache): dropout_param, mask = cache mode = dropout_param['mode'] dx = None if mode == 'train': ####################################################################### # TODO: Implement training phase backward pass for inverted dropout # ####################################################################### dx = dout * mask ####################################################################### # END OF YOUR CODE # ####################################################################### elif mode == 'test': dx = dout return dx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dropout_backward(dout, cache):\n dropout_param, mask = cache\n mode = dropout_param['mode']\n p = dropout_param['p']\n dx = None\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase backward pass for inverted dropout #\n #######################################################################\n dx = dout * mask\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n dx = dout\n return dx", "def backward_pass(self, grad):\n pass", "def dropout_backward(dout, cache):\n dropout_param, mask = cache\n mode = dropout_param['mode']\n\n dx = None\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase backward pass for inverted dropout #\n #######################################################################\n dx = mask*dout\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n dx = dout\n return dx", "def backward(self, dout):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n for l in range(len(self.layers)-1,-1,-1):\n act_dout = self.activations[l].backward(dout)\n dout = self.layers[l].backward(act_dout)\n ########################\n # END OF YOUR CODE #\n #######################\n\n return", "def backward(self, *output_grads):\n raise NotImplementedError", "def dropout_backward(dout, cache):\r\n dropout_param, mask = cache\r\n mode = dropout_param['mode']\r\n\r\n if mode == 'train':\r\n dx = dout * mask\r\n elif mode == 'test':\r\n dx = dout\r\n return dx", "def backward(self):\n raise NotImplementedError", "def backward(ctx: Any, grad_output: Any) -> Any:\n return grad_output, None", "async def skip_backward(self) -> None:\n return await self.relay(\"skip_backward\")()", "def backward(cls, grad_out, activated_out):\n raise Exception(\"Unimplemented\")", "def backward(cls, grad_out, activated_out):\n raise Exception(\"Unimplemented\")", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\r\n pass", "def backward(self, loss):\n global_timer.my_timer.start_profile(\"BWD\")\n mgr = PatrickStarManager()\n mgr.set_training_stage(TrainingStage.BWD)\n\n for param_fp16 in self.client.chunk_based_param_fp16:\n param_fp16.ps_attr.bwd_used_cnt = 0\n\n self.optimizer.zero_grad()\n if self.loss_scaler:\n self.loss_scaler.backward(loss)\n else:\n loss.backward()\n mgr.update_margin_mem()\n global_timer.my_timer.finish_profile(\"BWD\")", "def backward(self, top, propagate_down, 
bottom):\n\t\tpass", "def backward(self, grad, index):\n pass", "def backward(self, grad_output):\n raise NotImplementedError", "def backward(self):\n raise NotImplemented", "def backward(self):\n raise NotImplemented", "def backward(self):\n raise NotImplemented", "def backward(self):\n self.units = self._units_history.pop()\n self._backward()\n # We must set the utop to previous state immediately, because the utop could be other gate's input unit\n # And other gate's backward could be called before this gate's backward\n self._utop_history.pop()\n if self._utop_history:\n self.utop = self._utop_history[-1]", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(ctx, dy):\n y = ctx.y\n if ctx.eagerly_discard_variables:\n del ctx.y\n for i in range(len(ctx.reversible_blocks) - 1, -1, -1):\n y, dy = ctx.reversible_blocks[i].backward_pass(y, dy, not ctx.eagerly_discard_variables)\n if ctx.eagerly_discard_variables:\n del ctx.reversible_blocks\n return dy, None, None", "def backward_pass(self, loss):\n\n self.optimizer.zero_grad()\n self.optimizer.backward(loss)\n self.optimizer.step()", "def backward_and_step(self, loss):\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()", "def backward(self, gradwrtoutput):\n if Dropout.train_flag:\n gradin = torch.empty(self.data.shape).zero_()\n gradin[self.data > 0] = 1.0\n return gradin*gradwrtoutput\n return gradwrtoutput", "def on_iter_backward(self, runner):\n runner.optimizer.zero_grad()\n runner.loss.backward()\n runner.optimizer.step()", "def back(self, step):\r\n self.forward(-step)", "def backward(self, out_tensors: List[Tensor], in_tensors: List[Tensor]):\n pass", "def post_backward_generator(self):\n pass", "def backward(self, y):\n pass", "def _poputil_remap_deduce_layer_backward(op, grads):\n return grads", "def backwards_de(self, input_, expected, idx):\r\n\r\n\r\n trial = self._mutant(idx, self.F)\r\n self.set_weights_to_layers(trial)\r\n vec_output = self.forward(input_)\r\n trial_loss = torch.mean(self._objective(vec_output, expected,self.device)).item()\r\n \r\n if trial_loss <= self.past_loss[idx] :\r\n self.population[idx] = trial[:]\r\n self.past_loss[idx] = trial_loss", "def backward(self, x_out, x_target):\r\n return 2*(x_out - x_target)", "def go_backward(self):\n command = _build_robovac_command(RobovacModes.GO_BACKWARD, RobovacCommands.MOVE)\n message = self._build_command_user_data_message(command)\n\n self._send_packet(message, False)", "def move_backward(self, distance):\r\n return self.move('back', distance)", "def move_backward():\n pass", "def backward(self, out_grad, input):\n raise NotImplementedError", "def _backward(loss):\n\n loss.backward()", "def _backward(outputs, grad_outputs, retain_graph=False):\n # Collect forward tapes.\n inputs = list(outputs)\n op_tape = tape.OrderedTape()\n graph_leaves = set()\n memo = set()\n while len(inputs) > 0:\n input = inputs.pop(0)\n if id(input) in memo:\n continue\n memo.add(id(input))\n if input._tape:\n op_tape.merge_from(input._tape)\n inputs.extend(input._tape.get_sources())\n input._tape = None\n if input._retains_grad:\n graph_leaves.add(input.id)\n elif input._requires_grad:\n graph_leaves.add(input.id)\n\n # Run backward computations reversely.\n op_defs = op_tape.get_op_defs()\n execute_ws = workspace.get_workspace()\n execute_ws.run_backward(\n op_defs=op_defs,\n targets=[y.id for y in outputs],\n grad_targets=[dy.id for dy in 
grad_outputs],\n sources=list(graph_leaves),\n )\n\n # Free the forward handles if allowed.\n if not retain_graph:\n handle_pool = execute_ws._handle_pool\n for op_def in op_defs:\n handle_pool.release(op_def.name)", "def backward(self, gradient):\n #TODO\n pass", "def backward(self, gradient):\n #TODO\n pass", "def drive_backward(self):\n\n print(f\"{self.make.title()} driving backward.\")", "def move_backward(self, dist):\r\n self.send_command_without_response(f'back {dist}')", "def right_backward(self):\n self.right_motor.run_forever(speed_sp=-self.MAX_SPEED)", "def _backward(self):\n if self.units[0].value > 0:\n self.units[0].gradient += 1 * self.utop.gradient\n else:\n self.units[0].gradient += 0 * self.utop.gradient", "def _poputil_remap_layer_backward(op, grads):\n return grads", "def backward(ctx, grad_output):\n diff, = ctx.saved_tensors\n grad_input = grad_output.clone()\n grad_input = grad_input + diff\n return grad_input", "def dropout_forward(x, dropout_param):\n p, mode = dropout_param['p'], dropout_param['mode']\n if 'seed' in dropout_param:\n np.random.seed(dropout_param['seed'])\n\n mask = None\n out = None\n\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase forward pass for inverted dropout. #\n # Store the dropout mask in the mask variable. #\n #######################################################################\n mask = np.random.uniform(0,1,x.shape)# / p\n mask[mask<=p]=1\n mask[mask<1]=0\n out = x * mask\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test phase forward pass for inverted dropout. 
#\n #######################################################################\n out = x*p\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache", "def backward(self,input,grads):\n\t\traise RuntimeError(\"All subclasses of Module must implement a forward method\")", "def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n grad_input = grad_output.clone()\n grad_input[torch.abs(input) > 1.001] = 0\n return grad_input", "def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n grad_input = grad_output.clone()\n grad_input[torch.abs(input) > 1.001] = 0\n return grad_input", "def backward(self) -> np.ndarray:\n # TODO\n return None", "def backward(ctx, grad_output):\n loss, reg, u, lbda = ctx.saved_tensors\n\n device = u.device\n\n # do clever computations\n eps = 1e-10\n grad, = torch.autograd.grad(loss, u, only_inputs=True,\n retain_graph=True)\n x = (u - eps * grad).data\n lbda = lbda.data\n\n prox_x = check_tensor(\n np.array([prox_tv.tv1_1d(xx, eps * lbda) for xx in x]),\n device=device,\n )\n grad_u = (u - prox_x) / eps\n grad_lbda = reg.clone()\n return (torch.ones(0), grad_u, grad_lbda)", "def backward(self, # type: ignore\n closure_loss: torch.Tensor,\n *args,\n **kwargs) -> torch.Tensor:\n closure_loss = closure_loss.to(self.root_device)\n return super().backward(\n closure_loss,\n *args,\n **kwargs,\n )", "def backward(ctx, grad_output):\n\n # This is a pattern that is very convenient - at the top of backward\n # unpack saved_tensors and initialize all gradients w.r.t. inputs to\n # None. Thanks to the fact that additional trailing Nones are\n # ignored, the return statement is simple even when the function has\n # optional inputs.\n # input, weight, bias = ctx.saved_variables\n\n return grad_output", "def backward_step():\n #print 'a step backward'\n maze.turn_left()\n maze.turn_left()\n if maze.found():\n return maze.found()\n maze.go()\n maze.turn_left()\n maze.turn_left()", "def backward_G(self):\n self.loss_G.backward()", "def dropout_forward(x, dropout_param):\n p, mode = dropout_param['p'], dropout_param['mode']\n if 'seed' in dropout_param:\n np.random.seed(dropout_param['seed'])\n\n mask = None\n out = None\n\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase forward pass for inverted dropout. #\n # Store the dropout mask in the mask variable. #\n #######################################################################\n mask = np.random.random_sample(x.shape)\n mask = mask < p\n out = x * mask\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test phase forward pass for inverted dropout. 
#\n #######################################################################\n out = np.empty_like(x)\n np.copyto(out,x)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache", "def dropout_forward(x, dropout_param):\n p, mode = dropout_param['p'], dropout_param['mode']\n if 'seed' in dropout_param:\n np.random.seed(dropout_param['seed'])\n\n mask = None\n out = None\n\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase forward pass for inverted dropout. #\n # Store the dropout mask in the mask variable. #\n #######################################################################\n mask = np.random.binomial([np.ones(x.shape)], p)[0] == 0\n out = (x * mask)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test phase forward pass for inverted dropout. #\n #######################################################################\n out = x\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache", "def convert_dropout(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n dropout_prob = op.attr(\"dropout_prob\")\n dropout_implementation = op.attr(\"dropout_implementation\")\n if dropout_implementation == \"downgrade_in_infer\":\n out = _op.nn.dropout(x, dropout_prob) * _expr.const(1 - dropout_prob, dtype=\"float32\")\n else:\n out = _op.nn.dropout(x, dropout_prob)\n g.add_node(op.output(\"Out\")[0], out)", "def backward(self, grad_output):\n input, = self.saved_tensors\n grad_input = grad_output.clone()\n grad_input[input < -1] = 0\n grad_input[input > 1] = 0\n return grad_input, None", "def backward(self):\n if self.d_out_d_in is None:\n raise Exception(\"Haven't computed the loss!\")\n return self.d_out_d_in", "def backward(self):\n #print('backward\\r')\n self.linearVector = Vector3(x=-1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def _dropout(self,components,dropout=None):\r\n \r\n if dropout is not None:\r\n components.append(nn.Dropout(dropout))", "def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n grad_input = grad_output.clone()\n grad_input[input < 0] = 0\n return truncate_significand(grad_input, ctx.n), None", "def dropout_forward(x, dropout_param):\n p, mode = dropout_param['p'], dropout_param['mode']\n if 'seed' in dropout_param:\n np.random.seed(dropout_param['seed'])\n\n mask = None\n out = None\n\n if mode == 'train':\n #######################################################################\n # TODO: Implement training phase forward pass for inverted dropout. #\n # Store the dropout mask in the mask variable. 
#\n #######################################################################\n mask = (np.random.rand(*x.shape) < p).astype(int)\n out = mask * x\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test phase forward pass for inverted dropout. #\n #######################################################################\n out = x * p\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache", "def backward(self, top, propagate_down, bottom):\n for ib in range(2):\n if not propagate_down[ib]:\n continue\n ndim = bottom[0].data.shape\n count = ndim[0] * ndim[2] * ndim[3]\n if not self.count:\n bottom[ib].diff[ ... ] = np.zeros_like( bottom[0].data )\n continue\n if top[0].data < 1.\n bottom[ib].diff[ ... ] = np.abs( bottom[0].data - bottom[1].data )\n bottom[ib].diff[ ... ] *= ( 1 - 1.0*self.iter/self.maxiter )\n else:\n bottom[ib].diff[ ... ] = np.ones_like( bottom[ib].data )\n inop = bottom[0].data < bottom[1].data\n bottom[ib].diff[ inop ] *= -1\n \n # ingore false label and repair\n ignore = bottom[1].data <= 0.\n count -= np.sum(ignore)\n bottom[ib].diff[ignore] = 0.\n #normlist\n bottom[ib].diff[...] /= count", "def _Dropout(self, name, drop_prob):\n return super()._Dropout(name, keep_prob=1.0 - drop_prob)", "def backward(self, d_output=None):\n if d_output is None:\n d_output = 1.0\n backpropagate(VariableWithDeriv(self, d_output))", "def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n grad_input = grad_output.clone()\n grad_input[input < 0] = 0\n return grad_input", "def backward_D(self):\n fake_targets = [self.fake_target_pool.query(fake_target).detach() for fake_target in self.fake_target]\n self.loss_D_frame, self.loss_D_frame_real, self.loss_D_frame_fake = self.get_GAN_loss_D_sequential(\n discriminator=self.discriminator,\n real_images=self.real_target,\n fake_images=fake_targets,\n )\n self.loss_D += self.loss_D_frame\n super().backward_D()", "def backward_pass(self):\r\n # the gradient of cross-entropy on top of softmax is (t-y)\r\n back_output = (self.targets - self.y) / self.y.shape[0]\r\n\r\n for layer in reversed(self.layers):\r\n back_output = layer.backward_pass(back_output)", "def forward_backward(self, x):\n raise NotImplementedError()", "def backward(self, grad_output):\n grad_input = grad_output\n for module in reversed(self.modules):\n grad_input = module.backward(grad_input)\n return grad_input", "def backward(cls, grad_out, activated_out):\n new_grad = grad_out.copy()\n new_grad[activated_out == 0] = 0\n return new_grad", "def _wait_for_post_backward(self) -> None:\n assert self._is_root, \"_wait_for_post_backward can only be called on root.\"\n # Root's training state might be backward_pre or backward_post depending on\n # if root parameter's post backward hook was called. The post-backward hook\n # may not have been called if gradient was not computed for this param/FSDP\n # module.\n\n if self._sync_gradients:\n torch.cuda.current_stream().wait_stream(self._streams[\"post_backward\"])\n if self.cpu_offload.offload_params:\n # We need to wait for the non-blocking GPU ->\n # CPU grad transfers to finish. 
We need to do this for GPU -> CPU\n # copies because when grad is on CPU, it won't wait for any CUDA\n # stream to finish GPU -> CPU copies unless we explicitly block the\n # host-side with synchronize().\n torch.cuda.current_stream().synchronize()\n self._exec_order_data.next_iter()\n\n # A backward pass is done, clean up below.\n def _catch_all_reshard(fsdp_module: FullyShardedDataParallel) -> None:\n \"\"\"\n Reshards full parameters that may have not been resharded in\n post_backward_hook. This can happen when an FSDP module's output\n is used in forward so its pre-backward fires unsharding the param,\n but post-backward does not fire since the output was not ultimately\n used in loss computation so FSDP parameter did not get a gradient.\n \"\"\"\n # Note that we wrap resharding logic in a try-catch as a defensive\n # approach, as if an error is thrown, we are in the backwards pass,\n # and autograd would not print out much useful info about the actual\n # error hit.\n try:\n free_unsharded_flat_params: List[bool] = []\n handles_to_reshard: List[FlatParamHandle] = []\n for handle in fsdp_module._handles:\n # TODO: This already-resharded check is brittle:\n # https://github.com/pytorch/pytorch/issues/83956\n already_resharded = (\n handle.flat_param.data_ptr() == handle.flat_param._local_shard.data_ptr()\n )\n if already_resharded:\n continue\n free_unsharded_flat_params.append(self._should_free_unsharded_flat_param(handle))\n handles_to_reshard.append(handle)\n self._reshard(handles_to_reshard, free_unsharded_flat_params)\n except Exception as e:\n p_assert(\n False,\n f\"Got exception while resharding module {fsdp_module}: {str(e)}\",\n raise_assertion_error=False\n )\n raise e\n\n def _finalize_params(fsdp_module: FullyShardedDataParallel) -> None:\n \"\"\"Helper used below on all fsdp modules.\"\"\"\n for handle in fsdp_module._handles:\n p = handle.flat_param\n if p.requires_grad:\n if hasattr(p, \"_post_backward_hook_state\"):\n p_assert(\n len(p._post_backward_hook_state) == 2, # type: ignore[attr-defined]\n \"p._post_backward_hook_state fields are not valid.\"\n )\n p._post_backward_hook_state[1].remove() # type: ignore[attr-defined]\n delattr(p, \"_post_backward_hook_state\")\n # Preserve the gradient accumulation state if not\n # synchronizing: `p.grad` remains the unsharded gradient\n # accumulated from prior `no_sync()` iterations, and\n # `p._saved_grad_shard` remains the sharded gradient from\n # the last synchronized iteration\n if not self._sync_gradients:\n continue\n # Set `p.grad` as needed to ensure optimizer correctness\n # since optimizers operate on the `grad` attribute\n if hasattr(p, \"_cpu_grad\"):\n p_assert(\n p.device == torch.device(\"cpu\"),\n f\"Device mismatch: p={p.device} \" # type: ignore[attr-defined]\n f\"p._cpu_grad={p._cpu_grad}\"\n )\n p.grad = p._cpu_grad # type: ignore[attr-defined]\n elif hasattr(p, \"_saved_grad_shard\"):\n p_assert(\n p.device == p._saved_grad_shard.device, # type: ignore[attr-defined]\n f\"Device mismatch: p={p.device} \" # type: ignore[attr-defined]\n f\"p._saved_grad_shard={p._saved_grad_shard.device}\"\n )\n # Check if post-backward was called for this param (FSDP unit).\n # TODO: This logic will have to be revisited when non-recursive wrapping\n # lands. 
If it was not called, there is no new gradient to accumulate\n if p._post_backward_called:\n p.grad = p._saved_grad_shard\n if fsdp_module._mixed_precision_keep_low_precision_grads():\n p.grad.data = p.grad.to(\n fsdp_module.mixed_precision.param_dtype\n )\n else:\n p_assert(\n not handle.uses_sharded_strategy or not p._post_backward_called,\n \"All sharded parameters that received a gradient \"\n \"should use `_saved_grad_shard`\"\n )\n if hasattr(p, \"_saved_grad_shard\"):\n delattr(p, \"_saved_grad_shard\")\n\n p_assert(\n hasattr(p, '_post_backward_called'),\n \"Expected flag _post_backward_called to be set on param.\"\n )\n # Reset _post_backward_called in preparation for the next iteration.\n p._post_backward_called = False\n\n # Update root and nested FSDP's hooks and flags.\n for m in self.fsdp_modules(self): # includes self\n _finalize_params(m)\n _catch_all_reshard(m)\n m._ran_pre_backward_hook.clear()\n m.training_state = TrainingState_.IDLE\n for handle in m._handles:\n handle._training_state = HandleTrainingState.IDLE\n m._handles_prefetched.clear()\n if m._is_root:\n # reset this flag for cases like \"one forward pass + multiple backward passes\"\n self._post_backward_callback_queued = False\n\n if self._use_param_exec_order_policy() and self._param_exec_order_prep_stage:\n self._param_exec_order_policy_second_iter_init()", "def _poputil_recompute_backward(op, grads):\n return grads", "def backward(self, loss, update_hp_grads=True, clear_lp_grads=False, **bwd_kwargs):\n self.clear_lp_grads()\n loss.backward(**bwd_kwargs)\n\n if update_hp_grads:\n self.update_hp_grads(clear_lp_grads=clear_lp_grads)", "def backward_D(self):\n self.loss_D.backward()", "def _poputil_block_recompute_backward(op, grads):\n return grads", "def backward(self, grad_output):\n input, = self.saved_tensors\n grad_input = grad_output.clone()\n grad_input[input < 0] = 0\n return grad_input", "def backward(self, duration):\n self.set_motor(self.left_motor, 'right', 0.5)\n self.set_motor(self.right_motor, 'left', 0.5)\n time.sleep(duration)", "def backward(self):\n assert self.cache is not None, \"Cannot backprop without forward first.\"\n prob, y = self.cache\n\n dX = prob - y\n if self.reduction == \"mean\":\n m, _ = prob.shape\n dX /= m\n\n # clear cache\n self.cache = None\n\n return dX", "def move_backward(self, distance):\n quad_offset = self.quad_offset_mapping['backward']\n client.moveByVelocityAsync(self.velocity * quad_offset[0], self.velocity * quad_offset[1],\n 0.15, distance/self.velocity).join()\n # if self.logging:\n # self.log_arr.append(\"backward\")" ]
[ "0.7518181", "0.7460461", "0.74584544", "0.7332788", "0.7288746", "0.71464086", "0.7143232", "0.69664955", "0.69397676", "0.69232845", "0.69232845", "0.6907345", "0.6907345", "0.6907345", "0.6907345", "0.6907345", "0.6907345", "0.6907345", "0.6907345", "0.6907345", "0.6907345", "0.6907345", "0.6907345", "0.6907345", "0.689129", "0.6887062", "0.6884955", "0.68117493", "0.6810054", "0.68093985", "0.68093985", "0.68093985", "0.6798869", "0.679116", "0.679116", "0.679116", "0.678329", "0.6730677", "0.66642684", "0.6636568", "0.6636054", "0.6611707", "0.65931875", "0.6556329", "0.65057224", "0.6497655", "0.64955956", "0.64837605", "0.64542276", "0.64207006", "0.639943", "0.6396804", "0.6369241", "0.6355275", "0.63455147", "0.63455147", "0.634165", "0.6334841", "0.63347507", "0.6333243", "0.6332905", "0.63080204", "0.62846184", "0.6281297", "0.6280904", "0.6280904", "0.6272058", "0.6268232", "0.62587947", "0.6246852", "0.6244034", "0.6243774", "0.62403524", "0.62277573", "0.62244797", "0.6219563", "0.62151325", "0.62134296", "0.6210735", "0.6207832", "0.6206817", "0.620162", "0.61958075", "0.6193798", "0.61880845", "0.61793166", "0.61541057", "0.6132215", "0.61258376", "0.6121695", "0.6107281", "0.61019975", "0.6094376", "0.60723275", "0.606885", "0.6050233", "0.60421103", "0.6038186", "0.60184234" ]
0.7471565
2
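For comparison with the dropout forward passes collected in the negatives above, here is a minimal sketch of an inverted-dropout forward pass. It is a sketch under stated assumptions: it takes p to be the keep probability and applies the 1/p rescaling at training time, whereas the snippets above differ on both conventions (some treat p as the drop probability, some rescale at test time instead).

import numpy as np

def inverted_dropout_forward(x, dropout_param):
    # Assumes dropout_param['p'] is the KEEP probability and dropout_param['mode']
    # is 'train' or 'test', mirroring the parameter dict used in the snippets above.
    p, mode = dropout_param['p'], dropout_param['mode']
    if 'seed' in dropout_param:
        np.random.seed(dropout_param['seed'])

    if mode == 'train':
        # Keep each unit with probability p and rescale by 1/p now,
        # so the test-time pass is a plain identity.
        mask = (np.random.rand(*x.shape) < p) / p
        out = x * mask
    else:
        mask = None
        out = x

    out = out.astype(x.dtype, copy=False)
    return out, (dropout_param, mask)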
The input consists of N data points, each with C channels, height H and width W. We convolve each input with F different filters, where each filter spans all C channels and has height HH and width WW. Assume that stride=1 and there is no padding. You can ignore the bias term in your implementation.
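With stride 1 and no padding, the output spatial size follows the standard valid-convolution size formula (stated here for clarity; S denotes the stride and P the zero-padding):

\[
H' = \frac{H + 2P - HH}{S} + 1, \qquad W' = \frac{W + 2P - WW}{S} + 1,
\]

which for S = 1 and P = 0 reduces to H' = H - HH + 1 and W' = W - WW + 1; these are the H_prime and W_prime computed in the implementation below.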
def conv_forward(x, w): out = None ########################################################################### # TODO: Implement the convolutional forward pass. # # Hint: you can use the function np.pad for padding. # ########################################################################### N, C, H, W = x.shape F, C, HH, WW = w.shape H_prime = H - (HH - 1) W_prime = W - (WW - 1) out = np.zeros((N, F, H_prime, W_prime)) for n in range(N): for f in range(F): for i in range(H_prime): for j in range(W_prime): out[n, f, i, j] = np.sum(x[n, :, i:i+HH, j:j+WW] * w[f]) ########################################################################### # END OF YOUR CODE # ########################################################################### cache = (x, w) return out, cache
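A small smoke test for the conv_forward above, assuming SciPy is available; the sizes below are chosen only for illustration. The cross-check uses scipy.signal.correlate because the loop above computes a cross-correlation (the filters are not flipped), which is the usual deep-learning convention.

import numpy as np
from scipy.signal import correlate

N, C, H, W = 2, 3, 6, 7      # illustrative sizes
F, HH, WW = 4, 3, 3

x = np.random.randn(N, C, H, W)
w = np.random.randn(F, C, HH, WW)

out, _ = conv_forward(x, w)

# Stride 1, no padding: output spatial size is (H - HH + 1, W - WW + 1).
assert out.shape == (N, F, H - HH + 1, W - WW + 1)

# Cross-check one (image, filter) pair against SciPy's N-d 'valid' correlation;
# the channel axis collapses to size 1, hence the [0].
ref = correlate(x[0], w[0], mode='valid')[0]
assert np.allclose(out[0, 0], ref)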
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loop_conv(X, W):\n # Go over all five dimensions \n # (#batches x #channels x #height x #width x #dur/length )\n # with filter that has\n # #filters x #channels x #height x #width x #dur/length \n num_filters = W.shape[0]\n filt_channels = W.shape[1]\n filt_height = W.shape[2]\n filt_width = W.shape[3]\n filt_duration = W.shape[4]\n num_batches = X.shape[0]\n input_channels = X.shape[1]\n assert(filt_channels == input_channels)\n out_shape = compute_out_shape(X.shape, W.shape)\n out_height = out_shape[2]\n out_width = out_shape[3]\n out_duration = out_shape[4]\n \n # The output is H :)\n H = np.zeros((out_shape))\n for batch_i in xrange(0, num_batches):\n for filt_i in xrange(0, num_filters):\n for out_x in xrange(0, out_height):\n for out_y in xrange(0, out_width):\n for out_z in xrange(0, out_duration):\n for chan_i in xrange(0, filt_channels):\n for filt_x in xrange(0, filt_height):\n for filt_y in xrange(0, filt_width):\n for filt_z in xrange(0, filt_duration):\n weight = W[filt_i, chan_i, filt_x, filt_y, filt_z]\n input_val = X[batch_i, chan_i, \\\n out_x + filt_x, out_y + filt_y, out_z + filt_z]\n H[batch_i, filt_i, out_x, out_y, out_z] += \\\n weight * input_val\n return H", "def MyConvolve(img, ff):\n result = np.zeros(img.shape)\n x_len = img.shape[0]\n y_len = img.shape[1]\n\n ff = np.flipud(np.fliplr(ff)) # Flip filters\n\n # Apply filter to pixels\n for x in range(1, x_len - 1):\n for y in range(1, y_len - 1):\n # Left column\n top_left = img[x - 1, y - 1] * ff[0, 0]\n left = img[x, y - 1] * ff[1, 0]\n btm_left = img[x + 1, y - 1] * ff[2, 0]\n # Middle column\n top = img[x - 1, y] * ff[0, 1]\n middle = img[x, y] * ff[1, 1]\n btm = img[x + 1, y] * ff[2, 1]\n # Right column\n top_right = img[x - 1, y + 1] * ff[0, 2]\n right = img[x, y + 1] * ff[1, 2]\n btm_right = img[x + 1, y + 1] * ff[2, 2]\n\n result[x, y] = top_left + left + btm_left + top + middle + btm + top_right + right + btm_right\n\n return result", "def conv2d(args):\n inp_ = args[0]\n kernel = args[1]\n stride = args[2]\n padding = args[3]\n (batch_size, in_channels, H, W) = inp_.shape\n (out_channels, in_channels_t, Hk, Wk) = kernel.shape\n Hc = int((H - Hk)/stride)+1\n Wc = int((W - Wk)/stride)+1\n conv_layer = np.zeros((batch_size, out_channels, Hc, Wc))\n for batch_i in range(batch_size):\n for o_chann_i in range(out_channels):\n for in_chann_i in range(in_channels):\n curr_ker = kernel[o_chann_i, in_chann_i, :, :]\n curr_inp = inp_[batch_i, in_chann_i, :, :]\n h_ind = 0\n while h_ind + Hk <= H:\n w_ind = 0\n while w_ind + Wk <= W:\n inp_patch = curr_inp[h_ind:h_ind+Hk, w_ind:w_ind+Wk]\n # Sum the conv_value of all the inp_channels\n conv_layer[batch_i, o_chann_i, h_ind//stride, w_ind//stride] += np.sum(inp_patch*curr_ker)\n w_ind+=stride\n h_ind+=stride\n return conv_layer", "def convolve_channels(images, kernel, padding='same', stride=(1, 1)):\n m = images.shape[0]\n image_h = images.shape[1]\n image_w = images.shape[2]\n filter_h = kernel.shape[0]\n filter_w = kernel.shape[1]\n s1 = stride[0]\n s2 = stride[1]\n\n if padding == 'valid':\n pad_h = 0\n pad_w = 0\n\n if padding == 'same':\n pad_h = int(((image_h - 1) * s1 + filter_h - image_h) / 2) + 1\n pad_w = int(((image_w - 1) * s2 + filter_w - image_w) / 2) + 1\n\n if type(padding) == tuple:\n pad_h = padding[0]\n pad_w = padding[1]\n\n n_dim1 = int((image_h + 2 * pad_h - filter_h) / stride[0]) + 1\n n_dim2 = int((image_w + 2 * pad_w - filter_w) / stride[1]) + 1\n convolve = np.zeros((m, n_dim1, n_dim2))\n new_images = np.pad(images, ((0, 0), (pad_h, 
pad_h), (pad_w, pad_w),\n (0, 0)), mode='constant')\n for x in range(n_dim1):\n for y in range(n_dim2):\n mini_matrix = new_images[:, x * s1: x * s1 + filter_h,\n y * s2: y * s2 + filter_w, :]\n values = np.sum(mini_matrix * kernel,\n axis=1).sum(axis=1).sum(axis=1)\n convolve[:, x, y] = values\n return (convolve)", "def convolve(images, kernels, padding='same', stride=(1, 1)):\n m = images.shape[0]\n h = images.shape[1]\n w = images.shape[2]\n c = images.shape[3]\n kh = kernels.shape[0]\n kw = kernels.shape[1]\n nc = kernels.shape[3]\n sh = stride[0]\n sw = stride[1]\n\n if padding == 'same':\n ph = max((h - 1) * sh + kh - h, 0)\n pt = int(np.ceil(ph / 2))\n pb = pt\n pw = max((w - 1) * sw + kw - w, 0)\n pl = int(np.ceil(pw / 2))\n pr = pl\n elif padding == 'valid':\n pt, pb, pl, pr = 0, 0, 0, 0\n else:\n pt, pb = padding[0], padding[0]\n pl, pr = padding[1], padding[1]\n\n oh = ((h - kh + pt + pb) // sh) + 1\n ow = ((w - kw + pl + pr) // sw) + 1\n\n images = np.pad(images, pad_width=((0, 0), (pt, pb), (pl, pr), (0, 0)),\n mode='constant', constant_values=0)\n\n conv = np.zeros((m, oh, ow, nc))\n for k in range(nc):\n for i in range(oh):\n for j in range(ow):\n aux = images[:, i * sh:i * sh + kh, j * sw:j * sw + kw] \\\n * kernels[:, :, :, k]\n conv[:, i, j, k] = np.sum(aux, axis=(1, 2, 3))\n return conv", "def conv4d(data,filters,bias=None,permute_filters=True,use_half=False):\n b,c,h,w,d,t=data.size()\n\n data=data.permute(2,0,1,3,4,5).contiguous() # permute to avoid making contiguous inside loop \n \n # Same permutation is done with filters, unless already provided with permutation\n if permute_filters:\n filters=filters.permute(2,0,1,3,4,5).contiguous() # permute to avoid making contiguous inside loop \n\n c_out=filters.size(1)\n if use_half:\n output = Variable(torch.HalfTensor(h,b,c_out,w,d,t),requires_grad=data.requires_grad)\n else:\n output = Variable(torch.zeros(h,b,c_out,w,d,t),requires_grad=data.requires_grad)\n \n padding=filters.size(0)//2\n if use_half:\n Z=Variable(torch.zeros(padding,b,c,w,d,t).half())\n else:\n Z=Variable(torch.zeros(padding,b,c,w,d,t))\n \n if data.is_cuda:\n Z=Z.cuda(data.get_device()) \n output=output.cuda(data.get_device())\n \n data_padded = torch.cat((Z,data,Z),0)\n \n\n for i in range(output.size(0)): # loop on first feature dimension\n # convolve with center channel of filter (at position=padding)\n output[i,:,:,:,:,:]=F.conv3d(data_padded[i+padding,:,:,:,:,:], \n filters[padding,:,:,:,:,:], bias=bias, stride=1, padding=padding)\n # convolve with upper/lower channels of filter (at postions [:padding] [padding+1:])\n for p in range(1,padding+1):\n output[i,:,:,:,:,:]=output[i,:,:,:,:,:]+F.conv3d(data_padded[i+padding-p,:,:,:,:,:], \n filters[padding-p,:,:,:,:,:], bias=None, stride=1, padding=padding)\n output[i,:,:,:,:,:]=output[i,:,:,:,:,:]+F.conv3d(data_padded[i+padding+p,:,:,:,:,:], \n filters[padding+p,:,:,:,:,:], bias=None, stride=1, padding=padding)\n\n output=output.permute(1,2,0,3,4,5).contiguous()\n return output", "def conv(n_inputs, n_filters, kernel_size=3, stride=1, bias=False) -> torch.nn.Conv2d:\n return nn.Conv2d(n_inputs, n_filters,\n kernel_size=kernel_size, stride=stride,\n padding=kernel_size//2, bias=bias)", "def conv2D(null,channels,X,stride,kernel_shape,padding = False,initialize_weights = True,*args):\n # filters = dimensionality of output space\n # If padding is enabled, we pad the input with zeros such that the input size\n # remains the same if weights with stride 1 are applied to the input\n if initialize_weights:\n 
kernel = np.random.normal(size = (kernel_shape[0],kernel_shape[1],kernel_shape[2]))*math.sqrt(1/(kernel_shape[0]*kernel_shape[1]*kernel_shape[2])) # Our input\n kernel = torch.FloatTensor(kernel)\n kernel.requires_grad = False\n else:\n kernel = args[0] # weights and bias must be given if initialise weights is disabled\n bias = args[1]\n kernel_shape = kernel.shape\n \n X = X.detach().numpy()\n if padding: # Can only pad during initialization -> weights and input shapes cannot change during feedforward and backpropagation\n if kernel_shape[1] % 2 == 0 and kernel_shape[2] % 2 == 0:\n X = np.pad(X,((0,0),(math.floor(kernel_shape[1]/2)-1,math.floor(kernel_shape[1]/2)),(math.floor(kernel_shape[2]/2),math.floor(kernel_shape[2]/2)-1)), 'symmetric')\n elif kernel_shape[1] % 2 != 0 and kernel_shape[2] % 2 == 0:\n X = np.pad(X,((0,0),(math.floor(kernel_shape[1]/2),math.floor(kernel_shape[1]/2)),(math.floor(kernel_shape[2]/2),math.floor(kernel_shape[2]/2)-1)), 'symmetric')\n elif kernel_shape[1] % 2 == 0 and kernel_shape[2] % 2 != 0:\n X = np.pad(X,((0,0),(math.floor(kernel_shape[1]/2)-1,math.floor(kernel_shape[1]/2)),(math.floor(kernel_shape[2]/2),math.floor(kernel_shape[2]/2))), 'symmetric')\n else:\n X = np.pad(X,((0,0),(math.floor(kernel_shape[1]/2),math.floor(kernel_shape[1]/2)),(math.floor(kernel_shape[2]/2),math.floor(kernel_shape[2]/2))), 'symmetric')\n \n X = torch.FloatTensor(X)\n \n img_shape = X.shape\n \n output_size1 = math.floor((img_shape[1] - kernel_shape[1])/(stride)) + 1\n output_size2 = math.floor((img_shape[2] - kernel_shape[2])/(stride)) + 1\n output_shape = [channels,output_size1,output_size2]\n \n X_im2col,im = im2col(X,kernel,stride)\n \n \n if initialize_weights:\n weight = torch.reshape(kernel,(kernel_shape[0]*kernel_shape[1]*kernel_shape[2],1))\n # weight consists of only one weight vector. But the dimensionality of output space has to be\n # num_filters. 
So we need to stack weight vectors horizontally and create num_filters number of\n # feature maps\n for i in range(channels-1):\n weight2 = np.random.normal(size = (kernel_shape[0]*kernel_shape[1]*kernel_shape[2],1))*math.sqrt(1/(kernel_shape[0]*kernel_shape[1]*kernel_shape[2])) # Our input\n weight2 = torch.FloatTensor(weight2)\n weight2.requires_grad = False\n weight = torch.cat((weight2, weight),1) # do this num_filters - 1 number of times\n conv_output = torch.t(X_im2col).mm(weight)\n bias = torch.Tensor(np.random.normal(size = conv_output.shape))\n conv_output += bias\n conv_output = torch.reshape(conv_output,(output_shape))\n return torch.nn.Parameter(conv_output), torch.nn.Parameter(weight),X_im2col,im, output_shape,bias\n else:\n # Since weights are already initialised, the relevant channels are already dictated in the architecture.\n # Therefore, conv output is just a matmul\n conv_output = torch.t(X_im2col).mm(kernel) + bias\n return torch.nn.Parameter(conv_output),X_im2col", "def convolve_one_image(self,input4D, one_image, image_shape, \n Pstruct, filter_shape,\n image_index,\n channel_index): \n \n \n ## We look at the composition for the first channel in the beginning \n rank = Pstruct[0]['U1'].shape[1]\n fwidth = filter_shape[2]\n fheight = filter_shape[3]\n \n \n # Construct horizontal filters\n #TODO save the filters in the correct shape\n horizontal_filter_shape = (rank, 1, fwidth)\n horizontal_filters = np.ndarray(horizontal_filter_shape)\n horizontal_filters[:, 0, :] = np.transpose(Pstruct[channel_index]['U1']);\n \n # Output is 1 x rank x W x H\n horizontal_conv_out = conv.conv2d(input=one_image, \n filters = horizontal_filters,\n filter_shape = horizontal_filter_shape, \n image_shape = image_shape)\n \n # Construct vertical filters\n vertical_filter_shape = (rank, fheight, 1)\n vertical_filters = np.ndarray(vertical_filter_shape) \n vertical_filters[:,:, 0] = np.transpose(Pstruct[channel_index]['U2']);\n\n initial_n_rows = image_shape[1]\n final_n_rows = initial_n_rows- fwidth + 1\n final_n_cols = image_shape[2] - fheight + 1 \n conv_out = theano.shared(np.zeros((rank, final_n_rows, final_n_cols)))\n for r in range(rank):\n # temp is 1x1x imageW x imageH\n A = conv.conv2d(input = horizontal_conv_out[:,r,:,:], \n filters = vertical_filters[r,:,:],\n filter_shape = (1, fheight, 1), \n image_shape = (1, initial_n_rows, final_n_cols))\n conv_out = T.set_subtensor(conv_out[r,:,:], A[0,:,:])\n \n nbr_filters = Pstruct[0]['U3'].shape[0]\n # Final number of rows and columns \n ## numberof images, number of filters, image width, image height\n alphas = Pstruct[channel_index]['U3'] \n for f in range(nbr_filters): \n temp = theano.shared(np.zeros((final_n_rows, final_n_cols)))\n for r in range(rank):\n temp = temp + conv_out[r, :,:]* alphas[f, r] * Pstruct[channel_index]['lmbda'][r]; \n input4D =T.set_subtensor(input4D[image_index,f,:,:], temp)\n return input4D", "def convolve_channels(images, kernel, padding='same', stride=(1, 1)):\n m, h, w, c = images.shape\n KernelHeight, kernelWidth, c = kernel.shape\n StrideHeight, StrideWidth = stride\n\n if padding == 'valid':\n PaddingHeight = 0\n PaddingWidth = 0\n elif padding == 'same':\n PaddingHeight = int(\n (((h - 1) * StrideHeight + KernelHeight - h) / 2) + 1)\n PaddingWidth = int((((w - 1) * StrideWidth + kernelWidth - w) / 2) + 1)\n else:\n PaddingHeight, PaddingWidth = padding\n\n OutputH = int(((h + 2 * PaddingHeight - KernelHeight) / StrideHeight) + 1)\n OutputW = int(((w + 2 * PaddingWidth - kernelWidth) / StrideWidth) + 
1)\n\n ImagePadded = np.pad(\n images,\n ((0, 0), (PaddingHeight, PaddingHeight),\n (PaddingWidth, PaddingWidth), (0, 0)),\n 'constant'\n )\n\n output = np.zeros((m, OutputH, OutputW))\n ImageRange = np.arange(m)\n\n for i_OutputH in range(OutputH):\n for i_OutputW in range(OutputW):\n s_i_OutputH = i_OutputH * StrideHeight\n s_i_OutputW = i_OutputW * StrideWidth\n flt = ImagePadded[ImageRange,\n s_i_OutputH:KernelHeight + s_i_OutputH,\n s_i_OutputW:kernelWidth + s_i_OutputW,\n :]\n output[ImageRange, i_OutputH, i_OutputW] = np.sum(\n flt * kernel, axis=(1, 2, 3))\n return output", "def convolve_channels(images, kernel, padding='same', stride=(1, 1)):\n m, image_h, image_w, image_c = images.shape\n kernel_h, kernel_w, kernel_c = kernel.shape\n stride_h, stride_w = stride\n\n if isinstance(padding, tuple):\n padding_h, padding_w = padding\n if padding is 'same':\n padding_h = int(((stride_h * image_h)\n - stride_h + kernel_h - image_h) / 2) + 1\n padding_w = int(((stride_w * image_w)\n - stride_w + kernel_w - image_w) / 2) + 1\n if padding is 'valid':\n padding_h, padding_w = 0, 0\n\n output_h = int(((image_h + (2 * padding_h) - kernel_h) / stride_h) + 1)\n output_w = int(((image_w + (2 * padding_w) - kernel_w) / stride_w) + 1)\n conv_output = np.zeros((m, output_h, output_w))\n\n img_m = np.arange(0, m)\n\n images = np.pad(\n images,\n [(0, 0), (padding_h, padding_h), (padding_w, padding_w), (0, 0)],\n mode='constant',\n constant_values=0)\n\n for i in range(output_h):\n for j in range(output_w):\n s_h = (stride_h)\n s_w = (stride_w)\n multiply = images[\n img_m,\n i*s_h:kernel_h+i*s_h,\n j*s_w:kernel_w+j*s_w]\n conv_output[img_m, i, j] = np.sum(\n np.multiply(multiply, kernel), axis=(1, 2, 3))\n return conv_output", "def convolution(\n input, # pylint: disable=redefined-builtin\n filter, # pylint: disable=redefined-builtin\n padding,\n strides=None,\n dilation_rate=None,\n name=None,\n data_format=None,\n filters=None,\n dilations=None): # pylint: disable=g-doc-args\n filter = deprecated_argument_lookup(\"filters\", filters, \"filter\", filter)\n dilation_rate = deprecated_argument_lookup(\n \"dilations\", dilations, \"dilation_rate\", dilation_rate)\n return convolution_internal(\n input,\n filter,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilations=dilation_rate,\n name=name)", "def dense_conv_forward_2d_fast(\n inp_image: np.ndarray,\n filter: np.ndarray,\n output: np.ndarray,\n strides,\n padding):\n INPUT_DIMENSIONS = 4\n HEIGHT_IDX = 2 # H\n WIDTH_IDX = 3 # W\n INPUT_CHANNELS_IDX = 1 # C\n OUTPUT_CHANNELS_IDX = 0 # K\n NUM_IMAGES_IDX = 0 # N\n\n x_shape = inp_image.shape\n f_shape = filter.shape\n o_shape = output.shape\n\n if len(x_shape) != INPUT_DIMENSIONS or len(f_shape) != INPUT_DIMENSIONS or len(o_shape) != INPUT_DIMENSIONS:\n raise RuntimeError(\"conv2d: input, filter, and output must all have four dimensions.\")\n\n assert (x_shape[HEIGHT_IDX] % strides[1] == 0, \"Input height is not evenly divisible by stride size.\")\n assert (x_shape[WIDTH_IDX] % strides[0] == 0, \"Input width is not evenly divisible by stride size.\")\n assert (x_shape[INPUT_CHANNELS_IDX] == f_shape[INPUT_CHANNELS_IDX],\n \"Number of channels in input does not match number channels expected by convolution.\")\n assert (o_shape[OUTPUT_CHANNELS_IDX] == f_shape[OUTPUT_CHANNELS_IDX],\n \"Number of channels in output does not match number channels expected by convolution.\")\n\n N = x_shape[NUM_IMAGES_IDX]\n H = x_shape[HEIGHT_IDX]\n W = x_shape[WIDTH_IDX]\n K = 
f_shape[OUTPUT_CHANNELS_IDX]\n C = f_shape[INPUT_CHANNELS_IDX]\n R = f_shape[HEIGHT_IDX]\n S = f_shape[WIDTH_IDX]\n P = (x_shape[HEIGHT_IDX] - f_shape[HEIGHT_IDX]) / strides[1] + 1 # output height\n Q = (x_shape[WIDTH_IDX] - f_shape[WIDTH_IDX]) / strides[0] + 1 # output width\n\n assert (o_shape[HEIGHT_IDX] == P, f\"Output height should be {P}.\")\n assert (o_shape[WIDTH_IDX] == Q, f\"Output width should be {Q}.\")\n\n # output[...] = 0\n\n for i in range(0, (H - R) + 1, strides[1]):\n y = int(i / strides[1])\n for j in range(0, (W - S) + 1, strides[0]):\n x = int(j / strides[1])\n inp_view = inp_image[:, :, i:i + R, j:j + S]\n for k in range(0, K):\n f_slice = filter[k, :, :, :]\n prod = np.sum(inp_view * f_slice, (INPUT_CHANNELS_IDX, HEIGHT_IDX, WIDTH_IDX))\n output[:, k, y, x] = prod", "def convolve(images, kernels, padding='same', stride=(1, 1)):\n m, h, w = images.shape[:3]\n kh, kw, c, nc = kernels.shape\n sh, sw = stride\n if type(padding) is tuple:\n ph, pw = padding\n elif padding == 'valid':\n ph, pw = 0, 0\n else:\n ph = (((h - 1) * sh + kh - h) // 2) + 1\n pw = (((w - 1) * sw + kw - w) // 2) + 1\n out_images = np.zeros((m, (h - kh + (2 * ph))//sh + 1,\n (w - kw + (2 * pw))//sw + 1, nc))\n images = np.pad(images, ((0, 0), (ph, ph), (pw, pw), (0, 0)), 'constant')\n for i in range((h - kh + (2 * ph))//sh + 1):\n for j in range((w - kw + (2 * pw))//sw + 1):\n for n in range(nc):\n out_images[:, i, j, n] = np.sum(kernels[:, :, :, n] * images[\n :, i*sh: i*sh + kh, j*sw: j*sw + kw, :], axis=(1, 2, 3))\n return out_images", "def conv(x, filter_height, filter_width, num_filters, stride_y, stride_x, name, padding='SAME', groups=1):\n # Get number of input channels\n input_channels = int(x.get_shape()[-1])\n\n # Create lambda function for the convolution\n convolve = lambda i, k: tf.nn.conv2d(i, k,\n strides=[1, stride_y, stride_x, 1],\n padding=padding)\n\n with tf.variable_scope(name) as scope:\n # Create tf variables for the weights and biases of the conv layer\n '''\n weights = tf.get_variable('weights', shape=[filter_height,\n filter_width,\n input_channels//groups,\n num_filters])\n biases = tf.get_variable('biases', shape=[num_filters])\n '''\n weights = weights_variable([filter_height, filter_width, input_channels // groups, num_filters])\n biases = biases_variable([num_filters])\n\n if groups == 1:\n conv = convolve(x, weights)\n\n # In the cases of multiple groups, split inputs & weights and\n else:\n # Split input and weights and convolve them separately\n input_groups = tf.split(axis=3, num_or_size_splits=groups, value=x)\n weight_groups = tf.split(axis=3, num_or_size_splits=groups,\n value=weights)\n output_groups = [convolve(i, k) for i, k in zip(input_groups, weight_groups)]\n\n # Concat the convolved output together again\n conv = tf.concat(axis=3, values=output_groups)\n\n # Add biases\n # bias = tf.reshape(tf.nn.bias_add(conv, biases), tf.shape(conv))\n bias = tf.nn.bias_add(conv, biases)\n # bias = tf.reshape(rslt, tf.shape(conv))\n\n # bias = batch_norm(bias, True)\n\n # Apply relu function\n relu = tf.nn.relu(bias, name='relu')\n\n return relu", "def fold(input_f, input_shape, kernel_size=(3,3), stride=(1,1), padding=(0,0)):\n batch_size, in_chan, in_height, in_width = input_shape\n \n height_padded, width_padded = in_height + 2*padding[0], in_width + 2*padding[1]\n input_padded = np.zeros((batch_size, in_chan, height_padded, width_padded), dtype=input_f.dtype)\n \n k, i, j = get_indices(input_shape, kernel_size, stride, padding)\n \n input = 
input_f.reshape(in_chan*kernel_size[0]*kernel_size[1], -1, batch_size)\n input = input.transpose((2, 0, 1))\n np.add.at(input_padded, (slice(None), k, i, j), input)\n if padding == (0,0):\n return input_padded\n return input_padded[:, :, padding[0]: -padding[0], padding[1]: -padding[1]]", "def block1(x, filters, kernel_size=3, stride=1,\n conv_shortcut=True, dilation=1, name=None):\n bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1\n\n if conv_shortcut is True:\n if stride == 1:\n shortcut = layers.Conv2D(4 * filters, 1, strides=stride, use_bias=False,\n name=name + '_0_conv')(x)\n shortcut = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\n name=name + '_0_bn')(shortcut)\n else:\n shortcut = layers.Conv2D(4 * filters, 3, strides=stride, use_bias=False,\n name=name + '_0_conv')(x)\n shortcut = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\n name=name + '_0_bn')(shortcut)\n else:\n shortcut = x\n\n x = layers.Conv2D(filters, 1, use_bias=False, name=name + '_1_conv')(x)\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\n name=name + '_1_bn')(x)\n x = layers.Activation('relu', name=name + '_1_relu')(x)\n\n padding = 'SAME' if stride == 1 else 'VALID'\n x = layers.Conv2D(filters, kernel_size, strides=stride, padding=padding,\n dilation_rate=dilation, use_bias=False, name=name + '_2_conv')(x)\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\n name=name + '_2_bn')(x)\n x = layers.Activation('relu', name=name + '_2_relu')(x)\n\n x = layers.Conv2D(4 * filters, 1, use_bias=False, name=name + '_3_conv')(x)\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\n name=name + '_3_bn')(x)\n\n x = layers.Add(name=name + '_add')([shortcut, x])\n x = layers.Activation('relu', name=name + '_out')(x)\n return x", "def apply_filter(data, filter_bank, sfreq): \n if data.ndim == 1:\n filtered = np.zeros((1, filter_bank.shape[0], sfreq))\n for filt in range(filter_bank.shape[0]):\n filtered[0, filt, :] = np.convolve(filter_bank[filt,:], data)[int(sfreq-sfreq/2):int(sfreq+sfreq/2)]\n elif data.ndim == 2:\n filtered = np.zeros((data.shape[0], filter_bank.shape[0], sfreq))\n for chan in range(data.shape[0]):\n for filt in range(filter_bank.shape[0]):\n filtered[chan, filt, :] = np.convolve(filter_bank[filt, :], \\\n data[chan,:])[int(sfreq-sfreq/2):int(sfreq+sfreq/2)] # mode=\"full\"\n return filtered", "def conv1d2d3d(self, inputs, filters, kernels, strides, pad_stride1):\n if len(inputs.shape)==5:\n conv_fn = tf.layers.conv3d\n self._conv3d_num += 1\n elif len(inputs.shape) == 4:\n conv_fn = tf.layers.conv2d\n self._conv2d_num += 1\n elif len(inputs.shape) == 3:\n conv_fn = tf.layers.conv1d\n self._conv1d_num += 1\n else:\n import pdb; pdb.set_trace() # XXX BREAKPOINT\n pass\n raise NotImplementedError\n\n #inputs, padding = self.padding2d3d(inputs, kernels, strides, pad_stride1)\n\n assert self.data_format == 'channels_last'\n outputs = conv_fn(\n inputs=inputs, filters=filters, kernel_size=kernels, strides=strides,\n padding=self._padding[pad_stride1], use_bias=self.use_bias,\n kernel_initializer=KERNEL_INI,\n data_format=self.data_format)\n return outputs", "def block3(\n x,\n filters,\n kernel_size=3,\n stride=1,\n groups=32,\n conv_shortcut=True,\n name='',\n norm_use=\"bn\",\n):\n if conv_shortcut is True:\n shortcut = layers.Conv2D(\n (64 // groups) * filters,\n 1,\n strides=stride,\n use_bias=False,\n name=name + '_0_conv',\n )(x)\n shortcut = normalize_layer(shortcut, norm_use=norm_use, name=name + '_0_')\n else:\n shortcut 
= x\n\n x = layers.Conv2D(\n filters,\n 1,\n use_bias=False,\n name=name + '_1_conv',\n kernel_initializer='he_normal',\n )(x)\n x = normalize_layer(x, norm_use=norm_use, name=name + '_1_')\n x = layers.Activation('relu', name=name + '_1_relu')(x)\n\n c = filters // groups\n x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)\n x = layers.DepthwiseConv2D(\n kernel_size,\n strides=stride,\n depth_multiplier=c,\n use_bias=False,\n name=name + '_2_conv',\n kernel_initializer='he_normal',\n )(x)\n x_shape = backend.int_shape(x)[1:-1]\n x = layers.Reshape(x_shape + (groups, c, c))(x)\n output_shape = x_shape + (groups, c) if backend.backend() == 'theano' else None\n x = layers.Lambda(\n lambda x: sum([x[:, :, :, :, i] for i in range(c)]),\n output_shape=output_shape,\n name=name + '_2_reduce',\n )(x)\n x = layers.Reshape(x_shape + (filters, ))(x)\n x = normalize_layer(x, norm_use=norm_use, name=name + '_2_')\n x = layers.Activation('relu', name=name + '_2_relu')(x)\n\n x = layers.Conv2D((64 // groups) * filters, 1, kernel_initializer='he_normal',\n use_bias=False, name=name + '_3_conv')(x)\n x = normalize_layer(x, norm_use=norm_use, name=name + '_3_')\n\n x = layers.Add(name=name + '_add')([shortcut, x])\n x = layers.Activation('relu', name=name + '_out')(x)\n return x", "def _conv_block(inputs, filters, kernel=(3, 3), strides=(1, 1), alpha=1.0, nl='RE'):\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n filters = int(filters * alpha)\n x = KL.Conv2D(filters, kernel, padding='same', use_bias=False, strides=strides)(inputs)\n x = KL.BatchNormalization(axis=channel_axis)(x)\n return _return_activation(x, nl=nl)", "def conv2d_fft(input, filters, image_shape=None, filter_shape=None,\r\n border_mode='valid', pad_last_dim=False):\r\n\r\n # use symbolic shapes to compute shape info at runtime if not specified\r\n if image_shape is None:\r\n image_shape = input.shape\r\n\r\n if filter_shape is None:\r\n filter_shape = filters.shape\r\n\r\n # batch size, input channels, input dim 0, input dim 1\r\n b, ic, i0, i1 = image_shape\r\n # output channels, input channels, filter dim 0, filter dim 1\r\n oc, ic_, f0, f1 = filter_shape\r\n\r\n # pad filters/image to output shape\r\n if border_mode == 'valid':\r\n o0 = i0\r\n if pad_last_dim:\r\n o1 = i1 + 1\r\n input_padded = T.zeros((b, ic, o0, o1), dtype='float32')\r\n input_padded = T.set_subtensor(input_padded[:, :, :i0, :i1],\r\n input)\r\n else:\r\n o1 = i1\r\n input_padded = input\r\n\r\n filters_padded = T.zeros((oc, ic, o0, o1), dtype='float32')\r\n filters_padded = T.set_subtensor(filters_padded[:, :, :f0, :f1],\r\n filters)\r\n\r\n elif border_mode == 'full':\r\n\r\n # In this particular case, the values of (o0, o1) represent\r\n # the dimensions of the work buffer more than the actual dimensions\r\n # of the desired output.\r\n o0 = i0 + 2 * (f0 - 1)\r\n o1 = i1 + 2 * (f1 - 1)\r\n\r\n if pad_last_dim:\r\n o1 = o1 + 1\r\n\r\n # We line up the filters and the images in a way\r\n # such that the filters are tightly placed against the\r\n # top-left of the array, and the images intersect with\r\n # them on one pixel. 
The top-left pixel of the images\r\n # is the bottom-right pixel of the filters when we\r\n # do the layout here.\r\n\r\n filters_padded = T.zeros((oc, ic, o0, o1), dtype='float32')\r\n filters_padded = T.set_subtensor(filters_padded[:, :, :f0, :f1],\r\n filters)\r\n\r\n input_padded = T.zeros((b, ic, o0, o1), dtype='float32')\r\n input_padded = T.set_subtensor(input_padded[:, :, (f0 - 1):(f0 - 1 + i0), (f1 - 1):(f1 - 1 + i1)],\r\n input)\r\n else:\r\n raise ValueError('invalid mode')\r\n\r\n # reshape for FFT\r\n input_flat = input_padded.reshape((b * ic, o0, o1))\r\n filters_flat = filters_padded.reshape((oc * ic, o0, o1))\r\n\r\n # perform FFT\r\n input_fft_flat = cufft(input_flat) # (b * ic, o0, o1//2 + 1, 2)\r\n filters_fft_flat = cufft(filters_flat) # (oc * ic, o0, o1//2 + 1, 2)\r\n\r\n # unfold ic dimension\r\n input_fft_v_shape = (b, ic, o0, o1 // 2 + 1, 2)\r\n filters_fft_v_shape = (oc, ic, o0, o1 // 2 + 1, 2)\r\n input_fft_v = input_fft_flat.reshape(input_fft_v_shape)\r\n filters_fft_v = filters_fft_flat.reshape(filters_fft_v_shape)\r\n\r\n # (b, oc, o0, o1//2 + 1, 2)\r\n output_fft_s = mult_and_reduce(input_fft_v, filters_fft_v,\r\n input_shape=input_fft_v_shape,\r\n filter_shape=filters_fft_v_shape)\r\n\r\n # reshape for IFFT\r\n output_fft_flat = output_fft_s.reshape((b * oc, o0, o1 // 2 + 1, 2))\r\n\r\n # perform IFFT\r\n output_flat = cuifft(output_fft_flat) # (b * oc, o0, o1)\r\n\r\n # reshape\r\n output_circ = output_flat.reshape((b, oc, o0, o1)) # circular!\r\n\r\n # Now we extract the region of interest.\r\n # We just cut it out from the output_circ\r\n # array that was used for the computation.\r\n # We do not need to handle pad_last_dim in a\r\n # special way because we specify explicitly here\r\n # how much values are expected.\r\n if border_mode == 'valid':\r\n output = output_circ[:, :, (f0-1):(f0-1 + i0-f0+1), (f1-1):(f1-1 + i1-f1+1)]\r\n elif border_mode == 'full':\r\n output = output_circ[:, :, (f0-1):(f0-1 + i0+f0-1), (f1-1):(f1-1 + i1+f1-1)]\r\n else:\r\n raise ValueError('invalid mode')\r\n\r\n # Rescale manually. 
This is just a factor that comes in during the\r\n # trip through FFT and inverse FFT.\r\n output = (1.0 / T.cast(o0 * o1, 'float32')) * output\r\n\r\n # output should now be the result of a batched valid convolution\r\n # of the input with the filters.\r\n return basic_ops.as_cuda_ndarray_variable(output)", "def convolutional(X, X_test, input_shape, n_filters, filter_size):\n\n\tfilters_shape = (n_filters, input_shape[1], filter_size[0], filter_size[1])\n\tfilters = theano.shared(\n\t\tnumpy.random.uniform(low=-0.1, high=0.1, size=filters_shape).astype(numpy.float32),\n\t\t'conv_filters'\n\t)\n\n\toutput_shape = (input_shape[0], n_filters, input_shape[2], input_shape[3])\n\n\toutput = conv2d(input=X, filters=filters, filter_shape=filters_shape, image_shape=input_shape, border_mode='full')\n\toutput_test = conv2d(input=X_test, filters=filters, filter_shape=filters_shape, image_shape=input_shape, border_mode='full')\n\n\tshift_x = (filter_size[0] - 1) // 2\n\tshift_y = (filter_size[1] - 1) // 2\n\n\toutput = output[:,:,shift_x:input_shape[2]+shift_x,shift_y:input_shape[3]+shift_y]\n\toutput_test = output_test[:,:,shift_x:input_shape[2]+shift_x,shift_y:input_shape[3]+shift_y]\n\n\treturn output, output_test, [filters], output_shape", "def test_on_conv_transpose_2d_stride(self):\n\n # Channels/Colors, #filters, filter_size (square)\n conv_filter = objax.nn.ConvTranspose2D(1, 1, 2, strides=2, padding=objax.ConvPadding.VALID)\n weights = objax.TrainVar(jn.array([[[[1., 2.], [3., 4.]]]]).transpose((2, 3, 0, 1)))\n conv_filter.w = weights\n image = jn.array([[[[2., 1., 3., 4.],\n [5., 6., 7., 8.], [9., 10., 11., 12.], [13., 14., 15., 16.]]]])\n # NCHW: Batch, Channels/Colors, Height, Width\n features = conv_filter(image)\n expected_features = jn.array([[[[2., 4., 1., 2., 3., 6., 4., 8.],\n [6., 8., 3., 4., 9., 12., 12., 16.],\n [5., 10., 6., 12., 7., 14., 8., 16.],\n [15., 20., 18., 24., 21., 28., 24., 32.],\n [9., 18., 10., 20., 11., 22., 12., 24.],\n [27., 36., 30., 40., 33., 44., 36., 48.],\n [13., 26., 14., 28., 15., 30., 16., 32.],\n [39., 52., 42., 56., 45., 60., 48., 64.]]]])\n self.assertEqual(features.shape, (1, 1, 8, 8))\n self.assertTrue(jn.array_equal(features, expected_features))", "def dense_conv_forward_2d(inp_image: np.ndarray, kernel: np.ndarray, stride, padding):\n assert len(inp_image.shape) == 3, 'single 2D images only. 
No batches.'\n assert len(kernel.shape) == 4\n\n height, width, colors = inp_image.shape\n kernel_height, kernel_width, colors_in, colors_out = kernel.shape\n kernel_stride_x, kernel_stride_y = stride\n kernel_padding_x, kernel_padding_y = padding\n i_f = int(np.floor(kernel_width / 2.0))\n j_f = int(np.floor(kernel_height / 2.0))\n\n out_pixels = np.zeros((height, width, colors_out))\n for y in range(kernel_padding_y, height - kernel_padding_y,\n kernel_stride_y): # todo: add kernel_padding_y and kernel_stride_y fix to glsl\n for x in range(kernel_padding_x, width - kernel_padding_x,\n kernel_stride_x): # todo: add kernel_padding_x and kernel_stride_x fix to glsl\n output_select = [y, x, 0]\n input_select = np.asarray(\n [y * kernel_stride_y, x * kernel_stride_x, 0]\n )\n for i in range(-np.int(np.floor(kernel_width / 2.0)), np.int(np.ceil(kernel_width / 2.0))):\n for j in range(-np.int(np.floor(kernel_height / 2.0)), np.int(np.ceil(kernel_height / 2.0))):\n in_pixel_select = np.copy(input_select)\n in_pixel_select += [j, i, 0]\n for co in range(colors_out):\n output_select[2] = co\n for ci in range(colors_in):\n in_pixel_select[2] = ci\n kernel_select = np.asarray([j_f + j, i_f + i, ci, co])\n\n out_pixels[tuple(output_select)] += kernel[tuple(kernel_select)] * inp_image[\n tuple(in_pixel_select)]\n return out_pixels", "def convolve_grayscale_same(images, kernel):\n\n # num images\n n_images = images.shape[0]\n\n # input_width and input_height\n i_h = images.shape[1]\n i_w = images.shape[2]\n\n # kernel_width and kernel_height\n\n k_h = kernel.shape[0]\n k_w = kernel.shape[1]\n\n # pad_h ⊛ = int (k_h - 1)/2\n # pad_w ⊛ = int (k_w - 1)/2\n p_h = int((k_h - 1) / 2)\n p_w = int((k_w - 1) / 2)\n\n if k_h % 2 == 0:\n p_h = int(k_h / 2)\n\n if k_w % 2 == 0:\n p_w = int(k_w / 2)\n\n # output_height and output_width\n # H = i_h + 2pad - k_h + 1, W = i_w + 2pad - k_w + 1\n o_h = i_h + 2 * p_h - k_h + 1\n o_w = i_w + 2 * p_w - k_w + 1\n\n if k_h % 2 == 0:\n o_h = i_h + 2 * p_h - k_h\n\n if k_w % 2 == 0:\n o_w = i_w + 2 * p_w - k_w\n\n # creating outputs of size: n_images, o_h x o_w\n outputs = np.zeros((n_images, o_h, o_w))\n\n # creating pad of zeros around the output images\n padded_imgs = np.pad(images,\n pad_width=((0, 0), (p_h, p_h), (p_w, p_w)),\n mode=\"constant\",\n constant_values=0)\n\n # vectorizing the n_images into an array\n imgs_arr = np.arange(0, n_images)\n\n # iterating over the output array and generating the convolution\n for x in range(o_h):\n for y in range(o_w):\n x1 = x + k_h\n y1 = y + k_w\n outputs[imgs_arr, x, y] = np.sum(np.multiply(\n padded_imgs[imgs_arr, x: x1, y: y1], kernel), axis=(1, 2))\n\n return outputs", "def block(x, filters, kernel_size=3, up_stride=1, groups=32, conv_shortcut=True, name=None):\n bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1\n\n if conv_shortcut is True:\n if up_stride == 1:\n shortcut = layers.Conv2D((64 // groups) * filters, 1, use_bias=False, name=name + '_0_conv')(x)\n else:\n shortcut = layers.Conv2DTranspose((64 // groups) * filters, 1, strides=up_stride, use_bias=False,\n name=name + '_0_conv')(x)\n shortcut = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_0_bn')(shortcut)\n else:\n shortcut = x\n\n if up_stride == 1:\n x = layers.Conv2D(filters, 1, use_bias=False, name=name + '_1_conv')(x)\n else:\n x = layers.Conv2DTranspose(filters, 1, strides=up_stride, use_bias=False,\n padding='same', name=name + '_1_conv')(x)\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, 
name=name + '_1_bn')(x)\n x = layers.Activation('relu', name=name + '_1_relu')(x)\n\n c = filters // groups\n x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)\n x = layers.DepthwiseConv2D(kernel_size, depth_multiplier=c, use_bias=False, name=name + '_2_conv')(x)\n x_shape = backend.int_shape(x)[1:-1]\n x = layers.Reshape(x_shape + (groups, c, c))(x)\n output_shape = x_shape + (groups, c) if backend.backend() == 'theano' else None\n x = layers.Lambda(lambda x: sum([x[:, :, :, :, i] for i in range(c)]), output_shape=output_shape,\n name=name + '_2_reduce')(x)\n x = layers.Reshape(x_shape + (filters,))(x)\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_2_bn')(x)\n x = layers.Activation('relu', name=name + '_2_relu')(x)\n\n x = layers.Conv2D((64 // groups) * filters, 1, use_bias=False, name=name + '_3_conv')(x)\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_3_bn')(x)\n\n x = layers.Add(name=name + '_add')([shortcut, x])\n x = layers.Activation('relu', name=name + '_out')(x)\n return x", "def forward(self, stride, padding, *args):\n #TODO\n parents = list(args)\n inp_ = parents[0].value\n kernel = parents[1].value\n \n (batch_size, in_channels, H, W) = inp_.shape\n (out_channels, in_channels_t, Hk, Wk) = kernel.shape\n assert in_channels == in_channels_t\n \n return conv2d((inp_, kernel, stride, padding))\n # return conv2d_mul(inp_, kernel, stride, padding)", "def convolve(img, fltr, same=False, stri=1, pad=0, repfilter=False):\n # focus = np.array{eltype(img),2} # scope outside of if block\n if np.ndim(img) == 3:\n imgd, imgx, imgy = np.shape(img)\n elif np.ndim(img) == 2:\n imgx, imgy = np.shape(img)\n imgd = 1\n else:\n print(\"Wrong dimensions of image file. Quitting.\")\n return\n\n if np.ndim(fltr) == 3:\n fd, fx, fy = np.shape(fltr)\n elif np.ndim(fltr) == 2:\n fx, fy = np.shape(fltr)\n fd = 1\n else:\n print(\"Wrong dimensions of filter. Quitting.\")\n return\n\n if fd != imgd: # as a convenience we could just replicate the 2d filter...\n print(\"Depths of image and filter not equal. 
Quitting.\")\n return\n\n if same:\n pad = math.ceil((fx - 1) / 2)\n\n if pad > 0:\n img = dopad(img, pad)\n\n # dimensions of the result of convolution\n x_out = (imgx + 2 * pad - fx) // stri + 1\n y_out = (imgy + 2 * pad - fy) // stri + 1\n\n # print(imgx, imgy)\n\n ret = np.zeros((x_out, y_out))\n if imgd > 1: # slice through the depth, the zeroth (first) dimension\n for i in zip(range(x_out), range(0, imgx, stri)):\n for j in zip(range(y_out), range(0, imgy, stri)):\n ret[i[0], j[0]] = np.sum(img[:, i[1]:i[1] + fx, j[1]:j[1] +\n fy] * fltr)\n else:\n for i in zip(range(x_out), range(0, imgx, stri)):\n for j in zip(range(y_out), range(0, imgy, stri)):\n ret[i[0], j[0]] = np.sum(img[i[1]:i[1] + fx, j[1]:j[1] +\n fy] * fltr)\n return ret", "def img_conv(X, filter):\n assert filter.shape[0] % 2 == 1\n assert filter.shape[1] % 2 == 1\n x_size = filter.shape[0] // 2\n y_size = filter.shape[1] // 2\n w = X.shape[0]\n h = X.shape[1]\n out = numpy.zeros(X.shape)\n for r in range(w):\n for c in range(h):\n for x in range(filter.shape[0]):\n pixel_x = r + x - x_size\n if pixel_x < 0:\n pixel_x = -pixel_x\n if pixel_x >= w:\n pixel_x = w - pixel_x - 2\n for y in range(filter.shape[1]):\n pixel_y = c + y - y_size\n if pixel_y < 0:\n pixel_y = -pixel_y\n if pixel_y >= h:\n pixel_y = h - pixel_y - 2\n #if pixel_x >= 0 and pixel_x < w and pixel_y >= 0 and pixel_y < h:\n out[r, c] += filter[x, y] * X[pixel_x, pixel_y]\n return out", "def cortex_conv(inp, filters, n_out_w=None, n_out_h=None, \n strides=(1, 1, 1, 1), padding='SAME', bias=None):\n\n\n n_out = filters.get_shape()[3].value\n if n_out is None and (n_out_w is None or n_out_h is None):\n raise Exception(\"Filter shape not inferrable from filter tensor \"\n \"and output shape not inferrable from n_out_w and n_out_h.\")\n elif n_out is None:\n n_out = n_out_w * n_out_h\n\n if n_out_h is None:\n if n_out_w is None:\n sqrt = int(math.sqrt(n_out))\n n_out_w = sqrt\n n_out_h = n_out // n_out_w\n else:\n if n_out_w is None:\n n_out_w = n_out // n_out_h\n\n conv_raw = tf.nn.conv2d(inp, filters, strides=strides, padding=padding)\n if bias is not None:\n conv_raw = tf.nn.bias_add(conv_raw, bias)\n shp = [s.value for s in conv_raw.get_shape()]\n reshaped = tf.reshape(conv_raw[:, :, :, :n_out_w * n_out_h],\n (shp[0], shp[1], shp[2], n_out_h, n_out_w))\n transposed = tf.transpose(reshaped, (0, 1, 3, 2, 4))\n output = tf.reshape(transposed, (shp[0], shp[1] * n_out_h, shp[2] * n_out_w,\n 1))\n return output", "def conv_layer(n_in_filters, n_filters, ker_size, stride=1, \n depthwise=False, zero_bn=False, act=True) :\n bn = nn.BatchNorm2d(n_filters)\n nn.init.constant_(bn.weight, 0. 
if zero_bn else 1.)\n conv = nn.Conv2d(n_in_filters, n_filters, ker_size, stride=stride,padding=ker_size//2, \n bias=False,groups = n_in_filters if depthwise else 1)\n layer = [conv, bn]\n if act: layer += [Swish()]\n return nn.Sequential(*layer)", "def Conv2D(\n inputs,\n filters,\n kernel_size,\n strides=(1, 1),\n padding='same',\n data_format='channels_last',\n dilation_rate=(1, 1),\n activation=None,\n use_bias=True,\n kernel_initializer=None,\n bias_initializer=tf.zeros_initializer(),\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n split=1):\n if kernel_initializer is None:\n if get_tf_version_tuple() <= (1, 12):\n kernel_initializer = tf.contrib.layers.variance_scaling_initializer(2.0)\n else:\n kernel_initializer = tf.keras.initializers.VarianceScaling(2.0, distribution='untruncated_normal')\n dilation_rate = shape2d(dilation_rate)\n\n if split == 1 and dilation_rate == [1, 1]:\n # tf.layers.Conv2D has bugs with dilations (https://github.com/tensorflow/tensorflow/issues/26797)\n with rename_get_variable({'kernel': 'W', 'bias': 'b'}):\n layer = tf.layers.Conv2D(\n filters,\n kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n activation=activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n activity_regularizer=activity_regularizer,\n _reuse=tf.get_variable_scope().reuse)\n ret = layer.apply(inputs, scope=tf.get_variable_scope())\n ret = tf.identity(ret, name='output')\n\n ret.variables = VariableHolder(W=layer.kernel)\n if use_bias:\n ret.variables.b = layer.bias\n\n # compute the flops of the conv\n in_shape = inputs.get_shape().as_list()\n channel_axis = 3 if data_format == 'channels_last' else 1\n h_dim = 1 if data_format == 'channels_last' else 2\n w_dim = h_dim + 1\n in_channel = in_shape[channel_axis]\n out_channel = filters\n kernel_shape = shape2d(kernel_size)\n stride = shape4d(strides, data_format=data_format)\n flops = 1.0 * in_channel * out_channel * \\\n kernel_shape[0] * kernel_shape[1] / stride[h_dim] / stride[w_dim]\n if in_shape[h_dim] is not None and in_shape[h_dim] > 0:\n flops *= in_shape[h_dim] * in_shape[w_dim]\n ret.info = VariableHolder(flops=flops)\n\n else:\n # group conv implementation\n data_format = get_data_format(data_format, keras_mode=False)\n in_shape = inputs.get_shape().as_list()\n channel_axis = -1 if data_format == 'NHWC' else 1\n in_channel = in_shape[channel_axis]\n assert in_channel is not None, \"[Conv2D] Input cannot have unknown channel!\"\n assert in_channel % split == 0\n\n assert kernel_regularizer is None and bias_regularizer is None and activity_regularizer is None, \\\n \"Not supported by group conv now!\"\n\n out_channel = filters\n assert out_channel % split == 0\n assert dilation_rate == [1, 1] or get_tf_version_tuple() >= (1, 5), 'TF>=1.5 required for dilated conv.'\n\n kernel_shape = shape2d(kernel_size)\n filter_shape = kernel_shape + [in_channel / split, out_channel]\n stride = shape4d(strides, data_format=data_format)\n\n kwargs = dict(data_format=data_format)\n if get_tf_version_tuple() >= (1, 5):\n kwargs['dilations'] = shape4d(dilation_rate, data_format=data_format)\n\n W = tf.get_variable(\n 'W', filter_shape, initializer=kernel_initializer)\n\n if use_bias:\n b = tf.get_variable('b', [out_channel], initializer=bias_initializer)\n\n conv = None\n if get_tf_version_tuple() >= (1, 13):\n try:\n 
conv = tf.nn.conv2d(inputs, W, stride, padding.upper(), **kwargs)\n except ValueError:\n conv = None\n log_once(\"CUDNN group convolution support is only available with \"\n \"https://github.com/tensorflow/tensorflow/pull/25818 . \"\n \"Will fall back to a loop-based slow implementation instead!\", 'warn')\n if conv is None:\n inputs = tf.split(inputs, split, channel_axis)\n kernels = tf.split(W, split, 3)\n outputs = [tf.nn.conv2d(i, k, stride, padding.upper(), **kwargs)\n for i, k in zip(inputs, kernels)]\n conv = tf.concat(outputs, channel_axis)\n\n if activation is None:\n activation = tf.identity\n ret = activation(tf.nn.bias_add(conv, b, data_format=data_format) if use_bias else conv, name='output')\n\n ret.variables = VariableHolder(W=W)\n if use_bias:\n ret.variables.b = b\n\n h_dim = 1 if data_format == 'NHWC' else 2\n w_dim = h_dim + 1\n flops = 1.0 * in_channel * out_channel * \\\n kernel_shape[0] * kernel_shape[1] / stride[h_dim] / stride[w_dim] / split\n if in_shape[h_dim] is not None and in_shape[h_dim] > 0:\n flops *= in_shape[h_dim] * in_shape[w_dim]\n ret.info = VariableHolder(flops=flops)\n return ret", "def conv(self, inputs, filters, kernel_size, strides, padding='SAME', name='conv_layer'):\n input_channels = inputs[-1]\n kernel = tf.Variable(tf.random.truncated_normal(shape=[kernel_size, kernel_size, input_channels, filters]),\n dtype=tf.float32, name='kernel')\n bias = tf.Variable(tf.zeros(shape=[filters]), name='bias')\n conv = tf.nn.conv2d(inputs, filter=kernel,\n strides=[1, strides, strides, 1],\n padding=padding, name='conv')\n out = tf.nn.relu(conv + bias, name='relu')\n return out", "def block1(\n x,\n filters,\n kernel_size=3,\n stride=1,\n conv_shortcut=True,\n name='',\n norm_use=\"bn\"\n):\n if conv_shortcut is True:\n shortcut = layers.Conv2D(\n 4 * filters,\n 1,\n strides=stride,\n kernel_initializer='he_normal',\n name=name + '_0_conv',\n )(x)\n shortcut = normalize_layer(\n shortcut,\n norm_use=norm_use,\n name=name + '_0_',\n )\n else:\n shortcut = x\n\n x = layers.Conv2D(\n filters,\n 1,\n strides=stride,\n name=name + '_1_conv',\n kernel_initializer='he_normal',\n )(x)\n x = normalize_layer(x, norm_use=norm_use, name=name + '_1_')\n x = layers.Activation('relu', name=name + '_1_relu')(x)\n\n x = layers.Conv2D(\n filters,\n kernel_size,\n padding='SAME',\n kernel_initializer='he_normal',\n name=name + '_2_conv'\n )(x)\n x = normalize_layer(x, norm_use=norm_use, name=name+'_2_')\n x = layers.Activation('relu', name=name + '_2_relu')(x)\n\n x = layers.Conv2D(\n 4 * filters, 1,\n name=name + '_3_conv',\n kernel_initializer='he_normal'\n )(x)\n x = normalize_layer(x, norm_use=norm_use, name=name + '_3_')\n\n x = layers.Add(name=name + '_add')([shortcut, x])\n x = layers.Activation('relu', name=name + '_out')(x)\n return x", "def conv_block(input_tensor, kernel_size, filters, strides=(2, 2)):\n\n filters1, filters2, filters3 = filters\n\n if backend.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n\n x = layers.Conv2D(filters1, (1, 1), use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(input_tensor)\n x = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2D(filters2, kernel_size, strides=strides, padding='same',\n use_bias=False, kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)\n x = layers.BatchNormalization(axis=bn_axis,\n 
momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2D(filters3, (1, 1), use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)\n x = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(x)\n\n shortcut = layers.Conv2D(filters3, (1, 1), strides=strides, use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(input_tensor)\n shortcut = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(shortcut)\n\n x = layers.add([x, shortcut])\n x = layers.Activation('relu')(x)\n return x", "def sepfirnd(input,filters,axes,output=None,mode='reflect',cval=0.0,origin=0):\n if output is None:\n output = np.empty_like(input)\n tmp = output\n if np.isscalar(filters[0]):\n filters = [np.asarray(filters)]\n if np.isscalar(axes):\n axes = [axes]\n if len(axes) > 1:\n tmp = np.empty_like(output)\n if len(filters) == 1:\n filters = [filters[0]]*len(axes)\n if len(axes) & 1 == 1: #pre-swap so that last write goes to output\n output,tmp = tmp,output \n for filt,ax in zip(filters,axes):\n output,tmp = tmp,output #swap buffers\n convolve1d(input,filt,ax,output,mode,cval,origin)\n input = output\n return output", "def net_convolution2d(input_x, num_filter, filter_shape, strides, padding,\n\t use_cudnn_on_gpu=None, data_format=None, name=None, weights_standard_dev=None, bias_constant=None):\n\tweights_standard_dev = 0.1\n\tbias_constant = 0.1\n\tshape_w = list(filter_shape)\n\tshape_w.append(num_filter)\n\t#Initialize weights with a normal distribution of standard deviation 0.1\n\tweights = initialize_weights(shape_w, weights_standard_dev)\n\t#Initialize with a positive intial bias to avoid dead neurons since \n\t#we are using Rectified Linear Neurons\n\tshape_b = [num_filter]\n\tbiases = initialize_biases(shape_b, bias_constant)\n\n\treturn (tf.nn.conv2d(input=input_x, filter=weights, strides=strides, padding=padding,\n\t\t\t\t\t\t\tuse_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format, name=name)\n\t+ biases)", "def conv_forward(x, w):\n out = None\n ###########################################################################\n # TODO: Implement the convolutional forward pass. #\n # Hint: you can use the function np.pad for padding. #\n ###########################################################################\n N, C, H, W = x.shape\n F, C, HH, WW = w.shape\n out = np.zeros((N,F,H-HH+1,W-WW+1))\n \n w = np.flip(w)\n\n for n in range(N):\n for f in range(F):\n out[n][f] = filt3D(x[n],w[f])\n \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, w)\n return out, cache", "def conv2d(input, filters, image_shape=None, filter_shape=None,\r\n border_mode='valid', subsample=(1, 1), **kargs):\r\n\r\n #accept Constant value for image_shape and filter_shape.\r\n if image_shape is not None:\r\n image_shape = list(image_shape)\r\n for i in xrange(len(image_shape)):\r\n if image_shape[i] is not None:\r\n try:\r\n image_shape[i] = get_scalar_constant_value(\r\n as_tensor_variable(image_shape[i]))\r\n except NotScalarConstantError, e:\r\n raise NotScalarConstantError(\r\n \"The convolution need that the shape\"\r\n \" information are constant values. 
We got\"\r\n \" %s for the image_shape parameter\" %\r\n image_shape[i])\r\n assert str(image_shape[i].dtype).startswith('int')\r\n image_shape[i] = int(image_shape[i])\r\n if filter_shape is not None:\r\n filter_shape = list(filter_shape)\r\n for i in xrange(len(filter_shape)):\r\n if filter_shape[i] is not None:\r\n try:\r\n filter_shape[i] = get_scalar_constant_value(\r\n as_tensor_variable(filter_shape[i]))\r\n except NotScalarConstantError, e:\r\n raise NotScalarConstantError(\r\n \"The convolution need that the shape\"\r\n \" information are constant values. We got\"\r\n \" %s for the filter_shape \"\r\n \"parameter\" % filter_shape[i])\r\n assert str(filter_shape[i].dtype).startswith('int')\r\n filter_shape[i] = int(filter_shape[i])\r\n\r\n if image_shape and filter_shape:\r\n try:\r\n assert image_shape[1] == filter_shape[1]\r\n except Exception:\r\n print 'image ', image_shape, ' filters ', filter_shape\r\n raise\r\n\r\n if filter_shape is not None:\r\n nkern = filter_shape[0]\r\n kshp = filter_shape[2:]\r\n else:\r\n nkern, kshp = None, None\r\n\r\n if image_shape is not None:\r\n bsize = image_shape[0]\r\n imshp = image_shape[1:]\r\n else:\r\n bsize, imshp = None, None\r\n\r\n op = ConvOp(output_mode=border_mode, dx=subsample[0], dy=subsample[1],\r\n imshp=imshp, kshp=kshp, nkern=nkern, bsize=bsize, **kargs)\r\n\r\n return op(input, filters)", "def convolution_internal(\n input, # pylint: disable=redefined-builtin\n filters,\n strides=None,\n padding=\"VALID\",\n data_format=None,\n dilations=None,\n name=None,\n call_from_convolution=True,\n num_spatial_dims=None):\n if (not isinstance(filters, variables_lib.Variable) and\n not tensor_util.is_tf_type(filters)):\n with ops.name_scope(\"convolution_internal\", None, [filters, input]):\n filters = ops.convert_to_tensor(filters, name='filters')\n if (not isinstance(input, tensor_lib.Tensor) and not tensor_util.is_tf_type(\n input)):\n with ops.name_scope(\"convolution_internal\", None, [filters, input]):\n input = ops.convert_to_tensor(input, name=\"input\")\n\n filters_rank = filters.shape.rank\n inputs_rank = input.shape.rank\n if num_spatial_dims is None:\n if filters_rank:\n num_spatial_dims = filters_rank - 2\n elif inputs_rank:\n num_spatial_dims = inputs_rank - 2\n else:\n raise ValueError(\n \"When `num_spatial_dims` is not set, one of `input.shape.rank` or \"\n \"`filters.shape.rank` must be known. \"\n f\"Received: input.shape={input.shape} of rank {inputs_rank} and \"\n f\"filters.shape={filters.shape} of rank {filters_rank}\")\n elif filters_rank and filters_rank - 2 != num_spatial_dims:\n raise ValueError(\n \"`filters.shape.rank - 2` should equal `num_spatial_dims`. Received: \"\n f\"filters.shape={filters.shape} of rank {filters_rank} and \"\n f\"num_spatial_dims={num_spatial_dims}\")\n\n if inputs_rank:\n num_batch_dims = inputs_rank - num_spatial_dims - 1 # Channel dimension.\n else:\n num_batch_dims = 1 # By default, assume single batch dimension.\n\n if num_spatial_dims not in {1, 2, 3}:\n raise ValueError(\n \"`num_spatial_dims` must be 1, 2, or 3. 
\"\n f\"Received: num_spatial_dims={num_spatial_dims}.\")\n\n if data_format is None or data_format in _CHANNELS_LAST_FORMATS:\n channel_index = num_batch_dims + num_spatial_dims\n else:\n channel_index = num_batch_dims\n\n if dilations is None:\n dilations = _get_sequence(dilations, num_spatial_dims, channel_index,\n \"dilations\")\n is_dilated_conv = False\n else:\n dilations = _get_sequence(dilations, num_spatial_dims, channel_index,\n \"dilations\")\n is_dilated_conv = any(i != 1 for i in dilations)\n\n strides = _get_sequence(strides, num_spatial_dims, channel_index, \"strides\")\n has_tpu_context = device_context.enclosing_tpu_context() is not None\n\n if name:\n default_name = None\n elif not has_tpu_context or call_from_convolution:\n default_name = \"convolution\"\n elif num_spatial_dims == 2: # Most common case.\n default_name = \"Conv2D\"\n elif num_spatial_dims == 3:\n default_name = \"Conv3D\"\n else:\n default_name = \"conv1d\"\n\n with ops.name_scope(name, default_name, [input, filters]) as name:\n # Fast path for TPU or if no dilation, as gradient only supported on TPU\n # for dilations.\n if not is_dilated_conv or has_tpu_context:\n if num_spatial_dims == 2: # Most common case.\n op = _conv2d_expanded_batch\n elif num_spatial_dims == 3:\n op = _conv3d_expanded_batch\n else:\n op = conv1d\n\n return op(\n input,\n filters,\n strides,\n padding=padding,\n data_format=data_format,\n dilations=dilations,\n name=name)\n else:\n if channel_index == 1:\n strides = strides[2:]\n dilations = dilations[2:]\n else:\n strides = strides[1:-1]\n dilations = dilations[1:-1]\n\n op = Convolution(\n tensor_shape.as_shape(input.shape),\n tensor_shape.as_shape(filters.shape),\n padding,\n strides=strides,\n dilation_rate=dilations,\n name=name,\n data_format=data_format,\n num_spatial_dims=num_spatial_dims)\n return op(input, filters)", "def _conv_block(inputs: \"Layer\",\n filters: int,\n kernel: int or Tuple[int, int],\n strides: int or Tuple[int, int]) -> \"Layer\":\n\n layer = Conv2D(filters, kernel, padding='same', strides=strides)(inputs)\n layer = BatchNormalization()(layer)\n layer = relu6(layer)\n return layer", "def conv(x, filter_height, filter_width, num_filters, stride_y, stride_x, name,\n padding='SAME', trainable=True, log_weights=False):\n # Get number of input channels\n input_channels = int(x.get_shape()[-1])\n\n # Create lambda function for the convolution\n\n with tf.variable_scope(name) as scope:\n\n regularizer = tf.contrib.layers.l2_regularizer(scale=0.01)\n\n weights = tf.get_variable(name='weights',\n shape=[filter_height, filter_width,\n input_channels, num_filters],\n initializer=tf.glorot_uniform_initializer(),\n regularizer=regularizer,\n trainable=trainable)\n\n biases = tf.get_variable(name='biases',\n shape=[num_filters],\n initializer=tf.zeros_initializer(),\n trainable=trainable)\n\n out = tf.nn.conv2d(x, weights, strides=[1, stride_y, stride_x, 1],\n padding=padding)\n # Add biases\n out = tf.nn.bias_add(out, biases)\n\n # Apply relu function\n out = tf.nn.relu(out, name=scope.name)\n\n if log_weights == True:\n tf.summary.image('weights', weights[tf.newaxis,:,:,0,0,tf.newaxis])\n tf.summary.histogram('weights', weights)\n tf.summary.histogram('biases', biases)\n\n return out, weights, biases", "def myconv2d(input, weight, bias=None, stride=(1,1), padding=(0,0), dilation=(1,1), groups=1):\n batch_size, in_channels, in_h, in_w = input.shape\n out_channels, in_channels, kh, kw = weight.shape\n out_h = int((in_h - kh + 2 * padding[0]) / stride[0] + 1)\n out_w 
= int((in_w - kw + 2 * padding[1]) / stride[1] + 1)\n unfold = torch.nn.Unfold(kernel_size=(kh, kw), dilation=dilation, padding=padding, stride=stride)\n inp_unf = unfold(input)\n w_ = weight.view(weight.size(0), -1).t()\n if bias is None:\n out_unf = inp_unf.transpose(1, 2).matmul(w_).transpose(1, 2)\n else:\n out_unf = (inp_unf.transpose(1, 2).matmul(w_) + bias).transpose(1, 2)\n out = out_unf.view(batch_size, out_channels, out_h, out_w)\n return out.float()", "def clConvolution(self, size, mask):", "def new_implementation(input_size: int,\n channels: int = 4,\n kernel_size: int = 3,\n filters: int = 7,\n stride: int = 1) -> operations.Operation:\n\n # Todo implement the stride\n if stride != 1:\n raise AssertionError(\"stride must be equal to 1 (other values not yet implemented)\")\n\n # The kernel size must be an odd number (1, 3, etc)\n assert (kernel_size % 2) == 1\n\n # Define the variables\n var_n_filter = operations.Variable(\"n_filter\")\n var_S_row = operations.Variable(\"S_row\")\n var_S_col = operations.Variable(\"S_col\")\n var_I_chan = operations.Variable(\"I_chan\")\n var_K_row = operations.Variable(\"K_row\")\n var_K_col = operations.Variable(\"K_col\")\n\n # Define definitions\n # We assume that we don't need to add any padding (if padding is desired, then the data should be pre-padded before\n # going into our system. The output size is thus: output_size = (input_size-kernel_size)/stride + 2\n def_output_size = operations.Definition(\"output_size\", int((input_size - kernel_size) / stride + 1))\n def_kernel_size = operations.Definition(\"kernel_size\", kernel_size)\n def_channels = operations.Definition(\"channels\", channels)\n def_filters = operations.Definition(\"filters\", filters)\n\n # Define a root operation which will be the result of this method\n root_operation = operations.Root(\n def_output_size,\n def_kernel_size,\n def_channels,\n def_filters\n )\n\n # Define our loops\n loop_n_filters = operations.ForLoop(0, def_filters, 1, var_n_filter)\n loop_output_rows = operations.ForLoop(0, def_output_size, 1, var_S_row)\n loop_output_cols = operations.ForLoop(0, def_output_size, 1, var_S_col)\n loop_input_chan = operations.ForLoop(0, def_channels, 1, var_I_chan)\n loop_kernel_rows = operations.ForLoop(0, def_kernel_size, 1, var_K_row)\n loop_kernel_cols = operations.ForLoop(0, def_kernel_size, 1, var_K_col)\n\n # Add our newly created loops to the body of the root_operation in sequence\n root_operation.then(\n loop_output_rows.then(\n loop_output_cols.then(\n loop_n_filters.then(\n loop_input_chan.then(\n loop_kernel_rows.then(\n loop_kernel_cols\n )\n )\n )\n )\n )\n )\n\n # Define the assignment operation - even though this is a matrix assignment,\n # we neglect the lookup times for the arrays\n S = operations.Variable(\"S\")\n I = operations.Variable(\"I\")\n K = operations.Variable(\"K\")\n\n # Index on variables S, I and K\n S_idx = operations.Index(S, [\n var_n_filter, \"*\", def_output_size, \"^2\", \"+\", var_S_row, \"*\", def_output_size, \"+\", var_S_col])\n\n I_idx = operations.Index(I, [\n var_I_chan, \"*\", def_output_size, \"^2\", \"+\", \"(\", var_S_row, \"+\", var_K_row, \")\", \"*\", def_output_size, \"+\", \"(\",\n var_S_col, \"+\", var_K_col, \")\"])\n\n K_idx = operations.Index(K, [var_n_filter, \"*\", def_kernel_size, \"^2\", \"+\", var_K_row, \"*\", def_kernel_size, \"+\", var_K_col])\n\n # Perform memory and arithmetic\n assign_matrix_element = operations.Assign(S_idx).then(\n operations.Fetch(S_idx).then(\n operations.Add().then(\n 
operations.Fetch(I_idx).then(\n operations.Multiply().then(\n operations.Fetch(K_idx).then(operations.End())\n )\n )\n )\n )\n )\n\n # Add the assignment opration after the inner loop\n loop_kernel_cols.then(assign_matrix_element)\n\n # Define the multiplication operation\n\n return root_operation", "def _conv_block( inputs, filters, kernel, strides, nl):\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n x = Conv2D(filters, kernel, padding='same', strides=strides)(inputs)\n x = BatchNormalization(axis=channel_axis)(x)\n return _return_activation(x, nl)", "def conv2d(input, filters, image_shape=None, filter_shape=None,\r\n border_mode='valid', subsample=(1,1), **kargs):\r\n assert input.ndim in (2,3)\r\n assert filters.ndim in (2,3)\r\n\r\n ### use shape information if it is given to us ###\r\n if filter_shape and image_shape:\r\n if input.ndim==3:\r\n bsize = image_shape[0]\r\n else:\r\n bsize = 1\r\n imshp = (1,) + tuple(image_shape[-2:])\r\n\r\n if filters.ndim==3:\r\n nkern = filter_shape[0]\r\n else:\r\n nkern = 1\r\n kshp = filter_shape[-2:]\r\n else:\r\n nkern, kshp = None, None\r\n bsize, imshp = None, None\r\n\r\n ### reshape tensors to 4D, for compatibility with ConvOp ###\r\n if input.ndim==3:\r\n sym_bsize = input.shape[0]\r\n else:\r\n sym_bsize = 1\r\n\r\n if filters.ndim==3:\r\n sym_nkern = filters.shape[0]\r\n else:\r\n sym_nkern = 1\r\n\r\n new_input_shape = tensor.join(0, tensor.stack(sym_bsize,1), input.shape[-2:])\r\n input4D = tensor.reshape(input, new_input_shape, ndim=4)\r\n\r\n new_filter_shape = tensor.join(0, tensor.stack(sym_nkern,1), filters.shape[-2:])\r\n filters4D = tensor.reshape(filters, new_filter_shape, ndim=4)\r\n\r\n ### perform actual convolution ###\r\n op = conv.ConvOp(output_mode=border_mode,\r\n dx=subsample[0], dy=subsample[1],\r\n imshp=imshp, kshp=kshp, nkern=nkern, bsize=bsize,**kargs)\r\n\r\n output = op(input4D, filters4D)\r\n\r\n # flatten to 3D tensor if convolving with single filter or single image\r\n if input.ndim==2 or filters.ndim==2:\r\n output = tensor.flatten(output.T, outdim=3).T\r\n\r\n return output", "def conv2d(\n input: np.ndarray,\n weight: np.ndarray,\n bias: np.ndarray = None,\n stride: int = 1,\n padding: int = 0,\n groups: int = 1,\n dilation: int = 0,\n) -> np.ndarray:\n if input.ndim == 3:\n input = np.expand_dims(input, axis=0)\n assert dilation == 0, \"dilation > 0 not supported yet.\"\n assert input.ndim == weight.ndim\n assert weight.shape[1] * groups == input.shape[1]\n if bias is None:\n bias = np.zeros((weight.shape[0],))\n assert weight.shape[0] == bias.shape[0]\n assert weight.shape[2] == weight.shape[3], \"non-equal kernel size not supported\"\n C_out, _, K, _ = weight.shape\n padded_input = np.pad(\n input, ((0, 0), (0, 0), (padding, padding), (padding, padding)), constant_values=0.0\n )\n N, C_in, H, W = padded_input.shape\n C_in_grp = C_in // groups # C_in group size\n C_out_grp = C_out // groups # C_out group size\n out = []\n for g in range(groups):\n input_g = padded_input[:, g * C_in_grp : (g + 1) * C_in_grp]\n weight_g = weight[g * C_out_grp : (g + 1) * C_out_grp, ...]\n bias_g = bias[g * C_out_grp : (g + 1) * C_out_grp]\n out_g = np.zeros((N, C_out_grp, (H - K + 1) // stride, (W - K + 1) // stride))\n for i in range((H - K + 1) // stride):\n for j in range((W - K + 1) // stride):\n si, sj = stride * i, stride * j\n input_block = input_g[:, None, :, si : si + K, sj : sj + K]\n out_g[:, :, i, j] = (input_block * weight_g).reshape(N, C_out_grp, -1).sum(\n axis=2\n ) + bias_g[None, 
:]\n out.append(out_g)\n return np.concatenate(out, axis=1)", "def __init__(self, input_dim=(3, 32, 32), num_filters=32, filter_size=7,\n hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,\n dtype=np.float32):\n self.params = {}\n self.reg = reg\n self.dtype = dtype\n \n ############################################################################\n # TODO: Initialize weights and biases for the three-layer convolutional #\n # network. Weights should be initialized from a Gaussian with standard #\n # deviation equal to weight_scale; biases should be initialized to zero. #\n # All weights and biases should be stored in the dictionary self.params. #\n ############################################################################\n \n # Store weights and biases for the convolutional layer using the keys 'W1' and 'b1'; \n C, H, W = input_dim\n filter_sizes = (filter_size, filter_size)\n self.params['W1'] = np.random.normal(0, weight_scale, [num_filters, C, filter_sizes[0], filter_sizes[1]])\n self.params['b1'] = np.zeros((num_filters, ))\n\n # use keys 'W2' and 'b2' for the weights and biases of the hidden affine layer;\n # In this case, ConvLayer doesn't reduce the spatial size of the input, (N, C, H, W) -> Conv -> (N, F, H, W)\n # To satisfy this constraint, (W + 2 * pad - filter_size) / stride + 1 = W need to hold, which led to pad = (F - S) / 2 where S == 1\n # (N, C, H, W) -> Conv -> (N, F, H, W) -> Pooling -> (N, F, H/2, W/2)\n # In a FC_NN, FCL weights (input_dim, hidden_dim) where every img is flatten into a 1D array of length D = F * H/2 * W/2.\n self.params['W2'] = np.random.normal(0, weight_scale, [num_filters * (H / 2) * (W / 2), hidden_dim])\n self.params['b2'] = np.zeros((hidden_dim, ))\n\n # And the keys 'W3' and 'b3' for the weights and biases of the output affine layer. 
\n self.params['W3'] = np.random.normal(0, weight_scale, [hidden_dim, num_classes])\n self.params['b3'] = np.zeros((num_classes, ))\n\n \n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "def conv2d(x, n_filters,\n n_in = 0,\n k_h=5, k_w=5,\n stride_h=2, stride_w=2,\n stddev=0.02,\n activation=lambda x: x,\n bias=False,\n padding='VALID',\n name=\"Conv2D\"):\n with tf.variable_scope(name):\n with tf.name_scope('weights'):\n if(n_in == 0):\n w = tf.get_variable(\n 'w', [k_h, k_w, x.get_shape()[-1], n_filters],\n initializer=tf.contrib.layers.xavier_initializer())\n else:\n w = tf.get_variable(\n 'w', [k_h, k_w, n_in, n_filters],\n initializer=tf.contrib.layers.xavier_initializer())\n variable_summaries(w, name + '/weights')\n with tf.name_scope('conv'): \n conv = tf.nn.conv2d(\n x, w, strides=[1, stride_h, stride_w, 1], padding=padding)\n if bias:\n with tf.name_scope('biases'):\n b = tf.get_variable(\n 'b', [n_filters],\n initializer=tf.contrib.layers.xavier_initializer())\n variable_summaries(b, name + '/bias')\n with tf.name_scope('conv'): \n conv = conv + b\n \n with tf.name_scope('conv'): \n tf.histogram_summary(name + '/conv', conv) \n return conv", "def test_convolution():\n # Default test\n inputs_shape = [3,3,4,5,3]\n filters_shape = [3,1,4,4,3]\n test_convolution_for_parameters(inputs_shape, filters_shape,\n \"Default test\")\n # All dimensions 1\n inputs_shape = [1,1,1,1,1]\n filters_shape = [1,1,1,1,1]\n test_convolution_for_parameters(inputs_shape, filters_shape,\n \"Input and filter dimensions 1\")\n # Filter spans all dimensions\n # This will lead to a failure for theano 2d3d for some reason\n # (for now we ignore this and remove theano2d3d for this test\n inputs_shape = [3,3,4,5,3]\n filters_shape = [3,3,4,5,3]\n test_convolution_for_parameters(inputs_shape, filters_shape,\n \"Filter dimension = Input dimension\")\n # Filter smaller for all dimensions\n inputs_shape = [3,3,4,5,3]\n filters_shape = [3,2,2,2,3]\n test_convolution_for_parameters(inputs_shape, filters_shape, \n \"Filter dimension < all Input dimension\")\n # 1,1,1,1,1 filter\n # Filter smaller for all dimensions\n inputs_shape = [3,3,4,5,1]\n filters_shape = [3,1,1,1,1]\n test_convolution_for_parameters(inputs_shape, filters_shape, \n \"Filter dimension 1 everywhere\")", "def compute_output(self, input_images, filter_shape, image_shape, poolsize=(2, 2), \n Pstruct = None, b= None):\n\n assert image_shape[1] == filter_shape[1]\n # the bias is a 1D tensor -- one bias per output feature map\n # convolve input feature maps with filters\n\n\n batch_size = image_shape[0] \n fwidth = Pstruct[0]['U1'].shape[0]\n fheight = Pstruct[0]['U2'].shape[0]\n nbr_channels = image_shape[1]\n nbr_filters = Pstruct[0]['U3'].shape[0]\n initial_n_rows = image_shape[2]\n initial_n_cols = image_shape[3]\n \n # Final number of rows and columns \n final_n_rows = initial_n_rows - fwidth + 1\n final_n_cols = initial_n_cols - fheight + 1\n # The convolved input images\n input4D = theano.shared(np.zeros((batch_size, nbr_filters, \n final_n_rows, final_n_cols)))\n print 'batch size ', batch_size \n one_image_shape = (1, initial_n_rows, initial_n_cols)\n # assert one_image_shape == (1,28,28)\n for image_index in range(batch_size):\n for channel_index in range(nbr_channels):\n # Convolve image with index image_index in the batch\n input4D = 
self.convolve_one_image(input4D, \n input_images[image_index,channel_index,:,:].reshape((1, initial_n_rows, initial_n_cols)),\n one_image_shape,\n Pstruct, \n filter_shape, \n image_index,\n channel_index) \n # downsample each feature map individually, using maxpooling\n start = time.time()\n pooled_out = downsample.max_pool_2d(input=input4D,\n ds=poolsize, \n ignore_border=True)\n end = time.time()\n self.downsample_time = (end - start)*1000/ image_shape[0]\n \n \n # add the bias term. Since the bias is a vector (1D array), we first\n # reshape it to a tensor of shape (1,n_filters,1,1). Each bias will\n # thus be broadcasted across mini-batches and feature map\n # width & height\n self.output = T.tanh(pooled_out + b.dimshuffle('x', 0, 'x', 'x'))", "def call(self, input):\n for r in range(self.tile_num):\n for c in range(self.tile_num):\n # do frequency conv on each tile\n offset = [[r*self.tile_size+self.tile_size/2, c*self.tile_size+self.tile_size/2] for i in range(BATCHSIZE)]\n input_tile = tf.image.extract_glimpse(input, \n [self.tile_size, self.tile_size],\n offset, centered=False, normalized=False) \n pad_pixels = (self.fft_size - self.tile_size) / 2\n input_tile = tf.image.pad_to_bounding_box(\n input_tile, pad_pixels, pad_pixels, self.fft_size, self.fft_size)\n\n input_tile = tf.transpose(input_tile, perm=[0,3,1,2])\n input_fft = tf.spectral.fft2d(tf.cast(input_tile, tf.complex64))\n output_fft = tf.multiply(self.kernel_freq, input_fft[0,:])\n output_fft_accum = tf.reduce_sum(output_fft, 1)\n output_batch_i = tf.spectral.ifft2d(output_fft_accum)\n bias_expand = tf.expand_dims(tf.expand_dims(self.bias, 1),1)\n output_tile_accum = tf.expand_dims(tf.real(output_batch_i) + bias_expand, 0)\n for b in range(1,BATCHSIZE):\n output_fft = tf.multiply(self.kernel_freq, input_fft[b,:])\n output_fft_accum = tf.reduce_sum(output_fft, 1)\n output_fft_batch_i = tf.spectral.ifft2d(output_fft_accum)\n bias_expand = tf.expand_dims(tf.expand_dims(self.bias, 1),1)\n output_tile_accum = tf.concat([output_tile_accum, \n tf.expand_dims(tf.real(output_fft_batch_i) + bias_expand, 0)],0)\n\n # Concat col tiles\n output_accum_col = output_tile_accum\n if c != 0:\n overlap = output_accum_col[:,:,:,-pad_pixels:] + output_tile_accum[:,:,:,0:pad_pixels]\n output_accum_col = tf.concat([output_accum_col[:,:,:,0:-pad_pixels], \n overlap, \n output_tile_accum[:,:,:,pad_pixels:]], \n 3)\n # Concat tow output tiles\n output_accum = output_accum_col\n if r != 0:\n overlap = output_accum[:,:,-pad_pixels:,:] + output_accum_col[:,:,0:pad_pixels,:]\n output_accum = tf.concat([output_accum[:,:,0:-pad_pixels,:], \n overlap, \n output_accum_col[:,:,pad_pixels:,:]], \n 2)\n\n output_accum = tf.transpose(output_accum, perm=[0,2,3,1])\n return tf.image.crop_to_bounding_box(output_accum, 0, 0, self.img_size, self.img_size)", "def conv2d_forward(x, w, b, pad, stride):\n #raise NotImplementedError\n \n\n \n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n ba,h,wd,c=x.shape\n f,fh,fw,c=w.shape\n n_h=((h-fh+2*pad)//stride)+1\n n_w=((wd-fw+2*pad)//stride)+1\n x_paded=np.pad(x,pad,'constant')\n temp_dim=x_paded.shape[3]\n #print(temp_dim)\n out=np.zeros((ba,n_h,n_w,f))\n for m in range(0,ba):\n for i in range(0,n_h):\n for j in range(0,n_w):\n for n in range(0,f):\n h_t=i*stride\n h_t2=i*stride+fh\n w_t=j*stride\n w_t2=j*stride+fw\n temp=x_paded[pad+m,h_t:h_t2,w_t:w_t2,pad:temp_dim-pad] 
\n out[m,i,j,n]=np.sum(temp*w[n,:,:,:])+b[n]\n \n return out", "def convolution(image, kernel, scale=None, offset=0):\n kernel = np.array(kernel).flatten().tolist()\n if len(kernel)==9:\n size = (3,3)\n elif len(kernel)==25:\n size = (5,5)\n else:\n raise ValueError('Kernel size must be (3,3) or (5,5).')\n return image.filter(ImageFilter.Kernel(size, kernel, scale, offset))", "def _conv_bn(**conv_params):\n filters = conv_params[\"filters\"]\n kernel_size = conv_params[\"kernel_size\"]\n strides = conv_params.setdefault(\"strides\", (1, 1,1))\n kernel_initializer = conv_params.setdefault(\"kernel_initializer\", \"he_normal\")\n padding = conv_params.setdefault(\"padding\", \"same\")\n kernel_regularizer = conv_params.setdefault(\"kernel_regularizer\", l2(1.e-4))\n\n def f(input):\n conv = Conv3D(filters=filters, kernel_size=kernel_size,\n strides=strides, padding=padding,\n kernel_initializer=kernel_initializer,\n kernel_regularizer=kernel_regularizer)(input)\n return BatchNormalization()(conv)\n\n return f", "def conv2d_block(input_tensor, n_filters, kernel_size=3, batchnorm=True, dilation_rate=1):\n # first layer\n x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer=\"he_normal\",\n padding=\"same\", dilation_rate=dilation_rate)(input_tensor)\n if batchnorm:\n x = BatchNormalization()(x)\n x = Activation(\"relu\")(x)\n\n # second layer\n x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer=\"he_normal\",\n padding=\"same\", dilation_rate=dilation_rate)(x)\n if batchnorm:\n x = BatchNormalization()(x)\n x = Activation(\"relu\")(x)\n return x", "def convolve(data, h, res_g=None, sub_blocks=None, mode='constant'):\n sub_blocks = sub_blocks or (1,) * data.ndim\n\n if not len(data.shape) in [1, 2, 3]:\n raise ValueError(\"dim = %s not supported\" % (len(data.shape)))\n\n if len(data.shape) != len(h.shape):\n raise ValueError(\"dimemnsion of data (%s) and h (%s) are different\" % (len(data.shape), len(h.shape)))\n\n if isinstance(data, OCLArray) and isinstance(h, OCLArray):\n return _convolve_buf(data, h, res_g)\n elif isinstance(data, np.ndarray) and isinstance(h, np.ndarray):\n if sub_blocks == (1,) * data.ndim and mode == 'constant':\n res = _convolve_np(data, h)\n else:\n # cut the image into tile and operate on every of them\n N_sub = [int(np.ceil(1. 
* n / s)) for n, s in zip(data.shape, sub_blocks)]\n Npads = [int(s / 2) for s in h.shape]\n res = np.empty(data.shape, np.float32)\n for data_tile, data_s_src, data_s_dest \\\n in tile_iterator(data, blocksize=N_sub,\n padsize=Npads,\n mode=mode):\n res_tile = _convolve_np(data_tile.copy(),\n h)\n res[data_s_src] = res_tile[data_s_dest]\n return res\n else:\n raise TypeError(\"unknown types (%s, %s)\" % (type(data), type(h)))", "def ddcconv1d(inputs: tf.Variable,\n weights: tf.Variable,\n offsets: tf.Variable,\n dilation_rate: int = 1,\n offset_mode='F',\n interpolate=True,\n name: str='ddcc1d'):\n with tf.variable_scope(name):\n batch_size, seq_length, channels = (int(v) for v in inputs.shape)\n filters, _, kernel_size = (int(v) for v in weights.shape)\n\n spec_shapes = {\n 'B': batch_size,\n 'S': seq_length,\n 'F': filters,\n 'C': channels,\n 'K': kernel_size\n }\n\n # Indices stuff\n with tf.variable_scope('KernelBaseIndices'):\n base_indices = np.arange(seq_length).repeat(kernel_size).reshape((-1, kernel_size))\n window_indices = tf.constant(base_indices, dtype=tf.float32, name='window_indices')\n receptive_field = tf.constant(np.linspace(-kernel_size + 1, 0, kernel_size) * dilation_rate,\n name='receptive_field',\n dtype=tf.float32)\n kernel_indices = window_indices + receptive_field\n\n with tf.variable_scope('BatchIndices'):\n # Create batch indices constant in BSFCK shape\n batch_indices_np = expand_transform(np.arange(batch_size, dtype=np.int32), 'B', 'BSFCK', spec_shapes, numpy=True)\n batch_indices = tf.constant(batch_indices_np, dtype=tf.int32, name='batch_indices')\n\n with tf.variable_scope('ChannelIndices'):\n # Create channel indices constant in BSFCK shape\n channel_indices_np = expand_transform(np.arange(channels, dtype=np.int32), 'C', 'BSFCK', spec_shapes, numpy=True)\n channel_indices = tf.constant(channel_indices_np, dtype=tf.int32, name='channel_indices')\n\n with tf.variable_scope('Sampling'):\n # SAMPLING IS EXTREMELY EXPENSIVE!!!!!\n coords = get_coords(kernel_indices, offsets, offset_mode=offset_mode, spec_shapes=spec_shapes)\n\n if interpolate:\n # Left and right indices, e.g. 
index of 3.65 would be 3 on the left and 4 on the right\n indices_left = tf.cast(tf.floor(coords), tf.int32)\n indices_right = tf.cast(tf.ceil(coords), tf.int32)\n\n # Calculate interpolation, for index 3.65 interpolation factor would be 0.65\n interpolation = coords - tf.cast(indices_left, tf.float32)\n\n # Sample both values (on the lef and right)\n # Sample input of shape BSC with BSFCK3 indices (produced by stack) -> BSFCK for each side (left and right)\n vals_left = tf.gather_nd(inputs, tf.stack((batch_indices, indices_left, channel_indices), axis=-1))\n vals_right = tf.gather_nd(inputs, tf.stack((batch_indices, indices_right, channel_indices), axis=-1))\n\n # Interpolated values\n samples = vals_left + (vals_right - vals_left) * interpolation\n else:\n batch_idx = tf.stack((batch_indices, tf.cast(tf.floor(coords), tf.int32), channel_indices), axis=-1)\n samples = tf.gather_nd(inputs, batch_idx)\n\n with tf.variable_scope('Convolution'):\n # Apply weights: BSFCK * FCK = BSFCK\n conv = samples * weights\n\n # Sum across kernel: BSFCK -> BSFC\n conv = tf.reduce_sum(conv, axis=-1)\n\n # Sum across channels: BSFC -> BSF\n conv = tf.reduce_sum(conv, axis=-1)\n\n return conv", "def conv_forward(A_prev, W, b, hparameters):\n \n # Size and dimension\n (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape\n (f, f, n_C_prev, n_C) = W.shape\n stride = hparameters[\"stride\"]\n pad = hparameters[\"pad\"]\n n_H = int((n_H_prev - f + 2 * pad) / stride) + 1\n n_W = int((n_W_prev - f + 2 * pad) / stride) + 1\n\n Z = np.zeros((m, n_H, n_W, n_C)) # Initialize output\n A_prev_pad = zero_pad(A_prev, pad) # Padding the previous layer\n \n for i in range(m):\n a_prev_pad = A_prev_pad[i, :, :, :]\n for h in range(n_H):\n for w in range(n_W):\n for c in range(n_C):\n # Find the current \"slice\"\n vert_start = h * stride\n vert_end = h * stride + f\n horiz_start = w * stride\n horiz_end = w * stride + f\n a_slice_prev = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]\n\n # Convolve the slice with current filter W and bias b\n Z[i, h, w, c] = conv_single_step(a_slice_prev, W[:, :, :, c], b[:, :, :, c])\n\n assert(Z.shape == (m, n_H, n_W, n_C))\n cache = (A_prev, W, b, hparameters)\n\n return Z, cache", "def conv1d(data_arr, kernel_arr, tarr_len, discrete_kernel_shape, mode='valid'):\n\n assert(data_arr.ndim == 2)\n output_shape = discrete_kernel_shape[1:]\n if (kernel_arr.ndim == 2):\n # Algorithm assumes a \"to\" axis on the kernel. 
Add it.\n kernel_arr = add_axes(kernel_arr, 1, 'before last')\n discrete_kernel_shape = discrete_kernel_shape[0:1] + (1,) + discrete_kernel_shape[1:2]\n else:\n check(kernel_arr.ndim == 3)\n\n # Convolutions leave the time component on the inside, but we want it on the outside\n # So we do the iterations in reverse order, and flip the result with transpose()\n # The result is indexed as [tidx][to idx][from idx]\n if cf.use_theano:\n # We use slices from_idx:from_idx+1 because conv2d expects 2D objects\n # We then index [:,0] to remove the spurious dimension\n result = T.stack(\n [ T.stack(\n [ T.signal.conv.conv2d(data_arr[:, from_idx:from_idx+1 ],\n kernel_arr[:, to_idx, from_idx:from_idx+1 ],\n image_shape = (tarr_len, 1),\n filter_shape = (discrete_kernel_shape[0], 1),\n border_mode = mode)[:,0]\n for to_idx in np.arange(discrete_kernel_shape[1]) ] )\n for from_idx in np.arange(discrete_kernel_shape[2]) ] ).T\n else:\n assert(discrete_kernel_shape == kernel_arr.shape)\n assert(tarr_len == data_arr.shape[0])\n result = np.stack(\n [ np.stack(\n [ scipy.signal.convolve(data_arr[:, from_idx ],\n kernel_arr[:, to_idx, from_idx ],\n mode=mode)\n for to_idx in np.arange(kernel_arr.shape[1]) ] )\n for from_idx in np.arange(kernel_arr.shape[2]) ] ).T\n\n return result.reshape((tarr_len - discrete_kernel_shape[0] + 1,) + output_shape)", "def conv2d_bn(x,\n filters,\n filter_shape,\n padding='valid',\n strides=(1, 1),\n data_format='channels_first',\n name=None):\n if name is not None:\n bn_name = name + '_bn'\n conv_name = name + '_conv'\n else:\n bn_name = None\n conv_name = None\n bn_axis = 1 if data_format=='channels_first' else -1\n x = layers.Conv2D(\n filters, filter_shape,\n strides=strides,\n padding=padding,\n use_bias=False, # batch norm already does some shifting\n data_format=data_format,\n name=conv_name)(x)\n x = layers.BatchNormalization(\n axis=bn_axis,\n scale=False, # scaling will be done by the next layer (only for linear activation)\n name=bn_name)(x)\n return layers.Activation('relu', name=name)(x)", "def conv(batch_input, out_channels, stride):\n with tf.variable_scope(\"conv\"):\n in_channels = batch_input.get_shape()[3]\n filter = tf.get_variable(\"filter\", [4, 4, in_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.02))\n # [batch, in_height, in_width, in_channels], [filter_width, filter_height, in_channels, out_channels]\n # => [batch, out_height, out_width, out_channels]\n padded_input = tf.pad(batch_input, [[0, 0], [1, 1], [1, 1], [0, 0]], mode=\"CONSTANT\")\n conv = tf.nn.conv2d(padded_input, filter, [1, stride, stride, 1], padding=\"VALID\")\n return conv", "def conv2d( # pylint: disable=redefined-builtin,dangerous-default-value\n input,\n filter=None,\n strides=None,\n padding=None,\n use_cudnn_on_gpu=True,\n data_format=\"NHWC\",\n dilations=[1, 1, 1, 1],\n name=None,\n filters=None):\n filter = deprecation.deprecated_argument_lookup(\n \"filters\", filters, \"filter\", filter)\n padding, explicit_paddings = convert_padding(padding)\n if data_format is None:\n data_format = \"NHWC\"\n channel_index = 1 if data_format.startswith(\"NC\") else 3\n\n strides = _get_sequence(strides, 2, channel_index, \"strides\")\n dilations = _get_sequence(dilations, 2, channel_index, \"dilations\")\n\n shape = input.shape\n # shape object may lack ndims, e.g., if input is an np.ndarray. 
In that case,\n # we fall back to len(shape).\n ndims = getattr(shape, \"ndims\", -1)\n if ndims == -1:\n ndims = len(shape)\n if ndims in (4, 3, 2, 1, 0, None):\n # We avoid calling squeeze_batch_dims to reduce extra python function\n # call slowdown in eager mode. This branch doesn't require reshapes.\n return gen_nn_ops.conv2d(\n input,\n filter=filter,\n strides=strides,\n padding=padding,\n use_cudnn_on_gpu=use_cudnn_on_gpu,\n explicit_paddings=explicit_paddings,\n data_format=data_format,\n dilations=dilations,\n name=name)\n return squeeze_batch_dims(\n input,\n functools.partial(\n gen_nn_ops.conv2d,\n filter=filter,\n strides=strides,\n padding=padding,\n use_cudnn_on_gpu=use_cudnn_on_gpu,\n explicit_paddings=explicit_paddings,\n data_format=data_format,\n dilations=dilations),\n inner_rank=3,\n name=name)", "def moffat_convolution_fft(im_array,n_fwhm,beta,fwhm) :\n\n r_s = fwhm/(2. *math.sqrt(2.**(1./beta)-1.))\n\n im_kernel_array = moffat_kernel(n_fwhm,beta,r_s)\n fftconv_image = signal.fftconvolve(im_array,im_kernel_array,mode = 'same')\n\n return (fftconv_image)", "def __reslayer_bottleneck(self, inputs, in_filters, out_filters, stride=1):\n with tf.variable_scope('sub1'):\n kernel = tf.get_variable('weights', [1, 1, in_filters, out_filters / 4],\n initializer=xavier_initializer(\n dtype=tf.float32),\n dtype=tf.float32)\n conv = tf.nn.conv2d(inputs, kernel, [1, stride, stride, 1],\n padding='SAME',\n name='conv')\n batch_norm = self.__batch_norm_wrapper(conv, decay=0.9999, shape=[0, 1, 2])\n conv = tf.nn.elu(batch_norm, 'elu')\n\n with tf.variable_scope('sub2'):\n kernel = tf.get_variable('weights',\n [3, 3, out_filters / 4, out_filters / 4],\n initializer=xavier_initializer(\n dtype=tf.float32),\n dtype=tf.float32)\n conv = tf.nn.conv2d(conv, kernel, [1, 1, 1, 1], padding='SAME',\n name='conv1')\n batch_norm = self.__batch_norm_wrapper(conv, decay=0.9999, shape=[0, 1, 2])\n conv = tf.nn.elu(batch_norm, 'elu')\n\n with tf.variable_scope('sub3'):\n kernel = tf.get_variable('weights', [1, 1, out_filters / 4, out_filters],\n initializer=xavier_initializer(\n dtype=tf.float32),\n dtype=tf.float32)\n conv = tf.nn.conv2d(conv, kernel, [1, 1, 1, 1],\n padding='SAME',\n name='conv')\n batch_norm = self.__batch_norm_wrapper(conv, decay=0.9999, shape=[0, 1, 2])\n\n with tf.variable_scope('subadd'):\n if in_filters != out_filters:\n kernel = tf.get_variable('weights', [1, 1, in_filters, out_filters],\n initializer=xavier_initializer(\n dtype=tf.float32),\n dtype=tf.float32)\n inputs = tf.nn.conv2d(\n inputs, kernel, [1, stride, stride, 1], padding='SAME')\n batch_norm += inputs\n conv = tf.nn.elu(batch_norm, 'elu')\n\n num = np.power(2, np.floor(np.log2(out_filters) / 2))\n\n grid = self.__put_activations_on_grid(conv, (int(num),\n int(out_filters /\n num)))\n tf.summary.image('sub3/activations', grid, max_outputs=1)\n\n return conv", "def upconv_block(i, filters, shape, activation='relu', padding='same',\n data_format='channels_first'):\n c1 = Conv3D(filters, shape, activation=activation,\n padding=padding, data_format=data_format)(i)\n c2 = Conv3D(filters, shape, activation=activation,\n padding=padding, data_format=data_format)(c1)\n u = UpSampling3D(size=(1, 2, 2), data_format=data_format)(c2)\n c3 = Conv3D(int(filters / 2),\n (1, 2, 2),\n activation=activation,\n padding=padding,\n data_format=data_format)(u)\n return c3", "def convolve(img, fourier_kernel):\n return np.fft.ifftshift(np.fft.irfft2(np.fft.rfft2(img) * fourier_kernel))", "def deconv_layer_with_stride(self, inputs, 
field_size, channels_size, stride,\n initializer_type, name, act_func=tf.nn.relu):\n batch, height, width, in_channels = inputs.get_shape().as_list()\n #shape0 = tf.shape(inputs)[0]\n assert in_channels == channels_size[0], (\n 'Number of input channels doe not match filter inputs channels.'\n )\n with tf.variable_scope(name):\n channels_size.reverse() # now [out_c, in_c]\n filter_size = field_size + channels_size\n bias_size = [channels_size[0]]\n\n if initializer_type:\n initializer = tf.contrib.layers.xavier_initializer()\n else:\n initializer = tf.truncated_normal_initializer(stddev=.1)\n\n weights = tf.get_variable('W', filter_size, initializer=initializer)\n biases = tf.get_variable(\n 'b', bias_size, initializer=tf.constant_initializer(.1))\n\n #target_shape_tensor = tf.stack(\n # [shape0, stride*height, stride*width, channels_size[0]])\n\n conv = tf.nn.conv2d_transpose(\n inputs,\n weights,\n #target_shape_tensor,\n [batch, stride*height, stride*width, channels_size[0]],\n [1, stride, stride, 1],\n padding='SAME')\n conv_bias = tf.nn.bias_add(conv, biases)\n output = act_func(conv_bias)\n #output.set_shape([batch, stride*height, stride*width, channels_size[0]])\n\n return output", "def conv2d(inputs, filters, kernel_size=3, strides=(1, 1)):\n return tf.layers.conv2d(\n inputs,\n filters,\n kernel_size,\n strides,\n padding=\"same\",\n activation=tf.nn.relu,\n bias_initializer=tf.initializers.constant(0.0),\n kernel_initializer=tf.keras.initializers.glorot_normal(),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(weight_decay))", "def block2(\n x,\n filters,\n kernel_size=3,\n stride=1,\n conv_shortcut=False,\n name='',\n norm_use=\"bn\"\n):\n preact = normalize_layer(x, norm_use=norm_use, name=name + '_preact_')\n preact = layers.Activation('relu', name=name + '_preact_relu')(preact)\n\n if conv_shortcut is True:\n shortcut = layers.Conv2D(\n 4 * filters,\n 1,\n strides=stride,\n kernel_initializer='he_normal',\n name=name + '_0_conv',\n )(preact)\n else:\n shortcut = layers.MaxPooling2D(1, strides=stride)(x) if stride > 1 else x\n\n x = layers.Conv2D(\n filters,\n 1,\n strides=1,\n use_bias=False,\n kernel_initializer='he_normal',\n name=name + '_1_conv',\n )(preact)\n x = normalize_layer(x, norm_use=norm_use, name=name + '_1_')\n x = layers.Activation('relu', name=name + '_1_relu')(x)\n\n x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)\n x = layers.Conv2D(\n filters,\n kernel_size,\n strides=stride,\n kernel_initializer='he_normal',\n use_bias=False,\n name=name + '_2_conv'\n )(x)\n x = normalize_layer(x, norm_use=norm_use, name=name + '_2_')\n x = layers.Activation('relu', name=name + '_2_relu')(x)\n\n x = layers.Conv2D(\n 4 * filters,\n 1,\n name=name + '_3_conv',\n kernel_initializer='he_normal'\n )(x)\n x = layers.Add(name=name + '_out')([shortcut, x])\n return x", "def conv_forward(x, w):\n out = None\n ###########################################################################\n # TODO: Implement the convolutional forward pass. #\n # Hint: you can use the function np.pad for padding. 
#\n ###########################################################################\n N,C,H,W = x.shape\n F,C,HH,WW = w.shape\n H1 = H-HH+1\n W1 = W-WW+1\n out = np.zeros([N,F,H1,W1])\n wn = np.tile(w,(N,1,1,1,1))\n all_but_first = tuple(range(out.ndim))[1:]\n for f in range(F):\n for i in range(H1):\n for j in range(W1):\n out[:,f,i,j] = np.sum(x[:,:,i:i+HH,j:j+WW] * wn[:,f], axis=all_but_first)\n\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, w)\n return out, cache", "def convolve_im(im: np.array,\n kernel: np.array,\n verbose=True):\n ### START YOUR CODE HERE ### (You can change anything inside this block)\n \"\"\"\n\tcompared to the 4a solution this just adds padding to the filter if its smaller than the image\n\tthis is done by using the second parameter in fft.fft2 \n\t\n\tfirst it applies fourier transforms on the kernel and the image\n\tthen it sets the image to be the pointwise multiplication of the transforms\n\n the image is inverse fourier transformed and filtered for real values\n the domain image is shifted and taken the absolute value of\n the fourier transform of the image and kernel are also shifted and set to be the absolute value\n\tlastly everything is displayed in the subplots\n \"\"\"\n conv_result = im \n \n if verbose:\n fftKernel=np.fft.fft2(kernel,im.shape)\n fftImage=np.fft.fft2(conv_result)\n\t\t\n\t\t\n\t\t\n conv_result=np.multiply(fftImage,fftKernel)\n fftImageTransformed=conv_result\n\t\t\n \n conv_result=np.fft.ifft2(conv_result)\n \n conv_result=np.real(conv_result)\n\n fftImageTransformed=np.fft.fftshift(fftImageTransformed)\n fftImage=np.fft.fftshift(fftImage)\n fftKernel=np.fft.fftshift(fftKernel)\n\n fftImageTransformed=np.absolute(fftImageTransformed)\n fftImage=np.absolute(fftImage)\n fftKernel=np.absolute(fftKernel)\n\t\t\n\t\t\n # Use plt.subplot to place two or more images beside eachother\n plt.figure(figsize=(20, 4))\n # plt.subplot(num_rows, num_cols, position (1-indexed))\n plt.subplot(1, 5, 1)\n plt.imshow(im, cmap=\"gray\")\n plt.subplot(1, 5, 2)\n plt.imshow(fftImage, cmap=\"gray\")\n plt.subplot(1, 5, 3)\n plt.imshow(fftKernel, cmap=\"gray\")\n plt.subplot(1, 5, 4)\n plt.imshow(fftImageTransformed, cmap=\"gray\")\n plt.subplot(1, 5, 5)\n plt.imshow(conv_result, cmap=\"gray\")\n ### END YOUR CODE HERE ###\n return conv_result", "def pool_forward(A_prev, kernel_shape, stride=(1, 1), mode='max'):\n\n m = A_prev.shape[0]\n image_h = A_prev.shape[1]\n image_w = A_prev.shape[2]\n nc = A_prev.shape[3]\n filter_h = kernel_shape[0]\n filter_w = kernel_shape[1]\n s1 = stride[0]\n s2 = stride[1]\n n_dim1 = int((image_h - filter_h) / stride[0]) + 1\n n_dim2 = int((image_w - filter_w) / stride[1]) + 1\n pool = np.zeros((m, n_dim1, n_dim2, nc))\n new_images = A_prev.copy()\n\n for x in range(n_dim1):\n for y in range(n_dim2):\n mini_matrix = new_images[:, x * s1: x * s1 + filter_h,\n y * s2: y * s2 + filter_w, :]\n if mode == 'max':\n values = np.max(mini_matrix, axis=(1, 2))\n else:\n values = np.average(mini_matrix, axis=(1, 2))\n pool[:, x, y, :] = values\n return pool", "def schedule_conv2d_NCHWc(num_filter, kernel_size, stride, padding, outs):\n s = tvm.create_schedule([x.op for x in outs])\n\n def traverse(op):\n \"\"\"Traverse operators from computation graph\"\"\"\n # inline all one-to-one-mapping operators except the last stage (output)\n if tag.is_broadcast(op.tag):\n if op not in s.outputs:\n 
s[op].compute_inline()\n for tensor in op.input_tensors:\n if tensor.op.input_tensors:\n traverse(tensor.op)\n\n if 'conv2d_NCHWc' in op.tag:\n conv_out = op.output(0)\n kernel = conv_out.op.input_tensors[1]\n data_vec = conv_out.op.input_tensors[0]\n data = data_vec.op.input_tensors[0] \\\n if isinstance(data_vec.op, tvm.tensor.ComputeOp) and \"pad\" not in data_vec.op.tag \\\n else data_vec\n if isinstance(data.op, tvm.tensor.ComputeOp) and \"pad\" in data.op.tag:\n data_pad = data\n data = data_pad.op.input_tensors[0]\n\n n, ic_chunk, h, w, ic_block = [x.value for x in data.shape]\n ic = ic_chunk * ic_block\n original_data = tvm.placeholder((n, ic, h, w), dtype=conv_out.dtype)\n\n kh, kw = kernel_size\n original_kernel = tvm.placeholder((num_filter, ic, kh, kw), dtype=conv_out.dtype)\n\n wkl = _get_workload(original_data, original_kernel, stride, padding, conv_out.dtype)\n sch = _get_schedule(wkl)\n _SCH_TO_SCH_FUNC[type(sch)](s, wkl, data_vec,\n kernel, conv_out, outs[0])\n\n traverse(outs[0].op)\n return s", "def pool(images, kernel_shape, stride, mode='max'):\n k_h, k_w = kernel_shape\n sh, sw = stride\n m, ih, iw, ic = images.shape\n nh = int(((ih - k_h) / sh) + 1)\n nw = int(((iw - k_w) / sw) + 1)\n conv = np.zeros((m, nh, nw, ic))\n for i in range(nh):\n for j in range(nw):\n imp = images[:, (i * sh):(i * sh) + k_h, (j * sw):(sw * j) + k_w]\n if mode == 'max':\n res = np.max(imp, axis=1)\n res = np.max(res, axis=1)\n if mode == 'avg':\n res = np.mean(imp, axis=1)\n res = np.mean(res, axis=1)\n conv[:, i, j] = res\n return conv", "def conv(f, kerneltype, kernelwidth, boundary='extend'):\n if kerneltype == 'box': kernel = Box1DKernel(kernelwidth)\n elif kerneltype == 'gaussian': kernel = Gaussian1DKernel(kernelwidth)\n fconv = convolve(f, kernel, boundary=boundary)\n return fconv", "def _conv_block(\n x: tf.Tensor, layers: int, filters: int, is_training: bool\n) -> tf.Tensor:\n for i in range(layers):\n x = tf.layers.Conv2D(filters, 3, padding=\"same\")(x)\n x = tf.layers.BatchNormalization(fused=True)(x, training=is_training)\n x = tf.nn.relu(x)\n return tf.layers.MaxPooling2D(2, 2, padding=\"valid\")(x)", "def PeriodicConv(out_chan, filter_shape,\n strides=None, padding='VALID', dimension_numbers=('NHWC', 'HWIO', 'NHWC'), W_init=None,\n b_init=normal(1e-6), ignore_b=False, dtype=jnp.float64):\n lhs_spec, rhs_spec, out_spec = dimension_numbers\n one = (1,) * len(filter_shape)\n strides = strides or one\n W_init = W_init or glorot_normal(rhs_spec.index('I'), rhs_spec.index('O'))\n\n def init_fun(rng, input_shape):\n\n # add padding dimensions for periodic BC; move this line into conv_general_shape_tuple after defining padding='PERIODIC'\n\n\n add_input = list(np.array(filter_shape) - 1) # new\n input_shape += np.array([0]+add_input+[0]) # only works with stride=(1,1)\n\n filter_shape_iter = iter(filter_shape)\n kernel_shape = [out_chan if c == 'O' else\n input_shape[lhs_spec.index('C')] if c == 'I' else\n next(filter_shape_iter) for c in rhs_spec]\n\n output_shape = lax.conv_general_shape_tuple(\n input_shape, kernel_shape, strides, padding, dimension_numbers)\n\n\n k1, k2 = random.split(rng)\n\n if not ignore_b:\n bias_shape = [out_chan if c == 'C' else 1 for c in out_spec]\n bias_shape = tuple(itertools.dropwhile(lambda x: x == 1, bias_shape))\n\n W, b = W_init(k1, kernel_shape, dtype=dtype), b_init(k2, bias_shape, dtype=dtype)\n return tuple(output_shape), (W, b)\n else:\n W = W_init(k1, kernel_shape, dtype=dtype)\n return output_shape, (W, )\n\n def apply_fun(params, inputs, 
**kwargs):\n\n # move this line into lax.conv_general_dilated after defining padding='PERIODIC'\n inputs = periodic_padding(inputs.astype(dtype), filter_shape, strides)\n # print(inputs.shape)\n if not ignore_b:\n W, b = params\n return lax.conv_general_dilated(inputs, W, strides, padding, one, one,\n dimension_numbers) + b\n else:\n W = params\n return lax.conv_general_dilated(inputs, W, strides, padding, one, one,\n dimension_numbers)\n\n return init_fun, apply_fun", "def conv2d_block(input_tensor, n_filters, kernel_size = 3, batchnorm = True):\n # first layer\n x = layers.Conv2D(filters = n_filters, kernel_size = (kernel_size, kernel_size), kernel_initializer = 'random_uniform', padding = 'same')(input_tensor)\n if batchnorm:\n x = layers.BatchNormalization()(x)\n x = layers.Activation('relu')(x)\n \n # second layer\n x = layers.Conv2D(filters = n_filters, kernel_size = (kernel_size, kernel_size),\\\n kernel_initializer = 'he_normal', padding = 'same')(x)\n if batchnorm:\n x = layers.BatchNormalization()(x)\n x = layers.Activation('relu')(x)\n \n return x", "def conv2d_output_shape(height, width, filter_height, filter_width, out_channels, stride):\n return (out_channels, ((height - filter_height) / stride + 1), ((width - filter_width) / stride + 1))", "def conv_forward_naive(x, w, b, conv_param):\n out = None\n ###########################################################################\n # TODO: Implement the convolutional forward pass. #\n # Hint: you can use the function np.pad for padding. #\n ###########################################################################\n stride = conv_param['stride']\n pad = conv_param['pad']\n #Use np.pad for zero padding of the input.\n #Save shape of input data and filters.\n N,C,H,W = x.shape\n F,C,HH,WW = w.shape\n x = np.pad(x,[(0,0),(0,0),(1,1),(1,1)],mode = 'constant')\n #Convolve each filter to create the activation maps.\n '''Compute activation maps size.First dimension:number of training examples.\n Second dimension:depth is as the number of filters.\n Width and height will be computed based on the equation that we showed in the lectures.\n The equation :(W - F + 2P)/S + 1 where:\n -W:input size.\n -F:receptive field(number of filters).\n -P: padding size.\n -S: the stride that we use.\n '''\n out_width = int((W - WW + 2 * pad) / (stride) + 1)\n out_height = int((H - HH + 2 * pad) / (stride) + 1)\n out = np.zeros((N,F,out_height,out_width))\n #Compute the activation maps for each one of the N training examples.\n for t in range(N):\n curr_x = x[t,:,:,:]\n #Loop over each filter.\n for k in range(F):\n curr_filter = w[k,:,:,:]\n #Go over all valid spots in current training example.\n out_i = 0\n for i in range(0,x.shape[2] - HH + 1,stride):\n out_j = 0\n for j in range(0,x.shape[3] - WW + 1,stride):\n #Compute dot product in current spot.\n dot_product = np.sum(curr_filter * curr_x[:,i:(i + HH),j:(j + WW)])\n out[t,k,out_i,out_j] = dot_product \\\n + b[k]\n #Increment out_j\n out_j += 1\n out_i += 1\n\n\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, w, b, conv_param)\n return out, cache", "def conv2d(layer_input, filters, f_size=3, padding='same', strides=2):\n d = Conv2D(filters, kernel_size=f_size, strides=strides, padding=padding)(layer_input)\n d = LeakyReLU(alpha=0.2)(d)\n d = BatchNormalization()(d)\n d = Conv2D(filters, kernel_size=f_size, strides=1, padding=padding)(d)\n d = 
LeakyReLU(alpha=0.2)(d)\n d = BatchNormalization()(d)\n return d", "def conv2d(layer_input, filters, f_size=3, padding='same', strides=2):\n d = Conv2D(filters, kernel_size=f_size, strides=strides, padding=padding)(layer_input)\n d = LeakyReLU(alpha=0.2)(d)\n d = BatchNormalization()(d)\n d = Conv2D(filters, kernel_size=f_size, strides=1, padding=padding)(d)\n d = LeakyReLU(alpha=0.2)(d)\n d = BatchNormalization()(d)\n return d", "def conv2d_fixed_padding(inputs,\n filters,\n kernel_size,\n strides,\n data_format='channels_first'):\n if strides > 1:\n inputs = fixed_padding(inputs, kernel_size, data_format=data_format)\n\n outputs = tf.layers.conv2d(\n inputs=inputs,\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=('SAME' if strides == 1 else 'VALID'),\n use_bias=False,\n kernel_initializer=tf.variance_scaling_initializer(),\n data_format=data_format)\n\n return outputs", "def convolution_shape(input_shape, n_filters, filter_shape, stride, padding):\n img_height, img_width, _ = input_shape\n height = (img_height + 2 * padding[0] - filter_shape[0]) / float(stride) + 1\n width = (img_width + 2 * padding[1] - filter_shape[1]) / float(stride) + 1\n\n return int(height), int(width), n_filters", "def conv2d_bn(self,x,filters,num_row,num_col,padding='same',strides=(1, 1),name=None):\n if name is not None:\n bn_name = name + '_bn'\n conv_name = name + '_conv'\n else:\n bn_name = None\n conv_name = None\n bn_axis = 3\n x = layers.Conv2D(\n filters, (num_row, num_col),\n strides=strides,\n padding=padding,\n use_bias=False,\n name=conv_name)(\n x)\n x = layers.BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)\n x = layers.Activation('relu', name=name)(x)\n return x", "def convolve_im(im: np.array,\n kernel: np.array,\n verbose=True):\n\t\n ### START YOUR CODE HERE ### (You can change anything inside this block) \n\t\n H,W = np.shape(im)\n h,w = np.shape(kernel)\n t_b = (H-h)//2\n l_r = (W-w)//2\n kernel_padded = np.pad(kernel, ((t_b, t_b+1),(l_r, l_r+1)), 'constant')\n kernel_padded = np.pad(kernel, ((0, 2*t_b),(0, 2*l_r)), 'constant')\n fft_kernel = np.fft.fft2(kernel_padded, s=None, axes=(-2, -1), norm=None)\n \n \n im_fft = np.fft.fft2(im, s=None, axes=(-2, -1), norm=None) \n im_filt = im_fft*fft_kernel \n conv_result = np.fft.ifft2(im_filt, s=None, axes=(-2, -1), norm=None).real \n\n if verbose:\n # Use plt.subplot to place two or more images beside eachother\n plt.figure(figsize=(12, 4))\n # plt.subplot(num_rows, num_cols, position (1-indexed))\n plt.subplot(1, 2, 1)\n plt.imshow(im, cmap=\"gray\")\n plt.subplot(1, 2, 2) \n plt.imshow(conv_result, cmap=\"gray\")\n\n ### END YOUR CODE HERE ###\n return conv_result", "def test_positional_convolution_forward(ctx):\n # num_batch * channel * height * width input\n # i.e. (2, 2, 6, 6)\n in_data = \\\n mx.nd.array(\n [\n [[[1, 2, -1, 0, 1, 1],\n [3, 6, -5, 4, 2, -2],\n [9, 6, -1, 3, 1, 3],\n [4, 2, 5, 7, 3, 1],\n [0, 1, 1, 2, 2, 1],\n [3, 1, 2, 4, 3, 3]],\n\n [[3, 1, 2, 4, 3, 3],\n [0, 1, 1, 2, 2, 1],\n [4, 2, 5, 7, 3, 1],\n [9, 6, -1, 3, 1, 3],\n [3, 6, -5, 4, 2, -2],\n [1, 2, -1, 0, 1, 1]]],\n [[[1, 2, 3, 4, 5, 6],\n [6, 5, 4, 3, 2, 1],\n [0, 0, 1, 1, 2, 2],\n [3, 3, 0, -1, -1, -2],\n [3, 1, 0, 3, 3, 2],\n [5, 6, 7, -1, -2, 0]],\n\n [[5, 6, 7, -1, -2, 0],\n [3, 1, 0, 3, 3, 2],\n [3, 3, 0, -1, -1, -2],\n [0, 0, 1, 1, 2, 2],\n [6, 5, 4, 3, 2, 1],\n [1, 2, 3, 4, 5, 6]]]\n ], ctx=ctx)\n\n # num_filter * channel * K * K weight\n # i.e. 
(2, 2, 3, 3)\n weight = \\\n mx.nd.array(\n [\n [[[1, 0, 1],\n [0, 2, -1],\n [2, 3, 1]],\n\n [[1, 1, 0],\n [2, -1, 2],\n [3, -2, 4]]],\n\n [[[0, 1, 2],\n [-1, 2, 3],\n [4, 1, -5]],\n\n [[3, 0, -1],\n [-1, 2, 1],\n [5, 6, 2]]]\n ], ctx=ctx)\n\n # num_batch * channel * out_height * out_width scale\n # i.e. (2, 2, 6, 6)\n scale = \\\n mx.nd.array(\n [\n [[[1, 1, 1, 1, 1, 1],\n [1, -1, 1, -1, 1, -1],\n [-1, 1, -1, 1, -1, 1],\n [-1, -1, -1, -1, -1, -1],\n [2, 1, 2, 2, 1, 1],\n [1, 2, 1, 2, 1, 2]],\n\n [[1, 1, 1, 1, 1, 1],\n [1, -1, -1, 1, 1, 1],\n [-1, 1, -1, 1, -1, 1],\n [1, -1, -1, -1, -1, 1],\n [2, -1, 2, -2, 1, 1],\n [1, 2, 1, 2, 1, 2]]],\n\n [[[6, 5, 4, 3, 2, 1],\n [1, 2, 3, 4, 5, 6],\n [1, -1, 2, -2, 3, -3],\n [4, -4, 5, -5, 6, -6],\n [1, 1, 1, 1, 1, 1],\n [-1, -1, -1, -1, -1, -1]],\n\n [[-1, -1, -1, -1, -1, -1],\n [1, 1, 1, 1, 1, 1],\n [4, -4, 5, -5, 6, -6],\n [1, -1, 2, -2, 3, -3],\n [1, 2, 3, 4, 5, 6],\n [6, 5, 4, 3, 2, 1]]],\n ], ctx=ctx)\n\n # num_filter bias\n # i.e. (2, )\n bias = \\\n mx.nd.array(\n [1, 2], ctx=ctx)\n\n in_data_var = mx.symbol.Variable(name=\"in_data\")\n weight_var = mx.symbol.Variable(name=\"weight\")\n scale_var = mx.symbol.Variable(name=\"scale\")\n bias_var = mx.symbol.Variable(name=\"bias\")\n\n op = mx.symbol.contrib.PositionalConvolution(name='test_positional_convolution',\n data=in_data_var,\n scale=scale_var,\n weight=weight_var,\n bias=bias_var,\n num_filter=2,\n pad=(1, 1), kernel=(3, 3), stride=(1, 1))\n be = op.bind(ctx=ctx, args={'in_data': in_data,\n 'scale': scale,\n 'weight': weight,\n 'bias': bias})\n be.forward(True)\n out_o = be.outputs[0].asnumpy()\n print(out_o)", "def conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs):\n static_shape = inputs.get_shape()\n if not static_shape or len(static_shape) != 4:\n raise ValueError(\"Inputs to conv must have statically known rank 4. 
\"\n \"Shape: \" + str(static_shape))\n # Add support for left padding.\n if kwargs.get(\"padding\") == \"LEFT\":\n dilation_rate = (1, 1)\n if \"dilation_rate\" in kwargs:\n dilation_rate = kwargs[\"dilation_rate\"]\n assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1\n height_padding = 2 * (kernel_size[0] // 2) * dilation_rate[0]\n cond_padding = tf.cond(\n tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),\n lambda: tf.constant(2 * (kernel_size[1] // 2) * dilation_rate[1]))\n width_padding = 0 if static_shape[2] == 1 else cond_padding\n padding = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]\n inputs = tf.pad(inputs, padding)\n # Set middle two dimensions to None to prevent convolution from complaining\n inputs.set_shape([static_shape[0], None, None, static_shape[3]])\n kwargs[\"padding\"] = \"VALID\"\n\n def conv2d_kernel(kernel_size_arg, name_suffix):\n \"\"\"Call conv2d but add suffix to name.\"\"\"\n name = \"{}_{}\".format(kwargs.get(\"name\", \"conv\"), name_suffix)\n original_name = kwargs.pop(\"name\", None)\n original_force2d = kwargs.pop(\"force2d\", None)\n result = conv_fn(inputs, filters, kernel_size_arg, name=name, **kwargs)\n if original_name is not None:\n kwargs[\"name\"] = original_name # Restore for other calls.\n if original_force2d is not None:\n kwargs[\"force2d\"] = original_force2d\n return result\n\n return conv2d_kernel(kernel_size, \"single\")", "def conv_layer(self, bias, img, fltr):\n # filter shape: w, h, c, n\n fltr_n, fltr_w, fltr_h, fltr_c = fltr.shape\n # image shape: w, h, c\n img_w, img_h, img_c = img.shape\n\n output_dim = int((img_w - fltr_w) / self._stride) + 1\n output = np.zeros((output_dim, output_dim, fltr_n))\n\n for f in range(fltr_n):\n in_x = out_x = 0\n while in_x + fltr_w <= img_w:\n in_y = out_y = 0\n while in_y + fltr_h <= img_h:\n val = np.sum(fltr[f] * img[in_x:in_x + fltr_w, in_y:in_y + fltr_h, :]) + bias[f]\n normalized = val / (fltr_w * fltr_h)\n output[out_x, out_y, f] = normalized\n in_y += self._stride\n out_y += 1\n in_x += self._stride\n out_x += 1\n return output", "def conv(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, padding=1, stride=stride, bias=False)", "def Conv2D(inputs, filters, kernel_size, data_format, strides=1):\n if strides > 1:\n inputs = fixed_padding(inputs, kernel_size, data_format)\n\n return tf.layers.Conv2D(filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=('same' if strides == 1 else 'valid'),\n use_bias=False,\n data_format=data_format)(inputs)", "def test_conv2d():\n img = np.array([\n [0.3, 0.5, 0.7, 0.9],\n [0.1, 0.3, 0.5, 0.7],\n [0.9, 0.7, 0.5, 0.3],\n ])\n template = np.array([\n [1, 0],\n [1, 0],\n ])\n template = np.flipud(np.fliplr(template))\n return fftconvolve(img, template, mode='valid')", "def conv_forward(x, w, b, conv_param):\n stride = conv_param['stride']\n pad = conv_param['pad']\n N, C, H, W = x.shape\n F, C, HH, WW = w.shape\n H_out = 1 + (H + 2 * pad - HH) / stride\n W_out = 1 + (H + 2 * pad - WW) / stride\n H_out = int(H_out)\n W_out = int(W_out)\n\n out = np.zeros((N, F, H_out, W_out))\n for n in range(N):\n conv_in = np.pad(x[n], ((0, 0), (pad, pad), (pad, pad)), mode='constant')\n for f in range(F):\n conv_w = w[f]\n conv_b = b[f]\n for i in range(H_out):\n for j in range(W_out):\n conv_i = i * stride\n conv_j = j * stride\n conv_area = conv_in[:, conv_i : conv_i + HH, conv_j : conv_j + WW]\n out[n, f, i, j] = np.sum(conv_area * conv_w) + conv_b\n\n cache = (x, w, b, conv_param)\n 
return out, cache", "def conv1d(\n value=None,\n filters=None,\n stride=None,\n padding=None,\n use_cudnn_on_gpu=None,\n data_format=None,\n name=None,\n input=None, # pylint: disable=redefined-builtin\n dilations=None):\n value = deprecation.deprecated_argument_lookup(\"input\", input, \"value\", value)\n with ops.name_scope(name, \"conv1d\", [value, filters]) as name:\n # Reshape the input tensor to batch_shape + [1, in_width, in_channels]\n if data_format is None or data_format == \"NHWC\" or data_format == \"NWC\":\n data_format = \"NHWC\"\n spatial_start_dim = -3\n channel_index = 2\n elif data_format == \"NCHW\" or data_format == \"NCW\":\n data_format = \"NCHW\"\n spatial_start_dim = -2\n channel_index = 1\n else:\n raise ValueError(\"`data_format` must be 'NWC' or 'NCW'. \"\n f\"Received: data_format={data_format}\")\n strides = [1] + _get_sequence(stride, 1, channel_index, \"stride\")\n dilations = [1] + _get_sequence(dilations, 1, channel_index, \"dilations\")\n\n value = array_ops.expand_dims(value, spatial_start_dim)\n filters = array_ops.expand_dims(filters, 0)\n if value.shape.ndims in (4, 3, 2, 1, 0, None):\n result = gen_nn_ops.conv2d(\n value,\n filters,\n strides,\n padding,\n use_cudnn_on_gpu=use_cudnn_on_gpu,\n data_format=data_format,\n dilations=dilations,\n name=name)\n else:\n result = squeeze_batch_dims(\n value,\n functools.partial(\n gen_nn_ops.conv2d,\n filter=filters,\n strides=strides,\n padding=padding,\n use_cudnn_on_gpu=use_cudnn_on_gpu,\n data_format=data_format,\n dilations=dilations,\n ),\n inner_rank=3,\n name=name)\n return array_ops.squeeze(result, [spatial_start_dim])", "def conv_2D(img,kernel,stride=1):\n\n m,n = img.shape\n r,c = kernel.shape\n\n kernel = np.flip(kernel,axis=1)\n kernel = np.flip(kernel,axis=0)\n\n c_m, c_n = int(np.ceil((m-r+1)/stride)), int(np.ceil((n-c+1)/stride))\n img_conv = np.zeros((c_m,c_n),dtype=float)\n\n for i,j in it.product(range(c_m),range(c_n)):\n img_conv[i,j] = (img[i*stride:i*stride+r,j*stride:j*stride+c] * kernel).sum()\n\n return img_conv", "def _cconvolve(x, H, nfft, wlen, axis):\n \n # pad with wlen-1 zeros for overlap & FFT\n x = pad_along_axis(x, [0, wlen - 1], axis=axis)\n xf = np.fft.rfft(x, nfft, axis=axis)\n \n # take product with window in freq. 
domain\n product = multiply_along_axis(xf, H, axis=axis)\n\n # back transform to sample domain and return\n return np.fft.irfft(product, axis=axis).real", "def upsample_conv_2d(x, w, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda', gpu=True):\r\n\r\n assert isinstance(factor, int) and factor >= 1\r\n\r\n # Check weight shape.\r\n w = tf.convert_to_tensor(w)\r\n assert w.shape.rank == 4\r\n convH = w.shape[0]\r\n convW = w.shape[1]\r\n inC = Oncuda._shape(w, 2)\r\n outC = Oncuda._shape(w, 3)\r\n assert convW == convH\r\n\r\n # Setup filter kernel.\r\n if k is None:\r\n k = [1] * factor\r\n k = Oncuda._setup_kernel(k) * (gain * (factor ** 2))\r\n p = (k.shape[0] - factor) - (convW - 1)\r\n\r\n # Determine data dimensions.\r\n if data_format == 'NCHW':\r\n stride = [1, 1, factor, factor]\r\n output_shape = [Oncuda._shape(x, 0), outC, (Oncuda._shape(x, 2) - 1) * factor + convH, (Oncuda._shape(x, 3) - 1) * factor + convW]\r\n num_groups = Oncuda._shape(x, 1) // inC\r\n else:\r\n stride = [1, factor, factor, 1]\r\n output_shape = [Oncuda._shape(x, 0), (Oncuda._shape(x, 1) - 1) * factor + convH, (Oncuda._shape(x, 2) - 1) * factor + convW, outC]\r\n num_groups = Oncuda._shape(x, 3) // inC\r\n\r\n # Transpose weights.\r\n w = tf.reshape(w, [convH, convW, inC, num_groups, -1])\r\n w = tf.transpose(w[::-1, ::-1], [0, 1, 4, 3, 2])\r\n w = tf.reshape(w, [convH, convW, -1, num_groups * inC])\r\n\r\n # Execute.\r\n x = tf.nn.conv2d_transpose(x, w, output_shape=output_shape, strides=stride, padding='VALID', data_format=data_format)\r\n return Oncuda._simple_upfirdn_2d(x, k, pad0=(p+1)//2+factor-1, pad1=p//2+1, data_format=data_format, impl=impl, gpu=gpu)" ]
[ "0.77974457", "0.68532217", "0.6843606", "0.68252665", "0.6792097", "0.6787238", "0.6778164", "0.6764888", "0.67277294", "0.66646516", "0.6578862", "0.65779644", "0.6572897", "0.65086555", "0.64898103", "0.6478974", "0.6441421", "0.64312285", "0.6427597", "0.6360412", "0.6359944", "0.63500553", "0.6331426", "0.630478", "0.6289885", "0.62894833", "0.6286213", "0.6273556", "0.62409383", "0.6236706", "0.6230877", "0.6229002", "0.6225157", "0.62181956", "0.62084496", "0.62001705", "0.61952424", "0.6194187", "0.61835575", "0.6181558", "0.61783254", "0.61681765", "0.6156195", "0.6149066", "0.6135066", "0.6130706", "0.61296713", "0.6128902", "0.61209", "0.6110322", "0.61007255", "0.6090038", "0.6086874", "0.6086638", "0.60827523", "0.6069087", "0.60624033", "0.60613877", "0.60470545", "0.60369587", "0.60348415", "0.6034222", "0.6033962", "0.6021348", "0.60157007", "0.60073406", "0.6005627", "0.6005464", "0.60034186", "0.600247", "0.5999268", "0.5997919", "0.5997392", "0.5991928", "0.59919125", "0.5990031", "0.5989547", "0.5983604", "0.5980518", "0.5977329", "0.59729666", "0.59724385", "0.5967644", "0.59629565", "0.59629565", "0.59525186", "0.5948714", "0.5937881", "0.5935745", "0.5928359", "0.5921384", "0.5920463", "0.5918905", "0.59092075", "0.5904927", "0.58980036", "0.58978814", "0.5897708", "0.58953375", "0.58902043" ]
0.62824273
27
A naive implementation of the forward pass for a maxpooling layer.
def max_pool_forward(x, pool_param):
    out = None
    ###########################################################################
    # TODO: Implement the max-pooling forward pass                            #
    ###########################################################################
    N, C, H, W = x.shape
    pool_height = pool_param['pool_height']
    pool_width = pool_param['pool_width']
    stride = pool_param['stride']
    H_prime = int(1 + (H - pool_height) / stride)
    W_prime = int(1 + (W - pool_width) / stride)  #python 3 / is just float number division
    out = np.zeros((N,C,H_prime,W_prime))
    for n in range(N):
        for i in range(H_prime):
            for j in range(W_prime):
                h_start = i * stride
                h_end = h_start + pool_height
                w_start = j * stride
                w_end = w_start + pool_width
                pool_window = x[n, :, h_start:h_end, w_start:w_end]
                pool_window = pool_window.reshape((C,-1))
                out[n,:,i,j] = np.max(pool_window, axis=1)
    ###########################################################################
    #                             END OF YOUR CODE                            #
    ###########################################################################
    cache = (x, pool_param)
    return out, cache
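A minimal sanity-check sketch for the forward pass above, assuming NumPy is available as np and the max_pool_forward function shown here is in scope (the input values and expected output below are illustrative, not part of the dataset row): a 1x1x4x4 input pooled with a 2x2 window at stride 2 should return the maximum of each non-overlapping 2x2 block.

import numpy as np

# Illustrative smoke test: H_prime = W_prime = 1 + (4 - 2) / 2 = 2,
# so the output has shape (1, 1, 2, 2) and each entry is the max of one block.
x = np.arange(16, dtype=float).reshape(1, 1, 4, 4)
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}

out, _ = max_pool_forward(x, pool_param)

expected = np.array([[[[5., 7.],
                       [13., 15.]]]])
assert out.shape == (1, 1, 2, 2)
assert np.allclose(out, expected)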
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_pool_forward_naive(x, pool_param):\n out = None\n ###########################################################################\n # TODO: Implement the max pooling forward pass #\n ###########################################################################\n N,C,H,W = x.shape\n pool_width = pool_param['pool_width']\n pool_height = pool_param['pool_height']\n stride = pool_param['stride']\n #Compute output size.\n out_width = int((W - pool_width) / stride + 1)\n out_height = int((H - pool_height) / stride + 1) \n out = np.zeros((N,C,out_height,out_width))\n #Naive implementation:Loop over each training example and max pool.(Naive===lots of FOR)\n for i in range(N):\n #Counters for output indices.\n a = 0;b = 0\n for t in range(0,H - pool_height + 1,stride):\n for k in range(0,W - pool_width + 1,stride):\n #Get max in each depth.\n for c in range(C):\n out[i,c,a,b] += np.max(x[i,c,t:(t + pool_height),k:(k + pool_width)])\n if (b == out_width - 1):\n a += 1\n b = 0\n else:\n b += 1\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, pool_param)\n return out, cache", "def max_pool_forward_naive(x, pool_param):\n out = None\n #############################################################################\n # TODO: Implement the max pooling forward pass #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = (x, pool_param)\n return out, cache", "def max_pool_forward_naive(x, pool_param):\n\tout = None\n\t\n\tN, C, H, W = x.shape\n\tHH = pool_param['pool_height']\n\tWW = pool_param['pool_width']\n\tstride = pool_param['stride']\n\tHp = int(1 + (H-HH)/stride)\n\tWp = int(1 + (W-WW)/stride)\n\n\tout = np.zeros((N,C,Hp,Wp))\n\n\tfor n in range(N):\n\t\tfor j in range(Hp):\n\t\t\tfor i in range(Wp):\n\t\t\t\tout[n,:,j,i] = np.amax(x[n,:,j*stride:j*stride+HH,i*stride:i*stride+WW], axis=(-1,-2))\n\n\tcache = (x, pool_param)\n\treturn out, cache", "def max_pool_forward_naive(x, pool_param):\n N, C, H, W = x.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n\n Hc = (H - pool_height) / stride + 1\n Wc = (W - pool_width) / stride + 1\n out = np.random.randn(N, C, Hc, Wc)\n #############################################################################\n # TODO: Implement the max pooling forward pass #\n #############################################################################\n for i in xrange(N):\n for c in xrange(C):\n for hc in xrange(Hc):\n for wc in xrange(Wc):\n out[i, c, hc, wc] = np.max(x[i, c, hc:stride*hc+pool_height, wc:stride*wc+pool_width])\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = (x, pool_param)\n return out, cache", "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n x = self.pool1(F.relu(self.batch1(self.conv1(x))))\n x = self.pool2(F.relu(self.batch2(self.conv2(x))))\n x = F.relu(self.batch3a(self.conv3a(x)))\n x = self.pool3(F.relu(self.batch3b(self.conv3b(x))))\n x = F.relu(self.batch4a(self.conv4a(x)))\n x = 
self.pool4(F.relu(self.batch4b(self.conv4b(x))))\n x = F.relu(self.batch5a(self.conv5a(x)))\n x = self.pool5(F.relu(self.batch5b(self.conv5b(x))))\n x = self.avgpool(x)\n x = x.reshape(x.shape[0], -1)\n out = self.fc1(x)\n\n# raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def max_pool_forward(x, pool_param):\n out = None\n ###########################################################################\n # TODO: Implement the max-pooling forward pass #\n ###########################################################################\n \n N, C, H, W = x.shape\n pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']\n stride = pool_param['stride']\n \n HH = 1 + (H - pool_height) // stride\n WW = 1 + (W - pool_width) // stride\n x_strides = x[0][0].strides\n strides = tuple(np.array(x_strides)*stride)\n \n out = np.zeros((N,C,HH,WW))\n \n for n in range(N):\n for c in range(C):\n out_shape = (HH,WW,pool_height,pool_width)\n pool_blocks = np.lib.stride_tricks.as_strided(x[n][c],out_shape,strides+x_strides)\n out[n][c] = np.max(pool_blocks, axis=(2,3))\n \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, pool_param)\n return out, cache", "def forward(self, x):\n\n x = F.max_pool2d(F.relu(self.batch_norm1(self.conv1(x))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm2(self.conv2(x))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm3_b(self.conv3_b(F.relu(self.batch_norm3_a(self.conv3_a(x)))))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm4_b(self.conv4_b(F.relu(self.batch_norm4_a(self.conv4_a(x)))))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm5_b(self.conv5_b(F.relu(self.batch_norm5_a(self.conv5_a(x)))))), 3, stride=2, padding=1)\n x = self.avg_pool(x).view(-1,512)\n out = self.linear(x)\n\n return out", "def forward(self, x):\n \n x = F.relu(self.conv1_bn(self.conv1(self.conv0_bn(x))))\n x = F.relu(self.conv2_bn(self.conv2(x)))\n x = F.relu(self.conv3_bn(self.conv3( self.maxpool2(x))))\n x = F.relu(self.conv4_bn(self.conv4( self.maxpool3(x))))\n x = self.maxpool4(x) \n x = x.view(-1, 1184)\n x = F.relu(self.fc1(x))\n x = self.dense1_bn(x)\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x)", "def max_pool_forward(x, pool_param):\n out = None\n ###########################################################################\n # TODO: Implement the max-pooling forward pass #\n ###########################################################################\n (N, C, H, W) = x.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n HH = int(1 + (H - pool_height) / stride)\n WW = int(1 + (W - pool_width) / stride)\n\n out = np.zeros((N, C, HH, WW))\n\n for n in range(N):\n for h in range(HH):\n for w in range(WW):\n h1 = h * stride\n h2 = h1 + pool_height\n w1 = w * stride\n w2 = w1 + pool_width\n block = x[n, :, h1:h2, w1:w2]\n out[n,:,h,w] = np.max(block.reshape((C, pool_height*pool_width)), axis=1)\n\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, pool_param)\n return out, cache", "def conv_relu_pool_forward_naive(x, w, b, conv_param, pool_param):\n\ta, 
conv_cache = conv_forward_naive(x, w, b, conv_param)\n\ts, relu_cache = relu_forward(a)\n\tout, pool_cache = max_pool_forward_naive(s, pool_param)\n\tcache = (conv_cache, relu_cache, pool_cache)\n\treturn out, cache", "def forward(input, label, conv, maxpool, softmax):\n output = conv.forward((input / 255) - 0.5)\n output = maxpool.forward(output)\n output = softmax.forward(output)\n\n loss = -np.log(output[label])\n acc = 1 if np.argmax(output) == label else 0\n\n return output, loss, acc", "def _pool_step(\n X,\n pool_size, #TODO(mmd): Better name\n pooler = tf.nn.max_pool,\n):\n # TODO(mmd): Why all the expansion squeezing necessary?\n x = tf.expand_dims(x, 3) # num_samples x num_features x num_filters_in x 1\n x = pooler(x, ksize=[1,pool_size,1,1], strides=[1,pool_size,1,1], padding='SAME')\n #tf.maximum\n return tf.squeeze(x, [3]) # num_samples x num_features / p x num_filters", "def forward(self, x):\n x = self.conv1(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x)\n x = self.conv2(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x) \n x = self.maxpool(x) \n return x", "def forward(self, inputData):\n weights = self.Weights\n biases = self.Biases\n poolParams = self.poolParams\n cache = [] #zmienna przechowujaca produkty warstw - pomocna do propagacji wstecznej\n #warstwa wejsciowa\n layer0 = np.asarray(inputData)\n cache.append(layer0)\n #pierwsza warstwa konwolucyjna\n layer1 = convolution_forward(np.asarray([layer0]),weights[0],biases[0])\n cache.append(layer1)\n #pierwsza warstwa max poolingu\n layer2 = maxpool_forward(layer1, poolParams[0][0], poolParams[0][1])\n cache.append(layer2)\n #druga warstwa konwolucyjna\n layer3 = convolution_forward(layer2,weights[1],biases[1])\n cache.append(layer3)\n #druga warstwa max poolingu\n layer4 = maxpool_forward(layer3, poolParams[1][0], poolParams[1][1])\n cache.append(layer4)\n #pierwsza warstwa fully connected zrealizowana jako warstwa konwolucyjna\n layer5 = convolution_forward( layer4,weights[2] ,biases[2] )\n cache.append(layer5)\n #druga warstwa fully connected z funkcja aktywacji typu ReLU\n layer6 = act.relu(np.dot(weights[3],layer5[:,0]).transpose() + biases[3]).transpose()\n cache.append(layer6)\n #softmax\n layer7 = np.dot( weights[4], layer6[:,0] ).transpose() + biases[4]\n layer7 -= np.max(layer7)\n layer7 = np.exp(layer7)/sum(np.exp(layer7))\n\n return (layer7, cache)", "def forward(self, inp: torch.Tensor) -> torch.Tensor:\n x = self.conv1(inp)\n x = self.maxpool(x)\n\n for i in range(self._num_layers):\n x = getattr(self, \"C%d\" % (i + 1))(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n return x", "def forward(self, x):\n x = self.pool(x)\n x = self.conv(x)\n x = x.reshape(x.shape[0], -1)\n x = self.relu(self.fc1(x))\n x = self.dropout1(x)\n x = self.fc2(x)\n x = self.dropout2(x)\n x = self.fc3(x)\n x = self.dropout3(x)\n x = self.fc4(x)\n\n return x", "def forward(self, *inputs):\n\n x = self.relu1(self.maxpool1(self.conv1(*inputs)))\n x = self.relu2(self.maxpool2(self.conv2_drop(self.conv2(x))))\n x = x.view(x.size(0), -1)\n x = self.relu3(self.fc1(x))\n x = self.dropout2(x)\n x = self.fc2(x)\n return self.log_softmax(x)", "def forward(self, input):\n x = self.emb(input)\n x = F.max_pool2d(x, kernel_size=x.shape[2:])\n x = x.view(x.shape[0:2])\n x = F.log_softmax(self.fc_final(x), dim=-1)\n return x", "def conv_relu_pool_forward(x, w, b, conv_param, pool_param):\n a, 
conv_cache = conv_forward_fast(x, w, b, conv_param)\n s, relu_cache = relu_forward(a)\n out, pool_cache = max_pool_forward_fast(s, pool_param)\n cache = (conv_cache, relu_cache, pool_cache)\n return out, cache", "def conv_relu_pool_forward(x, w, b, conv_param, pool_param):\n a, conv_cache = conv_forward_fast(x, w, b, conv_param)\n s, relu_cache = relu_forward(a)\n out, pool_cache = max_pool_forward_fast(s, pool_param)\n cache = (conv_cache, relu_cache, pool_cache)\n return out, cache", "def forward(self, x, pool_size=(2, 2), pool_type=\"avg\"):\n\n x = F.relu_(self.norm1(self.conv1(x)))\n x = F.relu_(self.norm2(self.conv2(x)))\n if pool_type == \"max\":\n x = F.max_pool2d(x, kernel_size=pool_size)\n elif pool_type == \"avg\":\n x = F.avg_pool2d(x, kernel_size=pool_size)\n elif pool_type == \"avg+max\":\n x1 = F.avg_pool2d(x, kernel_size=pool_size)\n x2 = F.max_pool2d(x, kernel_size=pool_size)\n x = x1 + x2\n else:\n raise Exception(\"Incorrect pooling type!\")\n return x", "def forward(self, x):\n x = x.permute(0, 2, 1)\n if self.return_indices:\n x, indices = F.max_pool1d(x, self.kernel_size, return_indices=self.return_indices)\n else:\n x = F.max_pool1d(x)\n x = x.permute(0, 2, 1)\n\n if self.return_indices:\n output = x, indices\n else:\n output = x\n return output", "def forward(self, x): \n # Layer 1\n x = F.elu(self.conv1(x)) # bsize x l1_channels x 1 x Nsamples\n x = self.batchnorm1(x)\n x = F.dropout(x, 0.25)\n x = x.permute(0, 2, 1, 3) # bsize x 1 x l1_channels x Nsamples\n\n # Layer 2\n x = self.padding1(x)\n x = F.elu(self.conv2(x)) # bsize x l2_channels x l1_channels x Nsamples\n x = self.batchnorm2(x) \n x = F.dropout(x, 0.25)\n x = self.pooling2(x) # bsize x l2_channels x floor(l1_channels/2) x floor(Nsamples/4)\n\n # Layer 3\n x = self.padding2(x)\n x = F.elu(self.conv3(x)) # bsize x l3_channels x floor(l1_channels/2) x floor(Nsamples/4)\n x = self.batchnorm3(x)\n x = F.dropout(x, 0.25)\n x = self.pooling3(x) # bsize x l3_channels x floor(l1_channels/4) x floor(Nsamples/16)\n\n # Fully-connected Layer\n x = x.view(-1, self.fc1.in_features) # bsize x (l3_channels*floor(l1_channels/4)*floor(Nsamples/16))\n x = F.sigmoid(self.fc1(x)) # bisze x self.fc1.out_features \n \n if self.fc1.out_features == 1:\n x = x.view(-1) # bsize (1D if 1 output unit)\n \n return x", "def forward(self, *inputs):\n\n x = functional.relu(functional.max_pool2d(self.conv1(*inputs), 2))\n x = functional.relu(functional.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = functional.relu(functional.max_pool2d(self.conv3(x), 2))\n x = x.view(x.size(0), -1)\n x = functional.relu(self.fc1(x))\n x = functional.dropout(x, training=self.training)\n x = self.fc2(x)\n return functional.log_softmax(x, dim=1)", "def max_pool_backward_naive(dout, cache):\n dx = None\n #############################################################################\n # TODO: Implement the max pooling backward pass #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dx", "def forward(self, x):\n if x.size()[0] != 1 or x.size()[1] != 200 or x.size()[2] != 96:\n return torch.zeros(1,1)\n x = x.view(1,1,x.size()[1],x.size()[2]) #1,1,200,96\n x = nn.MaxPool2d(2)(self.conv1(x))\n x = self.dropout(F.relu(x)) #1,3,96,46\n x = nn.MaxPool2d(2)(self.conv2(x))\n x = self.dropout(F.relu(x)) #1,6,47,21\n x = 
nn.MaxPool2d(2)(self.conv3(x))\n x = self.dropout(F.relu(x)) #1,12,21,8\n x = nn.MaxPool2d(2)(self.conv4(x))#1,24,8,2\n x = x.view(1,-1)#1,384\n x = self.fc1(F.relu(x))\n x = self.fc2(F.relu(x))\n x = self.fc3(F.relu(x))\n return F.sigmoid(x)", "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n for l in range(len(self.layers)):\n if l == 0:\n z = self.layers[l].forward(x)\n else:\n z = self.layers[l].forward(a)\n a = self.activations[l].forward(z)\n\n # output from softmax layer\n out = a\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def train(input, label, conv, maxpool, softmax, lr=0.005):\n # Forward\n output, loss, accuracy = forward(input, label, conv, maxpool, softmax)\n\n gradient = np.zeros(10)\n gradient[label] = -1 / output[label]\n\n # Backprop\n gradient = softmax.backprop(gradient, lr)\n gradient = maxpool.backprop(gradient)\n gradient = conv.backprop(gradient, lr)\n\n return loss, accuracy", "def max_pool_backward_naive(dout, cache):\n dx = None\n ###########################################################################\n # TODO: Implement the max pooling backward pass #\n ###########################################################################\n #Extract info from cache.\n x,pool_param = cache\n pool_width = pool_param['pool_width']\n pool_height = pool_param['pool_height']\n stride = pool_param['stride']\n N,C,H,W = x.shape\n\n #Start computing dx,same as forward pass loop with the correct stride over x.\n dx = np.zeros_like(x)\n for i in range(N):\n a = 0;b = 0\n for t in range(0,H - pool_height + 1,stride):\n for k in range(0,W - pool_width + 1,stride):\n #Go over all of the channels.\n for c in range(C):\n #Find max.\n max_index = np.argmax(x[i,c,t:(t + pool_height),k:(k + pool_width)])\n #Conver flat index.\n index = np.unravel_index(max_index,(pool_height,pool_width))\n dx[i,c,t + index[0],k + index[1]] += dout[i,c,a,b]\n if (b == dout.shape[3] - 1):\n a += 1\n b = 0\n else:\n b += 1\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def forward(self, input):\n x = self.emb(input)\n x = F.avg_pool2d(x, kernel_size=x.shape[2:])\n x = x.view(x.shape[0:2])\n output = F.log_softmax(self.fc_final(x), dim=-1)\n return output", "def forward(self, inputs):\n\n down1, indices_1, unpool_shape1 = self.layer_1(inputs=inputs,\n layer_size=2)\n down2, indices_2, unpool_shape2 = self.layer_2(inputs=down1,\n layer_size=2)\n down3, indices_3, unpool_shape3 = self.layer_3(inputs=down2,\n layer_size=3)\n down4, indices_4, unpool_shape4 = self.layer_4(inputs=down3,\n layer_size=3)\n down5, indices_5, unpool_shape5 = self.layer_5(inputs=down4,\n layer_size=3)\n down6, indices_6, unpool_shape6 = self.layer_6(inputs=down5,\n layer_size=3)\n up5 = self.layer_7(inputs=down6, indices=indices_6,\n output_shape=unpool_shape6, layer_size=3)\n up4 = self.layer_8(inputs=up5, indices=indices_5,\n output_shape=unpool_shape5, layer_size=3)\n up3 = self.layer_9(inputs=up4, indices=indices_4,\n output_shape=unpool_shape4, layer_size=3)\n up2 = self.layer_10(inputs=up3, indices=indices_3,\n output_shape=unpool_shape3, layer_size=3)\n up1 = self.layer_11(inputs=up2, indices=indices_2,\n output_shape=unpool_shape2, layer_size=2)\n output = self.layer_12(inputs=up1, indices=indices_1,\n output_shape=unpool_shape1, layer_size=2)\n\n return output", "def 
forward(self, x):\n # Convolutional Layers\n ## add pooling layers\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))\n x = x.view(-1, 256) # flatten to pass to fully connected layers\n\n # fully connected layers\n ## and dropout layers\n x = F.relu(self.dropout(self.fc1(x)))\n x = F.relu(self.dropout(self.fc2(x)))\n x = self.fc3(x)\n\n return x", "def add_pooling_layer(self, input_layer):\n return self._max_pool(input_layer)", "def forward(self, inputs):\n\n down1, indices_1, unpool_shape1 = self.layer_1(inputs=inputs,\n layer_size=2)\n down2, indices_2, unpool_shape2 = self.layer_2(inputs=down1,\n layer_size=2)\n down3, indices_3, unpool_shape3 = self.layer_3(inputs=down2,\n layer_size=3)\n down4, indices_4, unpool_shape4 = self.layer_4(inputs=down3,\n layer_size=3)\n down5, indices_5, unpool_shape5 = self.layer_5(inputs=down4,\n layer_size=3)\n\n up5 = self.layer_6(inputs=down5, indices=indices_5,\n output_shape=unpool_shape5, layer_size=3)\n up4 = self.layer_7(inputs=up5, indices=indices_4,\n output_shape=unpool_shape4, layer_size=3)\n up3 = self.layer_8(inputs=up4, indices=indices_3,\n output_shape=unpool_shape3, layer_size=3)\n up2 = self.layer_9(inputs=up3, indices=indices_2,\n output_shape=unpool_shape2, layer_size=2)\n output = self.layer_10(inputs=up2, indices=indices_1,\n output_shape=unpool_shape1, layer_size=2)\n\n return output", "def max_pool_backward_naive(dout, cache):\n x, pool_params = cache\n N, C, H, W = x.shape\n\n pool_height = pool_params['pool_height']\n pool_width = pool_params['pool_width']\n stride = pool_params['stride']\n\n Hc = (H - pool_height) / stride + 1\n Wc = (W - pool_width) / stride + 1\n\n dx = np.zeros(x.shape)\n #############################################################################\n # TODO: Implement the max pooling backward pass #\n #############################################################################\n for i in xrange(N):\n for c in xrange(C):\n for hc in xrange(Hc):\n for wc in xrange(Wc):\n subx = x[i, c, hc:stride*hc+pool_height, wc:stride*wc+pool_width]\n subdx = dx[i, c, hc:stride*hc+pool_height, wc:stride*wc+pool_width]\n max_value = np.max(subx)\n \n subdx += (subx == max_value) * dout[i, c, hc, wc]\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dx", "def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.maxpool(out)\n out = self.avgpool(out)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.avgpool(out)\n out = torch.flatten(out, 1)\n out = self.fc(out)\n return out", "def __call__(self, x):\n \n # Define the \"forward pass\" for this model based on the architecture detailed above.\n\n x = relu(self.conv1(x)[...,:-1,:])\n x = batchnorm(x, eps=1e-7)\n x = max_pool(x, (2, 2), 2)\n\n x = relu(self.conv2(x))\n x = batchnorm(x, eps=1e-7)\n x = max_pool(x, (2, 2), 2)\n\n x = relu(self.conv3(x)[...,:,:-1])\n x = batchnorm(x, eps=1e-7)\n x = max_pool(x, (2, 2), 2)\n\n x = relu(self.conv4(x)[...,:,:-1])\n x = batchnorm(x, eps=1e-7)\n x = max_pool(x, (2, 2), 2)\n\n x = relu(self.dense1(x.reshape(x.shape[0], -1)))\n return self.dense2(x)", "def mpool1( x, p):\n if p > 1:\n x = tf.expand_dims(x, 3) # N x M x F x 1\n x = tf.nn.max_pool(x, ksize=[1, p, 1, 1], strides=[1, p, 1, 1], padding='SAME')\n # tf.maximum\n return tf.squeeze(x, 
[3]) # N x M/p x F\n else:\n return x", "def max_pool_backward(dout, cache):\n dx = None\n ###########################################################################\n # TODO: Implement the max-pooling backward pass #\n ###########################################################################\n x, pool_param = cache\n N, C, H, W = x.shape\n pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']\n stride = pool_param['stride']\n \n HH = 1 + (H - pool_height) // stride\n WW = 1 + (W - pool_width) // stride\n x_strides = x[0][0].strides\n strides = tuple(np.array(x_strides)*stride)\n \n dx = np.zeros(x.shape)\n \n for n in range(N):\n for c in range(C):\n for h in range(HH):\n for w in range(WW):\n h_start = stride * h\n h_end = h_start + pool_height\n\n w_start = stride * w\n w_end = w_start + pool_width\n\n # get the pool window in the input x\n pool_window = x[n, c, h_start:h_end, w_start:w_end]\n \n m = np.max(pool_window)\n dx_window = np.where(pool_window == m, 1, 0)\n \n dx[n, c, h_start:h_end, w_start:w_end] += dx_window * dout[n, c, h, w]\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def max_pool(x,\n k_h,\n k_w,\n s_h,\n s_w,\n name,\n padding=\"VALID\"):\n with tf.name_scope(name):\n outputs = tf.nn.max_pool(x, [1, k_h, k_w, 1], [1, s_h, s_w, 1], padding)\n # Return layer's output\n return outputs", "def forward(self, inputs):\n\n down1, indices_1, unpool_shape1 = self.layer_1(inputs=inputs,\n layer_size=2)\n down2, indices_2, unpool_shape2 = self.layer_2(inputs=down1,\n layer_size=2)\n down3, indices_3, unpool_shape3 = self.layer_3(inputs=down2,\n layer_size=3)\n down4, indices_4, unpool_shape4 = self.layer_4(inputs=down3,\n layer_size=3)\n down5, indices_5, unpool_shape5 = self.layer_6(inputs=down4,\n layer_size=3)\n\n inter = self.layer_inter(down5)\n\n up1 = self.layer_7(inputs=inter, indices=indices_5, layer_size=3)\n\n up2 = self.layer_8(inputs=up1, indices=indices_4, layer_size=3)\n\n up3 = self.layer_9(inputs=up2, indices=indices_3, layer_size=3)\n\n up4 = self.layer_10(inputs=up3, indices=indices_2, layer_size=2)\n\n up5 = self.layer_11(inputs=up4, indices=indices_1, layer_size=2)\n return up5", "def forward(self, out):\n\n # 0th layer.\n index = 0\n out = self.pad_out(out, index)\n out = self.conv0(out)\n out = self.bn0(out)\n out = self.relu(out)\n\n # 1st layer.\n index = 1\n out = self.pad_out(out, index)\n out = self.conv1(out)\n out = self.bn1(out)\n out = self.relu(out)\n\n # 2nd layer.\n index = 2\n out = self.pad_out(out, index)\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n # Classification.\n # Average across the channels.\n # https://discuss.pytorch.org/t/global-average-pooling-in-pytorch/6721/4\n # In Keras it is implemented as: K.mean(inputs, axis=1). 
The channel is\n # the last dimension in Keras.\n out = torch.mean(out, dim=2)\n out = self.lin(out)\n\n # To imitate the cross entropy loss with the nll (negative log\n # likelihood) loss.\n out = log_softmax(out, dim=-1)\n\n return out", "def maxpool(input, filter_h, filter_w, stride_h, stride_w, padding, name):\n with tf.name_scope(name):\n mp = tf.nn.max_pool(input, ksize=[1, filter_h, filter_w, 1], strides=[1, stride_h, stride_w, 1],\n padding=padding)\n # print(name + \" : \", str(mp.shape))\n return mp", "def feed_forward_net(net_def, inputs):\n\n inp = inputs.copy()\n for n in range(0, len(net_def['layers'])):\n\n outputs = process_layer(net_def['layers'][n], inp)\n\n inp = outputs.copy()\n\n # Index of largest value\n return np.argmax(inp)", "def max_pool_backward(dout, cache):\n dx = None\n ###########################################################################\n # TODO: Implement the max-pooling backward pass #\n ###########################################################################\n x, pool_param = cache\n \n N, C, H, W = x.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n H_prime = int(1 + (H - pool_height) / stride)\n W_prime = int(1 + (W - pool_width) / stride) #python 3 / is just float number division\n \n dx = np.zeros((N, C, H, W))\n \n for n in range(N):\n for c in range(C):\n for i in range(H_prime):\n for j in range(W_prime):\n h_start = i * stride\n h_end = h_start + pool_height\n w_start = j * stride\n w_end = w_start + pool_width\n pool_window = x[n, c, h_start:h_end, w_start:w_end]\n maxValue = np.max(pool_window)\n dx[n,c,h_start:h_end,w_start:w_end] += dout[n,c,i,j] * (pool_window == maxValue)\n \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def forward_pass(self):\n # Have to use one_hot labels since sparse softmax doesn't allow\n # second derivatives.\n one_hot_train_labels = tf.one_hot(self.data.train_labels, self.way)\n train_embeddings_ = self.embedding_fn(\n self.data.train_images,\n depth_multiplier=self.depth_multiplier,\n reuse=tf.AUTO_REUSE)\n train_embeddings = train_embeddings_['embeddings']\n embedding_vars_dict = train_embeddings_['params']\n\n with tf.variable_scope('linear_classifier', reuse=tf.AUTO_REUSE):\n embedding_depth = train_embeddings.shape.as_list()[-1]\n fc_weights = weight_variable([embedding_depth, MAX_WAY])\n fc_bias = bias_variable([MAX_WAY])\n\n embedding_vars_keys = []\n embedding_vars = []\n embedding_vars_copy_ops = []\n for name, var in embedding_vars_dict.iteritems():\n embedding_vars_keys.append(name)\n if not self.is_training:\n with tf.variable_scope('weight_copy'):\n shape = var.shape.as_list()\n var_copy = tf.Variable(\n tf.zeros(shape), collections=[tf.GraphKeys.LOCAL_VARIABLES])\n var_copy_op = tf.assign(var_copy, var)\n embedding_vars_copy_ops.append(var_copy_op)\n embedding_vars.append(var_copy)\n else:\n embedding_vars.append(var)\n\n fc_vars_copy_ops = []\n if not self.is_training:\n with tf.variable_scope('weight_copy'):\n # fc_weights copy\n fc_weights_copy = tf.Variable(\n tf.zeros(fc_weights.shape.as_list()),\n collections=[tf.GraphKeys.LOCAL_VARIABLES])\n fc_weights_copy_op = tf.assign(fc_weights_copy, fc_weights)\n fc_vars_copy_ops.append(fc_weights_copy_op)\n\n # fc_bias copy\n fc_bias_copy = tf.Variable(\n tf.zeros(fc_bias.shape.as_list()),\n 
collections=[tf.GraphKeys.LOCAL_VARIABLES])\n fc_bias_copy_op = tf.assign(fc_bias_copy, fc_bias)\n fc_vars_copy_ops.append(fc_bias_copy_op)\n\n fc_weights = fc_weights_copy\n fc_bias = fc_bias_copy\n\n fc_vars = [fc_weights, fc_bias]\n num_embedding_vars = len(embedding_vars)\n num_fc_vars = len(fc_vars)\n\n def _cond(step, *args):\n del args\n num_steps = self.num_update_steps\n if not self.is_training:\n num_steps += self.additional_test_update_steps\n return step < num_steps\n\n def _body(step, *args):\n \"\"\"The inner update loop body.\"\"\"\n updated_embedding_vars = args[0:num_embedding_vars]\n updated_fc_vars = args[num_embedding_vars:num_embedding_vars +\n num_fc_vars]\n train_embeddings = self.embedding_fn(\n self.data.train_images,\n params=collections.OrderedDict(\n zip(embedding_vars_keys, updated_embedding_vars)),\n depth_multiplier=self.depth_multiplier,\n reuse=True)['embeddings']\n\n if self.proto_maml_fc_layer_on_support_set:\n # Set fc layer weights with prototypical equivalent values.\n prototypes = self.proto_maml_prototypes(train_embeddings)\n pmaml_fc_weights = self.proto_maml_fc_weights(\n prototypes, zero_pad_to_max_way=True)\n pmaml_fc_bias = self.proto_maml_fc_bias(\n prototypes, zero_pad_to_max_way=True)\n train_logits = tf.matmul(train_embeddings,\n pmaml_fc_weights) + pmaml_fc_bias\n else:\n updated_fc_weights, updated_fc_bias = updated_fc_vars\n train_logits = tf.matmul(train_embeddings,\n updated_fc_weights) + updated_fc_bias\n\n train_logits = train_logits[:, 0:self.way]\n loss = tf.losses.softmax_cross_entropy(one_hot_train_labels, train_logits)\n\n if self.debug_log:\n print_op = tf.print(['step: ', step, updated_fc_bias[0], 'loss:', loss])\n else:\n print_op = tf.no_op()\n\n embedding_grads = tf.gradients(loss, updated_embedding_vars)\n # Only computes fc grad when it's not created from prototypes.\n if not self.proto_maml_fc_layer_on_support_set:\n fc_grads = tf.gradients(loss, updated_fc_vars)\n\n if self.first_order:\n\n def _stop_grads(grads):\n return [tf.stop_gradient(dv) for dv in grads]\n\n embedding_grads = _stop_grads(embedding_grads)\n if not self.proto_maml_fc_layer_on_support_set:\n fc_grads = _stop_grads(fc_grads)\n\n # Apply gradients\n def _apply_grads(variables, grads):\n \"\"\"Applies gradients using SGD on a list of variables.\"\"\"\n v_new = []\n for (v, dv) in zip(variables, grads):\n if (not self.train_batch_norm and\n ('offset' in v.name or 'scale' in v.name)):\n v_new.append(v)\n else:\n v_new.append(v - self.alpha * dv)\n return v_new\n\n with tf.control_dependencies([print_op]):\n updated_embedding_vars = _apply_grads(updated_embedding_vars,\n embedding_grads)\n # Only apply fc grad when it's not created from prototypes.\n if not self.proto_maml_fc_layer_on_support_set:\n updated_fc_vars = _apply_grads(updated_fc_vars, fc_grads)\n step = step + 1\n return tuple([step] + list(updated_embedding_vars) +\n list(updated_fc_vars))\n\n # MAML meta updates using query set examples from an episode.\n if self.zero_fc_layer:\n # To account for variable class sizes, we initialize the output\n # weights to zero. 
See if truncated normal initialization will help.\n zero_weights_op = tf.assign(fc_weights, tf.zeros_like(fc_weights))\n zero_bias_op = tf.assign(fc_bias, tf.zeros_like(fc_bias))\n fc_vars_init_ops = [zero_weights_op, zero_bias_op]\n else:\n fc_vars_init_ops = fc_vars_copy_ops\n\n if self.proto_maml_fc_layer_init:\n train_embeddings = self.embedding_fn(\n self.data.train_images,\n params=collections.OrderedDict(\n zip(embedding_vars_keys, embedding_vars)),\n depth_multiplier=self.depth_multiplier,\n reuse=True)['embeddings']\n prototypes = self.proto_maml_prototypes(train_embeddings)\n pmaml_fc_weights = self.proto_maml_fc_weights(\n prototypes, zero_pad_to_max_way=True)\n pmaml_fc_bias = self.proto_maml_fc_bias(\n prototypes, zero_pad_to_max_way=True)\n fc_vars = [pmaml_fc_weights, pmaml_fc_bias]\n\n with tf.control_dependencies(fc_vars_init_ops + embedding_vars_copy_ops):\n # We will first compute gradients using the initial weights\n # Don't want to restore it during eval.\n step = tf.Variable(\n 0,\n trainable=False,\n name='inner_step_counter',\n collections=[tf.GraphKeys.LOCAL_VARIABLES])\n loop_vars = [step] + embedding_vars + fc_vars\n step_and_all_updated_vars = tf.while_loop(\n _cond, _body, loop_vars, swap_memory=True)\n step = step_and_all_updated_vars[0]\n all_updated_vars = step_and_all_updated_vars[1:]\n updated_embedding_vars = all_updated_vars[0:num_embedding_vars]\n updated_fc_weights, updated_fc_bias = all_updated_vars[\n num_embedding_vars:num_embedding_vars + num_fc_vars]\n\n # Forward pass the training images with the updated weights in order to\n # compute the means and variances, to use for the query's batch norm.\n support_set_moments = None\n if not self.transductive_batch_norm:\n support_set_moments = self.embedding_fn(\n self.data.train_images,\n params=collections.OrderedDict(\n zip(embedding_vars_keys, updated_embedding_vars)),\n depth_multiplier=self.depth_multiplier,\n reuse=True)['moments']\n\n test_embeddings = self.embedding_fn(\n self.data.test_images,\n params=collections.OrderedDict(\n zip(embedding_vars_keys, updated_embedding_vars)),\n moments=support_set_moments, # Use support set stats for batch norm.\n depth_multiplier=self.depth_multiplier,\n reuse=True,\n backprop_through_moments=self.backprop_through_moments)['embeddings']\n\n if not self.proto_maml_fc_layer_on_query_set:\n self.test_logits = (tf.matmul(test_embeddings, updated_fc_weights) +\n updated_fc_bias)[:, 0:self.way]\n else:\n train_embeddings = self.embedding_fn(\n self.data.train_images,\n params=collections.OrderedDict(\n zip(embedding_vars_keys, updated_embedding_vars)),\n depth_multiplier=self.depth_multiplier,\n reuse=True)['embeddings']\n prototypes = self.proto_maml_prototypes(train_embeddings)\n pmaml_fc_weights = self.proto_maml_fc_weights(prototypes)\n pmaml_fc_bias = self.proto_maml_fc_bias(prototypes)\n self.test_logits = (\n tf.matmul(test_embeddings, pmaml_fc_weights) + pmaml_fc_bias)", "def max_pool_backward_naive(dout, cache):\n\tdx = None\n\n\tx, pool_param = cache\n\tN,C,H,W = x.shape\n\tHH = pool_param['pool_height']\n\tWW = pool_param['pool_width']\n\tstride = pool_param['stride']\n\tHp = int(1 + (H-HH)/stride)\n\tWp = int(1 + (W-WW)/stride)\n\n\tdx = np.zeros_like(x)\n\n\tfor n in range(N):\n\t\tfor c in range(C):\n\t\t\tfor j in range(Hp):\n\t\t\t\tfor i in range(Wp):\n\t\t\t\t\tind = np.argmax(x[n,c,j*stride:j*stride+HH,i*stride:i*stride+WW])\n\t\t\t\t\tind1, ind2 = np.unravel_index(ind, (HH,WW))\n\t\t\t\t\tdx[n,c,j*stride:j*stride+HH,i*stride:i*stride+WW][ind1, 
ind2] = dout[n,c,j,i]\n\n\treturn dx", "def loss(self, X, y=None):\n W1 = self.params['W1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n X, cache_conv = conv_forward(X, W1)\n X, x_relu1 = relu_forward(X)\n X, cache_maxpool = max_pool_forward(X, pool_param)\n N1,C1,H1,W1 = X.shape\n X = X.reshape(N1, C1 * H1 * W1)\n X, cache_fc2 = fc_forward(X, W2, b2)\n X, x_relu2 = relu_forward(X)\n X, cache_fc3 = fc_forward(X, W3, b3)\n scores = X\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. #\n ############################################################################\n loss, dx = softmax_loss(X, y)\n dx, dw, db = fc_backward(dx, cache_fc3)\n grads['W3'] = dw\n grads['b3'] = db\n dx = relu_backward(dx, x_relu2)\n dx, dw, db = fc_backward(dx, cache_fc2)\n grads['W2'] = dw\n grads['b2'] = db\n xx, Ind, pp = cache_maxpool\n N2,C2,H2,W2 = xx.shape\n H2 = int(H2/2)\n W2 = int(W2/2)\n dx = dx.reshape(N2,C2,H2,W2)\n dx = max_pool_backward(dx, cache_maxpool)\n dx = relu_backward(dx, x_relu1)\n dx, dw = conv_backward(dx, cache_conv)\n grads['W1'] = dw\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def forward(self,x):\n x = x.transpose(1,2).contiguous()\n x = F.leaky_relu(self.fc1(x), 0.2)\n x = F.leaky_relu(self.bn2(self.fc2(x)), 0.2)\n x = F.leaky_relu(self.bn3(self.fc3(x)), 0.2)\n x = torch.sigmoid(self.fc4(x))\n return x.transpose(1,2)", "def forward(self, input):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n assert input.shape[1] == self.n_neurons, \"The shape of the input tensor is not correct.\"\n\n bn_fct = CustomBatchNormManualFunction()\n out = bn_fct.apply(input, self.gamma, self.beta, self.eps)\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def forward(self, x):\n conv_output = self.conv1(x)\n\n # The window size of max pooling layer of CNN depends on the dimension of conv1d output.\n # Since padding size is 1 and kernal size is 5, so the output of conv1d is with dimension\n # length_of_input_sequence - 2 + 5 - 1 = length_of_input_sequence - 2\n x_conv = F.max_pool1d(F.relu(conv_output), x.size()[-1] - 2)\n return x_conv", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n \n # pass conv_param to the 
forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n \n scores = None \n cache = {}\n # def conv_relu_pool_forward(x, w, b, conv_param, pool_param): return out, cache;\n out, cache['layer1'] = layer_utils.conv_relu_pool_forward(X, W1, b1, conv_param, pool_param) \n # def affine_relu_forward(x, w, b): return out, cache;\n out, cache['layer2'] = layer_utils.affine_relu_forward(out, W2, b2)\n # def affine_forward(x, w, b): return out, cache;\n scores, cache['layer3'] = layers.affine_forward(out, W3, b3)\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n ############################################################################\n \n loss, grads = 0, {}\n\n # def softmax_loss(x, y): return loss, dscore;\n loss, dscores = layers.softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3))\n\n # def affine_backward(dout, cache): return dx, dw, db;\n dout, dW3, db3 = layers.affine_backward(dscores, cache['layer3']) \n # def affine_relu_backward(dout, cache): return dx, dw, db;\n dout, dW2, db2 = layer_utils.affine_relu_backward(dout, cache['layer2'])\n # def conv_relu_pool_backward(dout, cache): return dx, dw, db;\n dout, dW1, db1 = layer_utils.conv_relu_pool_backward(dout, cache['layer1'])\n\n # reg\n grads['W3'], grads['b3'] = dW3 + self.reg * W3, db3\n grads['W2'], grads['b2'] = dW2 + self.reg * W2, db2\n grads['W1'], grads['b1'] = dW1 + self.reg * W1, db1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def forward(self, x):\n\n # 2.2 BUG: Did Bob do anything wrong in the forward method?\n # HINT: Usually a CNN would expect correctly normalized data.\n # Roughly make input to be within -1 to 1 range\n x = (x - 127.5) / 127.5\n\n # Apply conv layers\n x = self.convs(x)\n\n # Global average pooling\n x = x.mean(-1).mean(-1)\n\n # Output layer\n x = self.output(x)\n\n return x", "def forward(self, inputs):\n max_idx_1 = inputs['max_idx_1']\n max_idx_2 = inputs['max_idx_2']\n max_idx_3 = inputs['max_idx_3']\n max_idx_4 = inputs['max_idx_4']\n max_idx_5 = inputs['max_idx_5']\n x = inputs['out']\n\n out = self.relu(self.deconv6_1(x))\n out = self.max_unpool2d(out, max_idx_5)\n\n out = self.relu(self.deconv5_1(out))\n out = self.max_unpool2d(out, max_idx_4)\n\n out = self.relu(self.deconv4_1(out))\n out = self.max_unpool2d(out, 
max_idx_3)\n\n out = self.relu(self.deconv3_1(out))\n out = self.max_unpool2d(out, max_idx_2)\n\n out = self.relu(self.deconv2_1(out))\n out = self.max_unpool2d(out, max_idx_1)\n\n out = self.relu(self.deconv1_1(out))\n raw_alpha = self.deconv1(out)\n return raw_alpha", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n \n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n cnn_out, cnn_cache = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)\n hidden_out, hidden_cache = affine_relu_forward(cnn_out, W2, b2)\n scores, scores_cache = affine_forward(hidden_out, W3, b3)\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! 
#\n ############################################################################\n\n # Compute loss and gradients\n loss, dscores = softmax_loss(scores, y)\n dhidden, grads['W3'], grads['b3'] = affine_backward(dscores, scores_cache)\n dcnn, grads['W2'], grads['b2'] = affine_relu_backward(dhidden, hidden_cache)\n dX, grads['W1'], grads['b1'] = conv_relu_pool_backward(dcnn, cnn_cache)\n\n # Regularization\n loss = loss + 0.5*self.reg*np.sum(self.params['W3']**2)\n loss = loss + 0.5*self.reg*np.sum(self.params['W2']**2)\n loss = loss + 0.5*self.reg*np.sum(self.params['W1']**2)\n grads['W3'] = grads['W3'] + self.reg * self.params['W3']\n grads['W2'] = grads['W2'] + self.reg * self.params['W2']\n grads['W1'] = grads['W1'] + self.reg * self.params['W1']\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def forward_pass(self, x, targets=None):\n self.x = x\n if targets is None:\n loss = None\n else:\n self.targets = targets\n\n result = x\n for layer in self.layers:\n result = layer.forward_pass(result)\n\n # softamax activation on input\n self.y = softmax(result)\n\n if targets is not None:\n loss = self.loss_func(self.y, self.targets)\n\n return loss, self.y", "def forward(self, inputs):\n\n # Convolution layers\n x = self.extract_features(inputs)\n\n # Pooling and final linear layer\n x = F.adaptive_avg_pool2d(x, 1).squeeze(-1).squeeze(-1)\n if self._dropout:\n x = F.dropout(x, p=self._dropout, training=self.training)\n x = self._fc(x)\n return x", "def max_pool_backward(dout, cache):\n dx = None\n ###########################################################################\n # TODO: Implement the max-pooling backward pass #\n ###########################################################################\n (x, pool_param) = cache\n (N, C, H, W) = x.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n HH = int(1 + (H - pool_height) / stride)\n WW = int(1 + (W - pool_width) / stride)\n\n dx = np.zeros_like(x)\n\n for n in range(N):\n for c in range(C):\n for h in range(HH):\n for w in range(WW):\n h1 = h * stride\n h2 = h1 + pool_height\n w1 = w * stride\n w2 = w1 + pool_width\n block = np.reshape(x[n, c, h1:h2, w1:w2], (pool_height*pool_width))\n mask = np.zeros_like(block)\n mask[np.argmax(block)] = 1\n dx[n,c,h1:h2,w1:w2] += np.reshape(mask,(pool_height,pool_width)) * dout[n,c,h,w]\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def forward(self, x):\n out = self.pre_processing(x)\n out = self.a3(out)\n out = self.b3(out)\n out = self.maxpool(out)\n out = self.a4(out)\n out = self.b4(out)\n out = self.c4(out)\n out = self.d4(out)\n out = self.e4(out)\n out = self.maxpool(out)\n out = self.a5(out)\n out = self.b5(out)\n out = self.avgpool(out)\n out = out.view(out.size(0), -1) # reshape the output tensor\n out = self.linear(out)\n\n return out", "def _pool(prev_layer, layer_name):\n with tf.name_scope(layer_name):\n return tf.nn.max_pool(prev_layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')", "def loss(self, X, y=None):\n W1 = self.params['W1']\n mode = 'test' if y is None else 'train'\n\n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 
'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n cache = {}\n\n if self.use_batchnorm:\n for bn_param in self.bn_params:\n bn_param[mode] = mode\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n input = X\n for l in xrange(1, self.conv_layers + 1):\n if self.use_batchnorm:\n W, b, gamma, beta = self.get_params_for_layer(l, get_gamma_beta=True)\n input, cache['cache%d' % l] = conv_norm_relu_pool_forward(input, W, b, conv_param, pool_param, gamma, beta, self.bn_params[l])\n else:\n W, b = self.get_params_for_layer(l)\n input, cache['cache%d' % l] = conv_relu_pool_forward(input, W, b, conv_param, pool_param)\n\n l = self.conv_layers + 1\n if self.use_batchnorm:\n W, b, gamma, beta = self.get_params_for_layer(l, get_gamma_beta=True)\n h_out, h_cache = affine_norm_relu_forward(input, W, b, gamma, beta, self.bn_params[l])\n else:\n W, b = self.get_params_for_layer(l)\n h_out, h_cache = affine_relu_forward(input, W, b)\n\n l = l + 1\n W, b = self.get_params_for_layer(l)\n scores, scores_cache = affine_forward(h_out, W, b)\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! 
#\n ############################################################################\n loss, loss_dx = softmax_loss(scores, y)\n\n for l in xrange(1, self.num_layers + 1):\n loss += 0.5 * self.reg * np.sum(self.params['W%d' % l] * self.params['W%d' % l])\n\n l = self.num_layers\n scores_dx, scores_dw, scores_db = affine_backward(loss_dx, scores_cache)\n self.set_grads(l, grads, scores_dw, scores_db)\n l = l - 1\n\n if self.use_batchnorm:\n a_dx, a_dw, a_db, a_dgamma, a_dbeta = affine_norm_relu_backward(scores_dx, h_cache)\n self.set_grads(l, grads, a_dw, a_db, a_dgamma, a_dbeta)\n else:\n a_dx, a_dw, a_db = affine_relu_backward(scores_dx, h_cache)\n self.set_grads(l, grads, a_dw, a_db)\n l = l - 1\n\n conv_layers = l\n next_input = a_dx\n for l in xrange(conv_layers, 0, -1):\n current_cache = cache['cache%d' % l]\n if self.use_batchnorm:\n c_dx, c_dw, c_db, c_dgamma, c_dbeta = conv_norm_relu_pool_backward(next_input, current_cache)\n self.set_grads(l, grads, c_dw, c_db, c_dgamma, c_dbeta)\n else:\n c_dx, c_dw, c_db = conv_relu_pool_backward(next_input, current_cache)\n self.set_grads(l, grads, c_dw, c_db)\n next_input = c_dx\n\n for l in xrange(1, self.conv_layers + 3):\n grads['W%d' % l] += self.reg * self.params['W%d' % l]\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def forward(self, inputs, inputs1):\n\n down1, indices_1, unpool_shape1 = self.layer_1(inputs=inputs,\n layer_size=2)\n down2, indices_2, unpool_shape2 = self.layer_2(inputs=down1,\n layer_size=2)\n down3, indices_3, unpool_shape3 = self.layer_3(inputs=down2,\n layer_size=3)\n down4, indices_4, unpool_shape4 = self.layer_4(inputs=down3,\n layer_size=3)\n down5, indices_5, unpool_shape5 = self.layer_5(inputs=down4,\n layer_size=3)\n\n up5 = self.layer_6(inputs=down5, indices=indices_5,\n output_shape=unpool_shape5, layer_size=3)\n up4 = self.layer_7(inputs=up5, indices=indices_4,\n output_shape=unpool_shape4, layer_size=3)\n up3 = self.layer_8(inputs=up4, indices=indices_3,\n output_shape=unpool_shape3, layer_size=3)\n up2 = self.layer_9(inputs=up3, indices=indices_2,\n output_shape=unpool_shape2, layer_size=2)\n output = self.layer_10(inputs=up2, indices=indices_1,\n output_shape=unpool_shape1, layer_size=2)\n\n # Second Modality\n\n down11, indices_11, unpool_shape11 = self.layer_11(inputs=inputs,\n layer_size=2)\n down12, indices_12, unpool_shape12 = self.layer_12(inputs=down1,\n layer_size=2)\n down13, indices_13, unpool_shape13 = self.layer_13(inputs=down2,\n layer_size=3)\n down14, indices_14, unpool_shape14 = self.layer_14(inputs=down3,\n layer_size=3)\n down15, indices_15, unpool_shape15 = self.layer_15(inputs=down4,\n layer_size=3)\n\n up15 = self.layer_16(inputs=down15, indices=indices_15,\n output_shape=unpool_shape15, layer_size=3)\n up14 = self.layer_17(inputs=up15, indices=indices_14,\n output_shape=unpool_shape4, layer_size=3)\n up13 = self.layer_18(inputs=up14, indices=indices_13,\n output_shape=unpool_shape13, layer_size=3)\n up12 = self.layer_19(inputs=up13, indices=indices_12,\n output_shape=unpool_shape12, layer_size=2)\n output1 = self.layer_110(inputs=up12, indices=indices_11,\n output_shape=unpool_shape11, layer_size=2)\n\n # End Pipe\n\n Concat = torch.cat((output, output1), 1)\n\n finalout = self.layer_1110(Concat)\n\n return finalout", "def forward(self, Xo):\n N = Xo.size()[0]\n # assert Xo.size() == (N, 3, 448, 448)\n X = self.features(Xo)\n 
# assert X.size() == (N, 128, 112, 112)\n Xp = nn.MaxPool2d(kernel_size=4, stride=4)(X)\n # Xp = F.adaptive_avg_pool2d(X, (1, 1))\n # assert Xp.size() == (N, 128, 28, 28)\n Xp = Xp.view(-1, 128*28*28 )\n # 3 way, get attention mask\n X1 = self.fc1(Xp)\n X2 = self.fc2(Xp)\n X3 = self.fc3(Xp)\n # X1 = F.relu(self.fc1_(Xp))\n # X2 = F.relu(self.fc2_(Xp))\n # X3 = F.relu(self.fc3_(Xp))\n # X1 = self.fc1(X1)\n # X2 = self.fc2(X2)\n # X3 = self.fc3(X3)\n # multiple mask elementwisely, get 3 attention part\n X1 = X1.unsqueeze(dim=2).unsqueeze(dim=3) * X\n X2 = X2.unsqueeze(dim=2).unsqueeze(dim=3) * X\n X3 = X3.unsqueeze(dim=2).unsqueeze(dim=3) * X\n #get the graduate w.r.t input image and multiple, then X1 become N*3*448*448\n X1=self.weightByGrad(X1,Xo)\n X2=self.weightByGrad(X2,Xo)\n X3=self.weightByGrad(X3,Xo)\n # use stn to crop, size become (N,3,96,96)\n # X1 = self.stn(X1, 0)\n # X2 = self.stn(X2, 1)\n # X3 = self.stn(X3, 2)\n #3 BCNN 3 size==(N,200)\n X1=self.BCNN_N(X1,self.bcnnConv_1,self.bfc1)\n X2=self.BCNN_N(X2,self.bcnnConv_2,self.bfc2)\n X3=self.BCNN_N(X3,self.bcnnConv_3,self.bfc3)\n #sum them up, for the predict max\n res=X1+X2+X3\n\n return res", "def create_max_pool(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')", "def pool_layer( x, wpool, padding, name ):\n top = tf.layers.max_pooling2d( x, \n 2, \n [2, wpool], \n padding=padding, \n name=name )\n return top", "def forward(image, label):\n # We transform the image from [0, 255] to [-0.5, 0.5] to make it easier\n # to work with. This is standard practice.\n\n # num_filters hard coded as = 3\n filter_values = []\n for i in range(20): # population\n # firstly, generate 20 different filters\n filter_values.append(np.random.randn(8, 3, 3) / 9)\n\n out = []\n loss = 100\n acc = 0\n\n for generation in range(100): # generation size = 100\n for j, filter_value in enumerate(filter_values): # population size\n out = conv.forward((image / 255) - 0.5, filter_value)\n out = pool.forward(out)\n out = softmax.forward(out)\n # Calculate cross-entropy loss and accuracy. 
np.log() is the natural log.\n new_loss = -np.log(out[label])\n if new_loss < loss:\n loss = new_loss\n acc = 1 if np.argmax(out) == label else 0\n # else:\n # filter_values[j] = np.random.randn(8, 3, 3) / 9\n\n # mutation\n for k, filter_value in enumerate(filter_values):\n mutation_probability = random.uniform(0, 1)\n # if larger than 0.5 then mutate\n if mutation_probability > 0.5:\n # random number of elements to change\n # because it is 3x3 filter,\n # 8 x (3x3) = 72\n # so, we don't want to change to many element\n number_of_elements = random.randint(1, 20) # TODO: optimize the param\n\n # the elements that have been already changed\n has_changed_list = []\n for h in range(number_of_elements):\n row = random.randint(0, 2)\n col = random.randint(0, 2)\n # filter_size = 8 x (3x3),\n # so randomly change one filter\n the_number = random.randint(0, 7)\n key_value_pair = the_number + row + col\n\n if key_value_pair not in has_changed_list:\n element = filter_value[the_number, row, col]\n # TODO: find a better way of mutating the filter weight\n filter_value[the_number, row, col] = mutation(element)\n has_changed_list.append(key_value_pair)\n\n return out, loss, acc", "def forward(self, x):\n x = self.first_deconv(x)\n x = self.first_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.second_deconv(x)\n x = self.second_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.third_deconv(x)\n x = self.third_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.fourth_deconv(x)\n x = self.fourth_batch_norm(x)\n\n x = self.fifth_deconv(x)\n x = self.fifth_batch_norm(x)\n\n x = self.sixth_deconv(x)\n x = self.sixth_batch_norm(x)\n\n x = self.seventh_deconv(x)\n\n # sigmoid_out = nn.functional.sigmoid(x)\n tanh_out = nn.functional.tanh(x)\n\n out = (tanh_out + 1) * 255 / 2\n\n # print 'out.shape =', out.shape\n\n return out", "def forward(self, x: Tensor, graph_id: IntTensor,) -> Tensor:\n values = self.gate(x) * self.mlp(x)\n aggregated_values = self.pooling(values, batch=graph_id)\n\n return aggregated_values", "def _max_pool(x):\n return tf.nn.max_pool(value=x,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')", "def Naive_forwardpass(self):\n\n for filter_k in range(0, self.n_filters):\n filter_col = self.im2col(self.filter_map[filter_k].data_mtx)\n for hgt_indx in range(0, self.Output_Height):\n for wdth_indx in range(0, self.Output_Width):\n wdth_start_index = wdth_indx * self.stride_len\n wdth_end_index= wdth_start_index + self.filter_size\n hgt_start_index = hgt_indx * self.stride_len\n hgt_end_index = hgt_start_index + self.filter_size\n trn_img_area = self.input_vol.padded_mtx[:, wdth_start_index:wdth_end_index,\n hgt_start_index:hgt_end_index]\n trn_img_col = self.im2col(trn_img_area)\n self.output_Tensor.data_mtx[filter_k,wdth_indx , hgt_indx] = self.convolution_op(trn_img_col,\n filter_col) + np.sum(self.bias_vol[filter_k].data_mtx)\n return self.output_Tensor", "def forward(self, x):\n out = self.net(x)\n out = self.avg(out)\n out = out.view(out.size(0), -1)\n out = self.fc1(out)\n\n return func.log_softmax(out, dim=-1)", "def forward(self, x):\n x = self.feature_extractor(x)\n batch_size, hidden = x.size()\n\n x = self.layer_1(x)\n x = torch.relu(x)\n x = self.layer_2(x)\n x = torch.relu(x)\n x = self.layer_3(x)\n\n x = torch.log_softmax(x, dim=1)\n return x", "def forward(self, data, n_branches, extract_features=None, \n conv_classifier=False, use_softmax=False, **kwargs):\n res = list()\n for j in range(n_branches): # Siamese/triplet nets; sharing weights\n x = data[j]\n \n # if 
in feature extracting phase, extract hypercolumn for specified features\n if isinstance(extract_features,list):\n activations = dict()\n names = list()\n for i, l in enumerate(self.branches):\n names.append('x'+str(i))\n if i == 0:\n activations[names[i]] = l(x)\n if activations[names[i]].shape[2:] != data[j].shape[2:]:\n activations[names[i]] = nn.functional.interpolate(\n activations[names[i]], size=data[j].shape[2:], \n mode='bilinear', align_corners=True)\n else:\n activations[names[i]] = l(activations[names[i-1]])\n if activations[names[i]].shape[2:] != data[j].shape[2:]:\n activations[names[i]] = nn.functional.interpolate(\n activations[names[i]], size=data[j].shape[2:], \n mode='bilinear', align_corners=True)\n \n # return a list of features\n #features = [x]\n features=list()\n features.extend([activations[names[i]] for i in extract_features])\n \n return features\n \n # if in training or validation phase forward images through branch \n else:\n res.append(self.branches(x))\n \n # concatenate the output of difference of branches\n x = torch.abs(res[1] - res[0])\n if n_branches == 3:\n x = torch.cat(x, torch.abs(res[2] - res[1]), 1)\n \n # joint layers\n x = self.joint(x)\n if extract_features == 'joint': \n return x\n x = nn.functional.adaptive_avg_pool2d(x, (data[0].shape[2], data[0].shape[3]))\n if not conv_classifier:\n x = torch.flatten(x, 1)\n x = self.classifier(x)\n else:\n x = self.classifier(x)\n if use_softmax: # is True during inference\n x = nn.functional.softmax(x, dim=1)\n else:\n x = nn.functional.log_softmax(x, dim=1)\n\n return x", "def fprop_pool(self, layer, I, O, argmax=None, beta=0.0):\n\n assert layer.sizeI == I.size\n assert layer.sizeO == O.size\n assert layer.op == \"max\" or layer.op == 'avg'\n if layer.op == \"max\":\n assert layer.sizeO == argmax.size\n\n J, T, R, S = layer.JTRS\n C, D, H, W, N = layer.dimI\n K, M, P, Q, N = layer.dimO\n pad_c, pad_d, pad_h, pad_w = layer.padding\n str_c, str_d, str_h, str_w = layer.strides\n if layer.op == \"max\":\n bMax = 1\n elif layer.op == 'avg':\n bMax = 0\n if self.check_caffe_compat():\n bCeil = 1\n else:\n bCeil = 0\n\n primitives = c_longlong(layer.dnnPrimitives.ctypes.data)\n self.mklEngine.MaxPooling_fprop(\n I.get_prim(), O.get_prim(), primitives, layer.initOk_f, bMax,\n N, C, H, W, S, R, str_h, str_w, pad_h, pad_w, K, P, Q, bCeil)\n layer.initOk_f = 1\n O.shape5D = layer.dimO", "def pool(input_x, size):\n return tf.nn.max_pool(input_x, ksize=[1, size, size, 1], strides=[1, size, size, 1], padding='SAME')", "def forward(self, x, indices, **kwargs):\n\n x = x.permute(0, 2, 1)\n x = F.max_unpool1d(x, indices, self.kernel_size)\n x = x.permute(0, 2, 1)\n return x", "def relu_forward(x):\n #raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n out=np.maximum(0,x)\n cache=x\n return out,cache", "def loss(self, X, y=None):\n\n # In dev testing, the loss fnc stops at \"scores\" , unfollowed by \"softmax\" probability prediction.\n # In real testing, \"self.predict()\" needs to be implemented in Solver() class.\n \n if y is None:\n for bn_param in self.bn_params:\n bn_param[\"mode\"] = \"test\"\n\n\n W1, b1 = self.params['W1'], self.params['b1']\n gamma1, beta1 = self.params[\"sbnGamma1\"], self.params[\"sbnBeta1\"]\n bn_param1 = self.bn_params[0]\n\n W2, b2 = self.params['W2'], self.params['b2']\n gamma2, beta2 = self.params[\"sbnGamma2\"], 
self.params[\"sbnBeta2\"]\n bn_param2 = self.bn_params[1]\n\n W3, b3 = self.params['W3'], self.params['b3']\n gamma3, beta3 = self.params[\"bnGamma3\"], self.params[\"bnBeta3\"]\n bn_param3 = self.bn_params[2]\n\n W4, b4 = self.params['W4'], self.params['b4']\n \n # pass conv_param to the forward pass for the convolutional layer\n conv_param = self.conv_param\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = self.maxpool_params\n\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n \n scores = None \n cache = {}\n # def conv_sbn_relu_forward(x, w, b, gamma, beta, conv_param, bn_param): return out, cache;\n out, cache[\"layer1\"] = layer_utils.conv_sbn_relu_forward(X, W1, b1, gamma1, beta1, conv_param, bn_param1) \n out, cache[\"layer2\"] = layer_utils.conv_sbn_relu_forward(out, W2, b2, gamma2, beta2, conv_param, bn_param2)\n\n # def max_pool_forward_fast(x, pool_param): return out, cache;\n out, cache[\"maxpool\"] = fast_layers.max_pool_forward_fast(out, pool_param)\n\n # def affine_bn_relu_forward(x, w, b, gamma, beta, bn_param): return out, cache;\n \n out, cache[\"layer3\"] = layer_utils.affine_bn_relu_forward(out, W3, b3, gamma3, beta3, bn_param3)\n\n # def affine_forward(x, w, b): return out, cache;\n scores, cache[\"layer4\"] = layers.affine_forward(out, W4, b4)\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! 
#\n ############################################################################\n \n loss, grads = 0, {}\n\n # def softmax_loss(x, y): return loss, dscore;\n loss, dscores = layers.softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3) + np.sum(W4 * W4))\n\n # def affine_backward(dout, cache): return dx, dw, db;\n dout, dW4, db4 = layers.affine_backward(dscores, cache[\"layer4\"]) \n\n # def affine_bn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW3, db3, dgamma3, dbeta3 = layer_utils.affine_bn_relu_backward(dout, cache[\"layer3\"])\n\n # print cache[\"layer3\"]\n\n # def max_pool_backward_fast(dout, cache): return max_pool_backward_im2col(dout, real_cache);\n # def max_pool_backward_im2col(dout, cache): return dx;\n dout = fast_layers.max_pool_backward_fast(dout, cache[\"maxpool\"])\n\n # def conv_sbn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW2, db2, dgamma2, dbeta2 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer2\"])\n _, dW1, db1, dgamma1, dbeta1 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer1\"])\n\n # reg\n grads['W4'], grads['b4'] = dW4 + self.reg * W4, db4\n \n grads['W3'], grads['b3'] = dW3 + self.reg * W3, db3\n grads[\"bnGamma3\"], grads[\"bnBeta3\"] = dgamma3, dbeta3\n\n grads['W2'], grads['b2'] = dW2 + self.reg * W2, db2\n grads[\"sbnGamma2\"], grads[\"sbnBeta2\"] = dgamma2, dbeta2\n\n grads['W1'], grads['b1'] = dW1 + self.reg * W1, db1\n grads[\"sbnGamma1\"], grads[\"sbnBeta1\"] = dgamma1, dbeta1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def forward(self, x): # pylint: disable=invalid-name\n\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n out += residual\n\n out = self.relu(out)\n out = self.maxpool(out)\n\n return out", "def _forward(self):\n\n tf.summary.image(\"image\", tensor=tf.reshape(self.x, (self.batch_size, 28, 28, 1)), max_outputs=10)\n x = self.x\n\n # x = layers.dropout(self.x, keep_prob=0.7)\n # with tf.variable_scope(\"layer1\") as scope:\n h = tf.nn.relu(layers.fully_connected(x, num_outputs=self.input_size // 2, activation_fn=None))\n # tf.summary.histogram(\"moving_mean1\", tf.get_variable(scope + \"moving_mean\"))\n # with tf.variable_scope(\"layer2\") as scope:\n # h = tf.nn.relu(layers.fully_connected(h, num_outputs=32, activation_fn=None))\n # tf.summary.histogram(\"moving_mean2\", tf.get_variable(\"moving_mean\"))\n # with tf.variable_scope(\"layer3\") as scope:\n self.logits = layers.fully_connected(h, num_outputs=10, activation_fn=None)\n # tf.summary.histogram(\"moving_mean3\", tf.get_variable(\"moving_mean\"))\n\n self.probability = tf.nn.softmax(self.logits)\n self.prediction = tf.argmax(self.probability, axis=1)", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n # conv - relu - 2x2 max pool - affine - relu - affine - softmax\n\n\n # pass conv_param to the forward pass for the convolutional layer\n # Padding and stride chosen to preserve the input spatial size\n filter_size = W1.shape[2]\n conv_param = {'stride': 
1, 'pad': (filter_size - 1) // 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\n h1, c1 = conv_forward_im2col(X, W1, b1, conv_param) #\n h1, r1 = relu_forward(h1)\n h1, p1 = max_pool_forward_fast(h1, pool_param) #\n max_pool_shape = h1.shape\n h1 = h1.reshape(X.shape[0], -1)\n h2, c2 = affine_relu_forward(h1, W2, b2)\n scores, c3 = affine_forward(h2, W3, b3)\n\n if y is None:\n return scores\n\n loss, dx = softmax_loss(scores, y)\n\n loss += self.reg / 2 * (self.params['W1']**2).sum()\n loss += self.reg / 2 * (self.params['W2']**2).sum()\n loss += self.reg / 2 * (self.params['W3']**2).sum()\n\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. #\n ############################################################################\n \n grads = {}\n dx, grads['W3'], grads['b3'] = affine_backward(dx, c3)\n grads['W3'] += self.reg * self.params['W3']\n dx, grads['W2'], grads['b2'] = affine_relu_backward(dx, c2)\n dx = dx.reshape(max_pool_shape)\n dx = max_pool_backward_fast(dx, p1)\n dx = relu_backward(dx, r1)\n dx, grads['W1'], grads['b1'] = conv_backward_im2col(dx, c1)\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def forward_propogation(X, parameters):\n W1 = parameters['W1']\n W2 = parameters['W2']\n W3 = parameters['W3']\n W4 = parameters['W4']\n W5 = parameters['W5']\n\n Z1 = tf.nn.conv2d(X, W1, strides=[1,1,1,1], padding='SAME')\n A1 = tf.nn.relu(Z1)\n P1 = tf.nn.max_pool(A1, ksize=[1,2,2,1], strides=[1,2,2,1],\n padding='SAME')\n \n Z2 = tf.nn.conv2d(P1, W2, strides=[1,1,1,1], padding='SAME')\n A2 = tf.nn.relu(Z2)\n P2 = tf.nn.max_pool(A2, ksize=[1,2,2,1], \n strides=[1,2,2,1],padding='SAME')\n Z3 = tf.nn.conv2d(P2, W3, strides=[1,1,1,1], padding='SAME')\n A3 = tf.nn.relu(Z3)\n P3 = tf.nn.max_pool(A3, strides=[1,1,1,1],\n ksize=[1,2,2,1], padding='SAME')\n\n Z4 = tf.nn.conv2d(P3, W4, strides=[1,1,1,1], padding='SAME')\n A4 = tf.nn.relu(Z4)\n P4 = tf.nn.max_pool(A4, strides=[1,1,1,1],\n ksize=[1,2,2,1],padding='SAME')\n\n Z5 = tf.nn.conv2d(P4, W5, strides=[1,1,1,1], padding='SAME')\n A5 = tf.nn.relu(Z5)\n P5 = tf.nn.max_pool(A5, strides=[1,1,1,1],\n ksize=[1,2,2,1], padding='SAME')\n\n P5 = tf.contrib.layers.flatten(P5)\n Z6 = tf.contrib.layers.fully_connected(P5, 2,\n activation_fn = None)\n\n return Z6", "def forward(self, x):\n\n if x.dim() == 3:\n x = x.unsqueeze(1)\n x = x.transpose(1, 3)\n x = self.norm0(x)\n x = x.transpose(1, 3)\n\n x = self.conv_block1(x, pool_size=(2, 2), pool_type=\"avg\")\n x = F.dropout(x, p=0.2, training=self.training)\n x = self.conv_block2(x, pool_size=(2, 2), pool_type=\"avg\")\n x = F.dropout(x, p=0.2, training=self.training)\n x = self.conv_block3(x, pool_size=(2, 2), pool_type=\"avg\")\n x = F.dropout(x, p=0.2, training=self.training)\n x3_out = self.conv_block4(x, 
pool_size=(2, 2), pool_type=\"avg\")\n x = F.dropout(x3_out, p=0.2, training=self.training)\n x2_out = self.conv_block5(x, pool_size=(2, 2), pool_type=\"avg\")\n x = F.dropout(x2_out, p=0.2, training=self.training)\n x1_out = self.conv_block6(x, pool_size=(1, 1), pool_type=\"avg\")\n x = F.dropout(x1_out, p=0.2, training=self.training)\n x = torch.mean(x, dim=3)\n\n (x1, _) = torch.max(x, dim=2)\n x2 = torch.mean(x, dim=2)\n x = x1 + x2\n\n # [B x 1 x emb_dim]\n if not self.return_reps:\n return x.unsqueeze(1)\n\n return x.unsqueeze(1), (x1_out, x2_out, x3_out)", "def conv_relu_pool_backward_naive(dout, cache):\n\tconv_cache, relu_cache, pool_cache = cache\n\tds = max_pool_backward_naive(dout, pool_cache)\n\tda = relu_backward(ds, relu_cache)\n\tdx, dw, db = conv_backward_naive(da, conv_cache)\n\treturn dx, dw, db", "def forward(self, x):\n sources = list()\n new_sources = list()\n\n # apply lds to the initial image\n x_pool = self.lds(x)\n\n # apply vgg up to conv4_3\n for k in range(22):\n x = self.features[k](x)\n conv4_3_bn = self.ibn1(x)\n x_pool1_skip, x_pool1_icn = self.icn1(x_pool)\n s = self.Norm1(conv4_3_bn * x_pool1_icn)\n\n # apply vgg up to fc7\n for k in range(22, 34):\n x = self.features[k](x)\n conv7_bn = self.ibn2(x)\n x_pool2_skip, x_pool2_icn = self.icn2(x_pool1_skip)\n p = self.Norm2(self.dsc1(s) + conv7_bn * x_pool2_icn)\n\n x = self.features[34](x)\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extra):\n x = v(x)\n if k == 0:\n x_pool3_skip, x_pool3_icn = self.icn3(x_pool2_skip)\n w = self.Norm3(self.dsc2(p) + x * x_pool3_icn)\n elif k == 2:\n x_pool4_skip, x_pool4_icn = self.icn4(x_pool3_skip)\n q = self.Norm4(self.dsc3(w) + x * x_pool4_icn)\n elif k == 4:\n o = self.Norm5(self.dsc4(q) + x)\n sources.append(o)\n elif k == 7 or k == 9:\n sources.append(x)\n else:\n pass\n\n # project the forward features into lower dimension.\n tmp1 = self.proj1(p)\n tmp2 = self.proj2(w)\n tmp3 = self.proj3(q)\n tmp4 = self.proj4(o)\n\n # The conv4_3 level\n proj1 = F.upsample(tmp1, scale_factor=2, mode='bilinear')\n proj2 = F.upsample(tmp2, scale_factor=4, mode='bilinear')\n proj3 = F.upsample(tmp3, scale_factor=8, mode='bilinear')\n proj4 = F.upsample(tmp4, scale_factor=16, mode='bilinear')\n proj = torch.cat([proj1, proj2, proj3, proj4], dim=1)\n\n agent1 = self.agent1(s)\n\n convert1 = self.convert1(proj)\n pred1 = torch.cat([agent1, convert1], dim=1)\n pred1 = self.merge1(pred1)\n new_sources.append(pred1)\n\n # The fc_7 level\n proj2 = F.upsample(tmp2, scale_factor=2, mode='bilinear')\n proj3 = F.upsample(tmp3, scale_factor=4, mode='bilinear')\n proj4 = F.upsample(tmp4, scale_factor=8, mode='bilinear')\n proj = torch.cat([proj2, proj3, proj4], dim=1)\n\n agent2 = self.agent2(p)\n convert2 = self.convert2(proj)\n pred2 = torch.cat([agent2, convert2], dim=1)\n pred2 = self.merge2(pred2)\n new_sources.append(pred2)\n\n # The conv8 level\n proj3 = F.upsample(tmp3, scale_factor=2, mode='bilinear')\n proj4 = F.upsample(tmp4, scale_factor=4, mode='bilinear')\n proj = torch.cat([proj3, proj4], dim=1)\n\n agent3 = self.agent3(w)\n convert3 = self.convert3(proj)\n pred3 = torch.cat([agent3, convert3], dim=1)\n pred3 = self.merge3(pred3)\n new_sources.append(pred3)\n\n # The conv9 level\n proj4 = F.upsample(tmp4, scale_factor=2, mode='bilinear')\n proj = proj4\n\n agent4 = self.agent4(q)\n convert4 = self.convert4(proj)\n pred4 = torch.cat([agent4, convert4], dim=1)\n pred4 = self.merge4(pred4)\n new_sources.append(pred4)\n\n for prediction in 
sources:\n new_sources.append(prediction)\n\n return new_sources", "def forward(self, inputs):\n #NOTE: Already merge axis 0(batches) and axis 1(channels) before extracting feature phase,\n # please refer to paddlevideo/modeling/framework/recognizers/recognizer2d.py#L27\n #y = paddle.reshape(\n # inputs, [-1, inputs.shape[2], inputs.shape[3], inputs.shape[4]])\n\n ####ResNet-C: use three 3x3 conv, replace, one 7x7 conv\n y = self.conv1_1(inputs)\n y = self.conv1_2(y)\n y = self.conv1_3(y)\n\n y = self.pool2D_max(y)\n for block in self.block_list:\n y = block(y)\n return y", "def forward(self, x):\r\n\r\n y = self.conv1(x)\r\n y = self.bn1(y)\r\n y = F.relu(y, inplace = True)\r\n y = self.conv2(y)\r\n y = self.bn2(y)\r\n y = F.relu(y, inplace = True)\r\n\r\n return y", "def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.conv1_BN(x)\r\n x = F.relu(x)\r\n x = self.conv1_dp(x)\r\n x = self.Block2_1(x)\r\n x = self.Block2_2(x)\r\n x = self.Block3_1(x)\r\n x = self.Block3_2(x)\r\n x = self.Block3_3(x)\r\n x = self.Block3_4(x)\r\n x = self.Block4_1(x)\r\n x = self.Block4_2(x)\r\n x = self.Block4_3(x)\r\n x = self.Block4_4(x)\r\n x = self.Block5_1(x)\r\n x = self.Block5_2(x)\r\n x = self.MP(x)\r\n x = x.view(x.size(0),-1)\r\n x = self.fc(x)\r\n \r\n return x", "def forward(self, x):\n\n\t\t## Conv layers\n\t\tx = self.avgpool(F.tanh(self.conv1(x)))\n\t\tx = self.avgpool(F.tanh(self.conv2(x)))\n\t\tx = F.tanh(self.conv3(x))\n\n\t\t## Flatten\n\t\tx = x.view(x.size(0), -1)\n\n\t\t## Fully connected layers\n\t\tx = F.tanh(self.fc1(x))\n\t\tx = self.fc2(x)\n\n\t\tx = F.softmax(x, dim=1)\n\n\t\treturn x", "def conv_net(x, keep_prob, nconv1, nconv2, nfullyconn, nfullyconn2):\n # TODO: Apply 1, 2, or 3 Convolution and Max Pool layers\n # Play around with different number of outputs, kernel size and stride\n # Function Definition from Above:\n # conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)\n #layer_norm = tflearn.layers.normalization.batch_normalization (x, name='BatchNormalization')\n layer_conv = conv2d_maxpool(x, nconv1, (2,2), (2,2), (2,2), (2,2))\n #layer_conv = tf.nn.dropout(layer_conv, keep_prob)\n layer_conv = tf.layers.batch_normalization (layer_conv, name='BatchNormalization')\n #layer_conv = tf.nn.dropout(layer_conv, keep_prob)\n #print(layer_conv)\n layer_conv = conv2d_maxpool(x, nconv2, (5,5), (2,2), (2,2), (2,2))\n layer_conv = tf.layers.batch_normalization (layer_conv, name='BatchNormalization2')\n # TODO: Apply a Flatten Layer\n # Function Definition from Above:\n # flatten(x_tensor)\n layer_flat = flatten(layer_conv)\n #layer_flat = tflearn.layers.normalization.batch_normalization (layer_flat, name='BatchNormalization')\n \n \n\n # TODO: Apply 1, 2, or 3 Fully Connected Layers\n # Play around with different number of outputs\n # Function Definition from Above:\n # fully_conn(x_tensor, num_outputs)\n #layer_fully_conn = fully_conn(x, nfullyconn)\n layer_fully_conn = fully_conn(layer_flat, nfullyconn)\n #print(\"Fully Connected Outputs: {}\".format(layer_fully_conn.shape[1]))\n #layer_fully_conn = fully_conn(layer_fully_conn, nconv)\n layer_fully_conn = tf.layers.batch_normalization (layer_fully_conn, name='BatchNormalization3')\n layer_flat = flatten(layer_fully_conn)\n layer_fully_conn = fully_conn(layer_flat, nfullyconn2)\n layer_fully_conn = tf.layers.batch_normalization (layer_fully_conn, name='BatchNormalization4')\n layer_flat = flatten(layer_fully_conn)\n layer_fully_conn = tf.nn.dropout(layer_fully_conn, keep_prob)\n 
#layer_fully_conn = tf.nn.dropout(layer_fully_conn, keep_prob)\n \n # TODO: Apply an Output Layer\n # Set this to the number of classes\n # Function Definition from Above:\n # output(x_tensor, num_outputs)\n layer_final = output(layer_fully_conn, 46)\n \n \n # TODO: return output\n return layer_final", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n \n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n \n N, C, H, W = X.shape;\n\n #print 'X shape = ' + str(X.shape);\n\n # Get conv layer output. Note that it is not 2-dimensional \n # conv - relu - 2x2 maxpool\n v1, cache1 = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param);\n\n #print 'v1 shape = ' + str(v1.shape);\n\n # Reshape to 2D\n v1shape = v1.shape; # Used to reshape back to original form in backward pass\n v1 = np.reshape(v1,(N,-1));\n #print 'v1 shape = ' + str(v1.shape);\n\n # Feed forward to hidden layer (affine-relu)\n v2, cache2 = affine_relu_forward(v1, W2, b2);\n #print 'v2 shape = ' + str(v2.shape);\n\n # Feed forward to final layer (affine only)\n v3, cache3 = affine_forward(v2, W3, b3)\n #print 'v3 shape = ' + str(v3.shape);\n\n # Compute scores\n scores = v3;\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! 
#\n ############################################################################\n \n # Calculate softmax loss from layer 2 output\n # Loss gets regularized here\n # Each separate gradient must be regularized later when calculated\n loss, dv3 = softmax_loss(scores,y); # Softmax loss and gradient\n #print 'dv3 shape = ' + str(dv3.shape);\n reg = self.reg;\n loss += 0.5 * reg * (np.sum(W1*W1) + np.sum(W2*W2) + np.sum(W3*W3)); # Regularize\n\n # Do backward pass through layer 2 affine\n dv2, dw3, db3 = affine_backward(dv3, cache3);\n dw3 += reg*W3; # Regularize\n #print 'dv2 shape = ' + str(dv2.shape);\n\n\n # Backward pass through hidden layer\n dv1, dw2, db2 = affine_relu_backward(dv2, cache2);\n dw2 += reg*W2; # Regularize\n #print 'dv1 shape = ' + str(dv1.shape);\n\n # Reshape dv1 to be compatible with convolutional layer\n dv1 = np.reshape(dv1,v1shape);\n #print 'dv1 shape = ' + str(dv1.shape);\n\n # Do backward pass through convolutional layer\n dx, dw1, db1 = conv_relu_pool_backward(dv1, cache1);\n dw1 += reg*W1; # Regularize\n\n # Store all weight and bias gradients in grads\n grads['W1'] = dw1; grads['b1'] = db1;\n grads['W2'] = dw2; grads['b2'] = db2;\n grads['W3'] = dw3; grads['b3'] = db3;\n\n\n\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def max_pooling(self, filter_):\n return self.add_layer(max_pooling, filter_)", "def max_pooling_layer(X, kernel_size=2, stride=2, padding='SAME', name=None):\n with tf.variable_scope(name) as scope:\n pool = tf.nn.max_pool(X, ksize=[1, kernel_size, kernel_size, 1],\n strides=[1, stride, stride, 1],\n padding=padding, name=name)\n return pool", "def __init__(self, num_layers, num_mlp_layers, input_dim, hidden_dim,\n output_dim, final_dropout, learn_eps, graph_pooling_type,\n neighbor_pooling_type):\n super(GIN, self).__init__()\n self.num_layers = num_layers\n self.learn_eps = learn_eps\n\n # List of MLPs\n self.ginlayers = torch.nn.ModuleList()\n self.batch_norms = torch.nn.ModuleList()\n\n for layer in range(self.num_layers):\n if layer == 0:\n mlp = MLP(num_mlp_layers, input_dim, hidden_dim, hidden_dim)\n else:\n mlp = MLP(num_mlp_layers, hidden_dim, hidden_dim, hidden_dim)\n\n self.ginlayers.append(\n GINConv(ApplyNodeFunc(mlp), neighbor_pooling_type, 0, self.learn_eps))\n self.batch_norms.append(nn.BatchNorm1d(hidden_dim))\n\n # Linear function for graph poolings of output of each layer\n # which maps the output of different layers into a prediction score\n self.linears_prediction = torch.nn.ModuleList()\n\n for layer in range(num_layers):\n if layer == 0:\n self.linears_prediction.append(\n nn.Linear(input_dim, output_dim))\n else:\n self.linears_prediction.append(\n nn.Linear(hidden_dim, output_dim))\n\n self.drop = nn.Dropout(final_dropout)\n\n if graph_pooling_type == 'sum':\n self.pool = SumPooling()\n elif graph_pooling_type == 'mean':\n self.pool = AvgPooling()\n elif graph_pooling_type == 'max':\n self.pool = MaxPooling()\n else:\n raise NotImplementedError", "def prop_max_pool(self, activation, relevance, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1]):\n act = tf.expand_dims(activation, 3) # N x M x F x 1\n z = tf.nn.max_pool(act, ksize, strides, padding='SAME') + self.epsilon\n with self.model.graph.as_default():\n rel = tf.expand_dims(relevance, 3)\n s = rel / z\n c = gen_nn_ops.max_pool_grad_v2(act, z, s, ksize, strides, padding='SAME')\n tmp = c * act\n return 
tf.squeeze(tmp, [3])", "def forward_pass(X,architecture):\n \n architecture['layer1'][0] = X\n kernel_shape1 = architecture['layer1'][7]\n stride1 = architecture['layer1'][8]\n if kernel_shape1 is not None and not isinstance(kernel_shape1,int):\n X_input_1_im2col,imX = im2col(X,kernel_shape1,stride1,im_needed = False, shape_specified = True)\n architecture['layer1'][4] = X_input_1_im2col\n else:\n architecture['layer1'][4] = None\n\n for layer in range(len(architecture)): # Feedforward from the first till the second last layer\n X_input,X_output,weightsi,biasi,X_input_1_im2col,imi,output_shapei,kernel_shapei,stridei,operationi,imx = architecture['layer{}'.format(layer+1)]\n\n if operationi == 'conv_bn_relu':\n conv_output = relu(BatchNorm(torch.t(X_input_1_im2col).mm(weightsi) + biasi))\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'conv_relu':\n conv_output = relu(torch.t(X_input_1_im2col).mm(weightsi) + biasi)\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'conv_bn_sigmoid':\n conv_output = sigmoid(BatchNorm(torch.t(X_input_1_im2col).mm(weightsi) + biasi))\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not 
isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'conv_sigmoid':\n conv_output = sigmoid(torch.t(X_input_1_im2col).mm(weightsi) + biasi)\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'maxpool':\n maxpool_output = maxpool(X_input,kernel_shapei,stridei)\n\n maxpool_output = torch.reshape(maxpool_output,output_shapei)\n\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = maxpool_output\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n architecture['layer{}'.format(layer+2)][4],imX = im2col(maxpool_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'flatten_dense_relu':\n # kernel_shapei in this case refers to the output channels: stride for dense layer will be None\n output = flatten_and_dense(X_input,kernel_shapei,weightsi,biasi,activation = 'relu',initialise_weights = False)\n architecture['layer{}'.format(layer+1)][1] = output\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = output\n elif operationi == 'flatten_dense_none':\n # kernel_shapei in this case refers to the output channels: stride for dense layer will be None\n output = flatten_and_dense(X_input,kernel_shapei,weightsi,biasi,activation = 'none',initialise_weights = False)\n architecture['layer{}'.format(layer+1)][1] = output\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = output\n elif operationi == 'flatten_dense_sigmoid':\n # kernel_shapei in this case refers to the output channels: stride for dense layer will be None\n output = flatten_and_dense(X_input,kernel_shapei,weightsi,biasi,activation = 'sigmoid',initialise_weights = False)\n architecture['layer{}'.format(layer+1)][1] = output\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = output\n elif operationi == 'softmax':\n Xin = architecture['layer{}'.format(layer+1)][0]\n output = softmax(Xin).squeeze()\n 
architecture['layer{}'.format(layer+1)][1] = output\n if layer == len(architecture) - 1:\n y_pred = architecture['layer{}'.format(len(architecture))][1]\n \n return y_pred", "def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x.squeeze(0)", "def forward(self, input):\r\n return np.maximum(0,input)", "def max_pool(self, x, name=\"\"):\n return tf.nn.max_pool(x, ksize=self.mp_size, strides=self.mp_stride,\n padding=self.mp_padding, name=name)", "def forward(self, x):\n batch_size, channels, width, height = x.size()\n\n # Input Layer: (batch_size, 1, 28, 28) -> (batch_size, 1*28*28)\n x = x.view(batch_size, -1)\n\n # Layer 1: (batch_size, 1*28*28) -> (batch_size, 128)\n x = self.layer_1(x)\n x = torch.relu(x)\n\n # Layer 2: (batch_size, 128) -> (batch_size, 256)\n x = self.layer_2(x)\n x = torch.relu(x)\n\n # Layer 3: (batch_size, 256) -> (batch_size, 10)\n x = self.layer_3(x)\n x = torch.log_softmax(x, dim=1)\n\n return x" ]
[ "0.78478473", "0.77464086", "0.7588915", "0.75831836", "0.75474185", "0.74184185", "0.7378611", "0.7318571", "0.72232896", "0.71326613", "0.7098723", "0.70758176", "0.6990659", "0.69255495", "0.69221073", "0.68792313", "0.68130654", "0.68058133", "0.6792034", "0.6792034", "0.6785933", "0.6777063", "0.6709021", "0.668601", "0.6682275", "0.66381764", "0.66302615", "0.6614249", "0.65889394", "0.65823686", "0.65627134", "0.65338844", "0.6518862", "0.6506279", "0.6504616", "0.6476989", "0.6442894", "0.6439327", "0.6417145", "0.6415087", "0.641287", "0.64082813", "0.63963646", "0.63934046", "0.63846165", "0.637796", "0.6376113", "0.63746715", "0.6348848", "0.634006", "0.63386214", "0.63343626", "0.6329607", "0.6322901", "0.63213557", "0.6298913", "0.6294224", "0.6288317", "0.6286969", "0.6286058", "0.62805957", "0.62792003", "0.62765497", "0.6272656", "0.626191", "0.6243235", "0.62409633", "0.62361884", "0.62261444", "0.6222016", "0.62137896", "0.6206424", "0.6205639", "0.62050974", "0.6205028", "0.62012345", "0.61861277", "0.61777264", "0.61739075", "0.6172924", "0.6171008", "0.6169727", "0.61691433", "0.6159205", "0.61550653", "0.6153831", "0.6153324", "0.61525637", "0.61523503", "0.61516404", "0.6142829", "0.6133089", "0.61323005", "0.6125017", "0.6120003", "0.6105808", "0.61027706", "0.61025196", "0.6102247", "0.6097806" ]
0.74050933
6
A naive implementation of the backward pass for a maxpooling layer.
def max_pool_backward(dout, cache):
    dx = None
    ###########################################################################
    # TODO: Implement the max-pooling backward pass                           #
    ###########################################################################
    x, pool_param = cache
    N, C, H, W = x.shape
    pool_height = pool_param['pool_height']
    pool_width = pool_param['pool_width']
    stride = pool_param['stride']
    H_prime = int(1 + (H - pool_height) / stride)
    W_prime = int(1 + (W - pool_width) / stride)  # python 3 / is just float number division
    dx = np.zeros((N, C, H, W))
    for n in range(N):
        for c in range(C):
            for i in range(H_prime):
                for j in range(W_prime):
                    h_start = i * stride
                    h_end = h_start + pool_height
                    w_start = j * stride
                    w_end = w_start + pool_width
                    pool_window = x[n, c, h_start:h_end, w_start:w_end]
                    maxValue = np.max(pool_window)
                    dx[n, c, h_start:h_end, w_start:w_end] += dout[n, c, i, j] * (pool_window == maxValue)
    ###########################################################################
    #                             END OF YOUR CODE                            #
    ###########################################################################
    return dx
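The record above pairs a natural-language query with a naive max-pool backward pass, so a small usage sketch may help make the (x, pool_param) cache contract concrete. The sketch below is illustrative and not part of the dataset record: the tiny forward helper max_pool_forward_tiny, the 4x4 input, and the all-ones upstream gradient are assumptions chosen only to exercise max_pool_backward as reconstructed above.

import numpy as np

def max_pool_forward_tiny(x, pool_param):
    # Minimal naive forward pass; it exists only to build the (x, pool_param)
    # cache that max_pool_backward above expects (an assumption mirroring the
    # forward implementations listed among the negatives).
    N, C, H, W = x.shape
    ph, pw, s = pool_param['pool_height'], pool_param['pool_width'], pool_param['stride']
    Hp, Wp = 1 + (H - ph) // s, 1 + (W - pw) // s
    out = np.zeros((N, C, Hp, Wp))
    for n in range(N):
        for c in range(C):
            for i in range(Hp):
                for j in range(Wp):
                    out[n, c, i, j] = np.max(x[n, c, i*s:i*s+ph, j*s:j*s+pw])
    return out, (x, pool_param)

x = np.random.randn(2, 3, 4, 4)
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
out, cache = max_pool_forward_tiny(x, pool_param)
dout = np.ones_like(out)             # stand-in for the upstream gradient
dx = max_pool_backward(dout, cache)  # routes each upstream value to its window's argmax
print(dx.shape)                      # (2, 3, 4, 4); entries are nonzero only at pooled maxima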
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_pool_backward_naive(dout, cache):\n dx = None\n #############################################################################\n # TODO: Implement the max pooling backward pass #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dx", "def max_pool_backward_naive(dout, cache):\n x, pool_params = cache\n N, C, H, W = x.shape\n\n pool_height = pool_params['pool_height']\n pool_width = pool_params['pool_width']\n stride = pool_params['stride']\n\n Hc = (H - pool_height) / stride + 1\n Wc = (W - pool_width) / stride + 1\n\n dx = np.zeros(x.shape)\n #############################################################################\n # TODO: Implement the max pooling backward pass #\n #############################################################################\n for i in xrange(N):\n for c in xrange(C):\n for hc in xrange(Hc):\n for wc in xrange(Wc):\n subx = x[i, c, hc:stride*hc+pool_height, wc:stride*wc+pool_width]\n subdx = dx[i, c, hc:stride*hc+pool_height, wc:stride*wc+pool_width]\n max_value = np.max(subx)\n \n subdx += (subx == max_value) * dout[i, c, hc, wc]\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dx", "def conv_relu_pool_backward_naive(dout, cache):\n\tconv_cache, relu_cache, pool_cache = cache\n\tds = max_pool_backward_naive(dout, pool_cache)\n\tda = relu_backward(ds, relu_cache)\n\tdx, dw, db = conv_backward_naive(da, conv_cache)\n\treturn dx, dw, db", "def max_pool_backward_naive(dout, cache):\n dx = None\n ###########################################################################\n # TODO: Implement the max pooling backward pass #\n ###########################################################################\n #Extract info from cache.\n x,pool_param = cache\n pool_width = pool_param['pool_width']\n pool_height = pool_param['pool_height']\n stride = pool_param['stride']\n N,C,H,W = x.shape\n\n #Start computing dx,same as forward pass loop with the correct stride over x.\n dx = np.zeros_like(x)\n for i in range(N):\n a = 0;b = 0\n for t in range(0,H - pool_height + 1,stride):\n for k in range(0,W - pool_width + 1,stride):\n #Go over all of the channels.\n for c in range(C):\n #Find max.\n max_index = np.argmax(x[i,c,t:(t + pool_height),k:(k + pool_width)])\n #Conver flat index.\n index = np.unravel_index(max_index,(pool_height,pool_width))\n dx[i,c,t + index[0],k + index[1]] += dout[i,c,a,b]\n if (b == dout.shape[3] - 1):\n a += 1\n b = 0\n else:\n b += 1\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def max_pool_backward(dout, cache):\n dx = None\n ###########################################################################\n # TODO: Implement the max-pooling backward pass #\n ###########################################################################\n x, pool_param = cache\n N, C, H, W = x.shape\n pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']\n stride = pool_param['stride']\n \n HH = 1 + (H - pool_height) // stride\n WW = 1 + (W - pool_width) // stride\n x_strides = x[0][0].strides\n strides = 
tuple(np.array(x_strides)*stride)\n \n dx = np.zeros(x.shape)\n \n for n in range(N):\n for c in range(C):\n for h in range(HH):\n for w in range(WW):\n h_start = stride * h\n h_end = h_start + pool_height\n\n w_start = stride * w\n w_end = w_start + pool_width\n\n # get the pool window in the input x\n pool_window = x[n, c, h_start:h_end, w_start:w_end]\n \n m = np.max(pool_window)\n dx_window = np.where(pool_window == m, 1, 0)\n \n dx[n, c, h_start:h_end, w_start:w_end] += dx_window * dout[n, c, h, w]\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def conv_relu_pool_backward(dout, cache):\n conv_cache, relu_cache, pool_cache = cache\n ds = layers.max_pool_backward_naive(dout, pool_cache)\n da = layers.relu_backward(ds, relu_cache)\n dx, dw, db = layers.conv_backward_naive(da, conv_cache)\n return dx, dw, db", "def max_pool_backward_naive(dout, cache):\n\tdx = None\n\n\tx, pool_param = cache\n\tN,C,H,W = x.shape\n\tHH = pool_param['pool_height']\n\tWW = pool_param['pool_width']\n\tstride = pool_param['stride']\n\tHp = int(1 + (H-HH)/stride)\n\tWp = int(1 + (W-WW)/stride)\n\n\tdx = np.zeros_like(x)\n\n\tfor n in range(N):\n\t\tfor c in range(C):\n\t\t\tfor j in range(Hp):\n\t\t\t\tfor i in range(Wp):\n\t\t\t\t\tind = np.argmax(x[n,c,j*stride:j*stride+HH,i*stride:i*stride+WW])\n\t\t\t\t\tind1, ind2 = np.unravel_index(ind, (HH,WW))\n\t\t\t\t\tdx[n,c,j*stride:j*stride+HH,i*stride:i*stride+WW][ind1, ind2] = dout[n,c,j,i]\n\n\treturn dx", "def max_pool_backward(dout, cache):\n dx = None\n ###########################################################################\n # TODO: Implement the max-pooling backward pass #\n ###########################################################################\n (x, pool_param) = cache\n (N, C, H, W) = x.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n HH = int(1 + (H - pool_height) / stride)\n WW = int(1 + (W - pool_width) / stride)\n\n dx = np.zeros_like(x)\n\n for n in range(N):\n for c in range(C):\n for h in range(HH):\n for w in range(WW):\n h1 = h * stride\n h2 = h1 + pool_height\n w1 = w * stride\n w2 = w1 + pool_width\n block = np.reshape(x[n, c, h1:h2, w1:w2], (pool_height*pool_width))\n mask = np.zeros_like(block)\n mask[np.argmax(block)] = 1\n dx[n,c,h1:h2,w1:w2] += np.reshape(mask,(pool_height,pool_width)) * dout[n,c,h,w]\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "def conv_relu_pool_backward(dout, cache):\n conv_cache, relu_cache, pool_cache = cache\n ds = max_pool_backward_fast(dout, pool_cache)\n da = relu_backward(ds, relu_cache)\n dx, dw, db = conv_backward_fast(da, conv_cache)\n return dx, dw, db", "def conv_relu_pool_backward(dout, cache):\n conv_cache, relu_cache, pool_cache = cache\n ds = max_pool_backward_fast(dout, pool_cache)\n da = relu_backward(ds, relu_cache)\n dx, dw, db = conv_backward_fast(da, conv_cache)\n return dx, dw, db", "def max_pool_forward_naive(x, pool_param):\n out = None\n ###########################################################################\n # TODO: Implement the max pooling forward pass #\n ###########################################################################\n N,C,H,W = x.shape\n pool_width = pool_param['pool_width']\n pool_height = 
pool_param['pool_height']\n stride = pool_param['stride']\n #Compute output size.\n out_width = int((W - pool_width) / stride + 1)\n out_height = int((H - pool_height) / stride + 1) \n out = np.zeros((N,C,out_height,out_width))\n #Naive implementation:Loop over each training example and max pool.(Naive===lots of FOR)\n for i in range(N):\n #Counters for output indices.\n a = 0;b = 0\n for t in range(0,H - pool_height + 1,stride):\n for k in range(0,W - pool_width + 1,stride):\n #Get max in each depth.\n for c in range(C):\n out[i,c,a,b] += np.max(x[i,c,t:(t + pool_height),k:(k + pool_width)])\n if (b == out_width - 1):\n a += 1\n b = 0\n else:\n b += 1\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, pool_param)\n return out, cache", "def backward_pass(self):\r\n # the gradient of cross-entropy on top of softmax is (t-y)\r\n back_output = (self.targets - self.y) / self.y.shape[0]\r\n\r\n for layer in reversed(self.layers):\r\n back_output = layer.backward_pass(back_output)", "def max_pool_forward_naive(x, pool_param):\n\tout = None\n\t\n\tN, C, H, W = x.shape\n\tHH = pool_param['pool_height']\n\tWW = pool_param['pool_width']\n\tstride = pool_param['stride']\n\tHp = int(1 + (H-HH)/stride)\n\tWp = int(1 + (W-WW)/stride)\n\n\tout = np.zeros((N,C,Hp,Wp))\n\n\tfor n in range(N):\n\t\tfor j in range(Hp):\n\t\t\tfor i in range(Wp):\n\t\t\t\tout[n,:,j,i] = np.amax(x[n,:,j*stride:j*stride+HH,i*stride:i*stride+WW], axis=(-1,-2))\n\n\tcache = (x, pool_param)\n\treturn out, cache", "def backward(self, input_train, input_train_label):\n batchSize = len(input_train) #liczba obrazow podawanych na wejscie w trakcie jednej iteracji\n weights = self.Weights\n biases = self.Biases\n delta_W = self.delta_W\n delta_B = self.delta_B\n poolParams = self.poolParams\n dW_list = []\n dB_list = []\n dW4 = np.zeros(weights[4].shape)\n dB4 = np.zeros(biases[4].shape)\n dW3 = np.zeros(weights[3].shape)\n dB3 = np.zeros(biases[3].shape)\n dW2 = np.zeros(weights[2].shape)\n dB2 = np.zeros(biases[2].shape)\n dW1 = np.zeros(weights[1].shape)\n dB1 = np.zeros(biases[1].shape)\n dW0 = np.zeros(weights[0].shape)\n dB0 = np.zeros(biases[0].shape)\n loss = 0\n for image in range(batchSize):\n\n X_data = input_train[image]\n X_label = input_train_label[image]\n output_forward, cache = self.forward(X_data) \n loss += -1*sum(X_label - np.log(output_forward)) #obliczenie wartosci funkcji straty [cross entropy]\n\n #Propagacja wsteczna gradientu\n dy = -1*(X_label - output_forward)/2\n #print(\"X_label = {} \\t layer7 = {} \\t dy = {}\".format(X_label, output_forward, dy))\n\n [dy, dW, dB ] = fullycon_b(cache[6], np.asarray([dy]).transpose() , weights[4])\n dW4 += dW\n dB4 += dB.flatten() #wektoryzacja macierzy\n dy = act.relu_b(dy.transpose(), cache[6])\n\n [dy, dW, dB ] = fullycon_b(cache[5][:,0], dy, weights[3])\n dW3 += dW\n dB3 += dB.flatten()\n dy = act.relu_b(dy.transpose(), cache[5][:,0]) \n \n [dy, dW, dB ] = convolution_b(cache[4], dy, weights[2])\n dW2 += dW\n dB2 += dB.flatten()\n \n dy = maxpool_b(cache[3], dy)\n dy = act.relu_b(dy, cache[3])\n\n [dy, dW, dB ] = convolution_b(cache[2], dy, weights[1])\n dW1 += dW\n dB1 += dB.flatten()\n \n dy = maxpool_b(cache[1], dy)\n dy = act.relu_b(dy, cache[1]) \n\n [dy, dW, dB ] = convolution_b(np.asarray([cache[0]]), dy, weights[0])\n dW0 += dW\n dB0 += dB.flatten()\n\t\t\t\n dW_list.append(dW4)\n dB_list.append(dB4)\n 
dW_list.append(dW3)\n dB_list.append(dB3)\n dW_list.append(dW2)\n dB_list.append(dB2)\n dW_list.append(dW1)\n dB_list.append(dB1)\n dW_list.append(dW0)\n dB_list.append(dB0)\n dW_list = dW_list[::-1]\n dB_list = dB_list[::-1]\n \n #Aktualizacja parametrow kazdej z warstw (o ile takie posiada)\n #uczenie z metoda momentum: learning rate = const; alpha = const\n for x in range(len(dW_list)):\n delta_W[x] = alpha*delta_W[x] - eta*dW_list[x]/batchSize\n weights[x] += delta_W[x]\n delta_B[x] = alpha*delta_B[x] - eta*dB_list[x]/batchSize\n biases[x] += delta_B[x]\n #przypisanie nowych wag po aktualiacji wszystkich parametrow\n self.Weights = weights\n self.Biases = biases\n\n #zwrocenie stosunku wartosci f-cji straty do rozmiaru batch'u\n return loss/batchSize", "def max_pool_forward_naive(x, pool_param):\n out = None\n #############################################################################\n # TODO: Implement the max pooling forward pass #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = (x, pool_param)\n return out, cache", "def max_pool_forward_naive(x, pool_param):\n N, C, H, W = x.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n\n Hc = (H - pool_height) / stride + 1\n Wc = (W - pool_width) / stride + 1\n out = np.random.randn(N, C, Hc, Wc)\n #############################################################################\n # TODO: Implement the max pooling forward pass #\n #############################################################################\n for i in xrange(N):\n for c in xrange(C):\n for hc in xrange(Hc):\n for wc in xrange(Wc):\n out[i, c, hc, wc] = np.max(x[i, c, hc:stride*hc+pool_height, wc:stride*wc+pool_width])\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = (x, pool_param)\n return out, cache", "def max_pool_forward(x, pool_param):\n out = None\n ###########################################################################\n # TODO: Implement the max-pooling forward pass #\n ###########################################################################\n \n N, C, H, W = x.shape\n pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']\n stride = pool_param['stride']\n \n HH = 1 + (H - pool_height) // stride\n WW = 1 + (W - pool_width) // stride\n x_strides = x[0][0].strides\n strides = tuple(np.array(x_strides)*stride)\n \n out = np.zeros((N,C,HH,WW))\n \n for n in range(N):\n for c in range(C):\n out_shape = (HH,WW,pool_height,pool_width)\n pool_blocks = np.lib.stride_tricks.as_strided(x[n][c],out_shape,strides+x_strides)\n out[n][c] = np.max(pool_blocks, axis=(2,3))\n \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, pool_param)\n return out, cache", "def backpropagation(self):\n\n print \"Backpropagation in pool layer\"\n deltasNext = self.__nextLayer.getDeltas()\n self.deltas = np.zeros(self.inputShape)\n\n\n # for para dar los valores del delta siguiente a los maximos\n idx = 0\n for n in range(self.inputShape[0]):\n for c in range(self.inputShape[1]):\n nh = 0\n for 
h in range(self.inputShape[2], self.inputShape[2] - self.kernelSize[0] + 1, self.stride[0]):\n nw = 0\n for w in range(self.inputShape[3], self.inputShape[3] - self.kernelSize[1] + 1, self.stride[1]):\n self.deltas[n, c, w + self.maxIdx[idx][0], h + self.maxIdx[idx][1]] = deltasNext[\n n, c,\n nh: nh + self.kernelSize[\n 0],\n nw:nw + self.kernelSize[\n 1]]\n idx += 1\n\n if self.__previousLayer is None:\n return self.deltas\n else:\n return self.__previousLayer.backpropagation()", "def max_pool_forward(x, pool_param):\n out = None\n ###########################################################################\n # TODO: Implement the max-pooling forward pass #\n ###########################################################################\n N, C, H, W = x.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n H_prime = int(1 + (H - pool_height) / stride)\n W_prime = int(1 + (W - pool_width) / stride) #python 3 / is just float number division\n \n out = np.zeros((N,C,H_prime,W_prime))\n \n for n in range(N):\n for i in range(H_prime):\n for j in range(W_prime):\n h_start = i * stride\n h_end = h_start + pool_height\n w_start = j * stride\n w_end = w_start + pool_width\n pool_window = x[n, :, h_start:h_end, w_start:w_end]\n pool_window = pool_window.reshape((C,-1))\n out[n,:,i,j] = np.max(pool_window, axis=1)\n \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, pool_param)\n return out, cache", "def pool_backward(dA, A_prev, kernel_shape, stride=(1, 1), mode='max'):\n m = dA.shape[0]\n h_new = dA.shape[1]\n w_new = dA.shape[2]\n c = dA.shape[3]\n h_prev = A_prev.shape[1]\n w_prev = A_prev.shape[2]\n kh = kernel_shape[0]\n kw = kernel_shape[1]\n # image_num = np.arange(m)\n sh = stride[0]\n sw = stride[1]\n func = {'max': np.max, 'avg': np.mean}\n\n dA_prev = np.zeros(shape=A_prev.shape)\n\n if mode in ['max', 'avg']:\n for img_num in range(m):\n for k in range(c):\n for i in range(h_new):\n for j in range(w_new):\n window = A_prev[\n img_num,\n i * sh: i * sh + kh,\n j * sw: j * sw + kw,\n k\n ]\n if mode == 'max':\n # maxpool returns the max\n # derivative of maxpool relative to the max is 1\n # derivative relative to any other element is 0\n # backpropagate 1 to the unit corresponding to max\n # backpropagate 0 for the other units\n # given these comments, define a mask of 1 and 0s\n mask = np.where(window == np.max(window), 1, 0)\n # print(mask)\n elif mode == 'avg':\n # define a mask weighted by the number of\n # elements in the pooling layer (kh * kw)\n mask = np.ones(shape=window.shape)\n mask /= (kh * kw)\n # print(mask)\n dA_prev[\n img_num,\n i * sh: i * sh + kh,\n j * sw: j * sw + kw,\n k\n ] += mask * dA[\n img_num,\n i,\n j,\n k\n ]\n return dA_prev", "def backward(self, loss):\n global_timer.my_timer.start_profile(\"BWD\")\n mgr = PatrickStarManager()\n mgr.set_training_stage(TrainingStage.BWD)\n\n for param_fp16 in self.client.chunk_based_param_fp16:\n param_fp16.ps_attr.bwd_used_cnt = 0\n\n self.optimizer.zero_grad()\n if self.loss_scaler:\n self.loss_scaler.backward(loss)\n else:\n loss.backward()\n mgr.update_margin_mem()\n global_timer.my_timer.finish_profile(\"BWD\")", "def backward_G(self):\n # Calculate regularzation loss to make transformed feature and target image feature in the same latent space\n self.loss_reg_gen = self.loss_reg * 
self.opt.lambda_regularization\n\n # Calculate l1 loss \n loss_app_gen = self.L1loss(self.img_gen, self.input_P2)\n self.loss_app_gen = loss_app_gen * self.opt.lambda_rec \n \n # parsing loss\n label_P2 = self.label_P2.squeeze(1).long()\n #print(self.input_SPL2.min(), self.input_SPL2.max(), self.parsav.min(), self.parsav.max())\n self.loss_par = self.parLoss(self.parsav,label_P2)# * 20. \n self.loss_par1 = self.L1loss(self.parsav, self.input_SPL2) * 100 \n\n # Calculate GAN loss\n base_function._freeze(self.net_D)\n D_fake = self.net_D(self.img_gen)\n self.loss_ad_gen = self.GANloss(D_fake, True, False) * self.opt.lambda_g\n\n # Calculate perceptual loss\n loss_content_gen, loss_style_gen = self.Vggloss(self.img_gen, self.input_P2) \n self.loss_style_gen = loss_style_gen*self.opt.lambda_style\n self.loss_content_gen = loss_content_gen*self.opt.lambda_content\n\n total_loss = 0\n\n for name in self.loss_names:\n if name != 'dis_img_gen':\n #print(getattr(self, \"loss_\" + name))\n total_loss += getattr(self, \"loss_\" + name)\n total_loss.backward()", "def backward_pass(architecture,gradient_layerwise,grad_weights,grad_bias):\n \n for layer in range(len(architecture)-1,-1,-1):\n X_input,X_output,weightsi,biasi,X_input_im2col,imi,output_shapei,kernel_shapei,stridei,operationi,imxi = architecture['layer{}'.format(layer+1)]\n# print(\"Operation is:{} and Layer is: {}\".format(operationi,layer+1))\n if operationi == 'softmax': # Last layer -> Dont apply softmax in any layer other than the last layer!\n # not taking gradients here because we need dz_dX(secondlastlayer) which is y_pred - y\n continue\n \n if operationi == 'conv_bn_relu' or operationi == 'conv_relu' or operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if operationi__1 == 'softmax':\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi # .\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # .\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input_im2col)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input_im2col)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi #\n elif operationi__1 == 'maxpool': # need to do something here to fix the problem\n None\n\n elif 'flatten' in operationi__1:\n # we currently have dz_doutput of flatten -> we want dz_doutput of the conv_bn_relu before flatten\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2] # weights2\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput of flatten\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5] # i\n try:\n dz_dXi = torch.t(weightsi__1).mm(dz_dXi__1)\n except:\n dz_dXi = weightsi__1.mm(dz_dXi__1)\n X_output = 
torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n\n dz_dXi = torch.reshape(dz_dXi,(output_shapei[1]*output_shapei[2],-1))\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n dz_dweightsi = X_input_im2col.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n dz_dbi = dz_dXi\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)# Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi) # Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi) # Can also set this to layer like in line ~800\n \n else:\n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dX2 -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n \n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n if 'sigmoid' in operationi__1: # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi__1: # ...\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n dz_dXi = torch.reshape(dz_dXi,(output_shape_current_layer[1]*output_shape_current_layer[2],-1))\n dz_dbi = torch.reshape(dz_dXi,bias_current_layer.shape)\n dz_dweightsi = X_im2col_current_layer.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n \n if operationi == 'maxpool':\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n \n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n try:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n except:\n Y = torch.t(weightsi__1).mm(dz_dXi__1) # Ensuring valid 
matrix multiplication here\n \n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n \n if operationi__1 == 'conv_sigmoid' or operationi__1 == 'conv_bn_sigmoid': # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n else:\n dz_dXi[X_output <= 0] = 0\n\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n \n dz_dXinput = torch.zeros((X_input.shape))\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+1)][0] # output = output of maxpool\n\n dz_dXoutput = torch.reshape(dz_dXoutput,(output_shapei[0],X_input_im2col.shape[2]))\n \n for i in range(output_shapei[0]):\n for j in range(X_input_im2col.shape[2]):\n Xi2ci = X_im2col_current_layer[i,:,:]\n idx = torch.argmax(Xi2ci[:,j]).item()\n value = imxi[i][(idx,j)]\n dz_dXinput[value[0],value[1],value[2]] += float(dz_dXoutput[i,j])\n\n# dz_dXinput = torch.reshape(dz_dXinput,output_shapei)\n \n X_prev_im2col = architecture['layer{}'.format(layer)][4]\n X_output_prev = architecture['layer{}'.format(layer)][1]\n X_output_prev = torch.reshape(X_output_prev,dz_dXinput.shape)\n X_input_prev = architecture['layer{}'.format(layer)][0]\n prev_bias = architecture['layer{}'.format(layer)][3]\n output_shape_prev = architecture['layer{}'.format(layer)][6]\n prev_operation = architecture['layer{}'.format(layer)][9]\n \n if prev_operation == 'conv_sigmoid' or prev_operation == 'conv_bn_sigmoid':\n dz_dXinput *= sigmoid(X_output_prev)*(1-sigmoid(X_output_prev)) # Taking the derivative of the sigmoid function\n else:\n dz_dXinput[X_output_prev <= 0] = 0\n \n if len(dz_dXinput.shape) == 3:\n dz_dXinput = torch.reshape(dz_dXinput,(-1,output_shape_prev[0]))\n \n dz_dbi = torch.reshape(dz_dXinput,prev_bias.shape)\n dz_dweightsi = X_prev_im2col.mm(dz_dXinput)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer)][0] = torch.Tensor(dz_dXinput) # ...\n \n if 'flatten_dense' in operationi:\n \n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n \n if operationi__1 == 'softmax':\n \n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n X_output = torch.reshape(X_output,(-1,1))\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if 'sigmoid' in operationi:\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n 
except:\n dz_dweightsi = (dz_dXi).mm(X_input)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi # Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi # Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # Can also set this to layer like in line ~800\n \n else:\n # Have to modify and test this before implementation -> Specifically\n # the backprop implementation is not consistent with the ones above\n #\n X_output = torch.reshape(X_output,(-1,1))\n weights__i = architecture['layer{}'.format(layer+2)][2]\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+2)][0]\n dz_dXoutput = torch.reshape(torch.Tensor(dz_dXoutput),X_output.shape)\n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n\n if 'relu' in operationi:\n dz_dXoutput[X_output<0] = 0\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n if 'sigmoid' in operationi:\n dz_dXoutput*= sigmoid(X_output)*(1-sigmoid(X_output))\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n else:\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n \n unflattened_Xinput = architecture['layer{}'.format(layer+1)][0]\n dz_dXinput = torch.reshape(dz_dXinput,unflattened_Xinput.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXinput)\n \n if gradient_layerwise['layer{}'.format(layer+1)][1] is not None:\n try:\n grad_weights['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][1]\n except:\n grad_weights['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][1])\n if gradient_layerwise['layer{}'.format(layer+1)][2] is not None:\n try:\n grad_bias['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][2]\n except:\n grad_bias['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][2])\n \n gc.collect()\n return", "def backward(self, dout):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n for l in range(len(self.layers)-1,-1,-1):\n act_dout = self.activations[l].backward(dout)\n dout = self.layers[l].backward(act_dout)\n ########################\n # END OF YOUR CODE #\n #######################\n\n return", "def max_pool_forward(x, pool_param):\n out = None\n ###########################################################################\n # TODO: Implement the max-pooling forward pass #\n ###########################################################################\n (N, C, H, W) = x.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n HH = int(1 + (H - pool_height) / 
stride)\n WW = int(1 + (W - pool_width) / stride)\n\n out = np.zeros((N, C, HH, WW))\n\n for n in range(N):\n for h in range(HH):\n for w in range(WW):\n h1 = h * stride\n h2 = h1 + pool_height\n w1 = w * stride\n w2 = w1 + pool_width\n block = x[n, :, h1:h2, w1:w2]\n out[n,:,h,w] = np.max(block.reshape((C, pool_height*pool_width)), axis=1)\n\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, pool_param)\n return out, cache", "def conv_relu_pool_forward_naive(x, w, b, conv_param, pool_param):\n\ta, conv_cache = conv_forward_naive(x, w, b, conv_param)\n\ts, relu_cache = relu_forward(a)\n\tout, pool_cache = max_pool_forward_naive(s, pool_param)\n\tcache = (conv_cache, relu_cache, pool_cache)\n\treturn out, cache", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n # conv - relu - 2x2 max pool - affine - relu - affine - softmax\n\n\n # pass conv_param to the forward pass for the convolutional layer\n # Padding and stride chosen to preserve the input spatial size\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) // 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\n h1, c1 = conv_forward_im2col(X, W1, b1, conv_param) #\n h1, r1 = relu_forward(h1)\n h1, p1 = max_pool_forward_fast(h1, pool_param) #\n max_pool_shape = h1.shape\n h1 = h1.reshape(X.shape[0], -1)\n h2, c2 = affine_relu_forward(h1, W2, b2)\n scores, c3 = affine_forward(h2, W3, b3)\n\n if y is None:\n return scores\n\n loss, dx = softmax_loss(scores, y)\n\n loss += self.reg / 2 * (self.params['W1']**2).sum()\n loss += self.reg / 2 * (self.params['W2']**2).sum()\n loss += self.reg / 2 * (self.params['W3']**2).sum()\n\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. 
#\n ############################################################################\n \n grads = {}\n dx, grads['W3'], grads['b3'] = affine_backward(dx, c3)\n grads['W3'] += self.reg * self.params['W3']\n dx, grads['W2'], grads['b2'] = affine_relu_backward(dx, c2)\n dx = dx.reshape(max_pool_shape)\n dx = max_pool_backward_fast(dx, p1)\n dx = relu_backward(dx, r1)\n dx, grads['W1'], grads['b1'] = conv_backward_im2col(dx, c1)\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def _poputil_recompute_backward(op, grads):\n return grads", "def loss(self, X, y=None):\n W1 = self.params['W1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n X, cache_conv = conv_forward(X, W1)\n X, x_relu1 = relu_forward(X)\n X, cache_maxpool = max_pool_forward(X, pool_param)\n N1,C1,H1,W1 = X.shape\n X = X.reshape(N1, C1 * H1 * W1)\n X, cache_fc2 = fc_forward(X, W2, b2)\n X, x_relu2 = relu_forward(X)\n X, cache_fc3 = fc_forward(X, W3, b3)\n scores = X\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. 
#\n ############################################################################\n loss, dx = softmax_loss(X, y)\n dx, dw, db = fc_backward(dx, cache_fc3)\n grads['W3'] = dw\n grads['b3'] = db\n dx = relu_backward(dx, x_relu2)\n dx, dw, db = fc_backward(dx, cache_fc2)\n grads['W2'] = dw\n grads['b2'] = db\n xx, Ind, pp = cache_maxpool\n N2,C2,H2,W2 = xx.shape\n H2 = int(H2/2)\n W2 = int(W2/2)\n dx = dx.reshape(N2,C2,H2,W2)\n dx = max_pool_backward(dx, cache_maxpool)\n dx = relu_backward(dx, x_relu1)\n dx, dw = conv_backward(dx, cache_conv)\n grads['W1'] = dw\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def _poputil_block_recompute_backward(op, grads):\n return grads", "def backward_pass(self, loss):\n\n self.optimizer.zero_grad()\n self.optimizer.backward(loss)\n self.optimizer.step()", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n \n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n cnn_out, cnn_cache = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)\n hidden_out, hidden_cache = affine_relu_forward(cnn_out, W2, b2)\n scores, scores_cache = affine_forward(hidden_out, W3, b3)\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! 
#\n ############################################################################\n\n # Compute loss and gradients\n loss, dscores = softmax_loss(scores, y)\n dhidden, grads['W3'], grads['b3'] = affine_backward(dscores, scores_cache)\n dcnn, grads['W2'], grads['b2'] = affine_relu_backward(dhidden, hidden_cache)\n dX, grads['W1'], grads['b1'] = conv_relu_pool_backward(dcnn, cnn_cache)\n\n # Regularization\n loss = loss + 0.5*self.reg*np.sum(self.params['W3']**2)\n loss = loss + 0.5*self.reg*np.sum(self.params['W2']**2)\n loss = loss + 0.5*self.reg*np.sum(self.params['W1']**2)\n grads['W3'] = grads['W3'] + self.reg * self.params['W3']\n grads['W2'] = grads['W2'] + self.reg * self.params['W2']\n grads['W1'] = grads['W1'] + self.reg * self.params['W1']\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def backprop(nn, y):\n LAST = len(nn) - 1\n\n # last layer\n nn[LAST].dCdz = np.multiply(2.0 * (nn[LAST].a - y), AF_PRIME(nn[LAST].z))\n nn[LAST].dCdw = (np.dot(nn[LAST].dCdz, nn[LAST].input_value.T))\n nn[LAST].dCdw_sum = \\\n np.add(nn[LAST].dCdw, nn[LAST].dCdw_sum)\n nn[LAST].w -= nn[LAST].dCdw * LEARNING_RATE\n\n # other layer\n for n in range(1, len(nn)):\n dz1dz2 = \\\n np.dot(nn[LAST - n + 1].w.T, nn[LAST - n + 1].dCdz)\n nn[LAST - n].dCdz = \\\n np.multiply(AF_PRIME(nn[LAST - n].z), dz1dz2)\n nn[LAST - n].dCdw = \\\n (np.dot(nn[LAST - n].dCdz, nn[LAST - n].input_value.T))\n nn[LAST - n].dCdw_sum = \\\n np.add(nn[LAST - n].dCdw, nn[LAST - n].dCdw_sum)\n nn[LAST - n].w -= nn[LAST - n].dCdw * LEARNING_RATE", "def backward(self, *output_grads):\n raise NotImplementedError", "def _AffBatchReluDrop_Backprop(self, dscores, cache):\n grads = {}\n loss = None\n #Last Softmax Layer\n ##Add L2 Regularization loss\n loss = 0.5 * self.reg * np.sum(self.params['W{0}'.format(self.num_layers)]**2)\n ##Calculate grads for last Affine\n dhid, grads['W{0}'.format(self.num_layers)], grads['b{0}'.format(self.num_layers)] =\\\n affine_backward(dscores, cache[-1])\n grads['W{0}'.format(self.num_layers)] += self.reg * self.params['W{0}'.format(self.num_layers)]\n\n for i in range(self.num_layers-1, 0, -1): #hidden layers\n ##L2 Reg. loss\n loss += 0.5 * self.reg * np.sum(self.params['W{0}'.format(i)]**2)\n ##Calculate grads for [{affine-Batchnorm-relu-drop} X (L-1)]\n dhid = dropout_backward(dhid, cache[i]['drop'])\n dhid = relu_backward(dhid, cache[i]['relu'])\n dhid, grads['gamma{0}'.format(i)], grads['beta{0}'.format(i)] = \\\n batchnorm_backward_alt(dhid, cache[i]['batchnorm'])\n dhid, grads['W{0}'.format(i)], grads['b{0}'.format(i)] = \\\n affine_backward(dhid, cache[i]['affine']) \n grads['W{0}'.format(i)] += self.reg * self.params['W{0}'.format(i)]\n\n return grads, loss", "def _AffLayerReluDrop_Backprop(self, dscores, cache):\n grads = {}\n loss = None\n #Last Softmax Layer\n ##Add L2 Regularization loss\n loss = 0.5 * self.reg * np.sum(self.params['W{0}'.format(self.num_layers)]**2)\n ##Calculate grads for last Affine\n dhid, grads['W{0}'.format(self.num_layers)], grads['b{0}'.format(self.num_layers)] =\\\n affine_backward(dscores, cache[-1])\n grads['W{0}'.format(self.num_layers)] += self.reg * self.params['W{0}'.format(self.num_layers)]\n\n for i in range(self.num_layers-1, 0, -1): #hidden layers\n ##L2 Reg. 
loss\n loss += 0.5 * self.reg * np.sum(self.params['W{0}'.format(i)]**2)\n ##Calculate grads for [{affine-Batchnorm-relu} X (L-1)]\n dhid = dropout_backward(dhid, cache[i]['drop'])\n dhid = relu_backward(dhid, cache[i]['relu'])\n dhid, grads['gamma{0}'.format(i)], grads['beta{0}'.format(i)] = \\\n layernorm_backward(dhid, cache[i]['layernorm'])\n dhid, grads['W{0}'.format(i)], grads['b{0}'.format(i)] = \\\n affine_backward(dhid, cache[i]['affine']) \n grads['W{0}'.format(i)] += self.reg * self.params['W{0}'.format(i)]\n\n return grads, loss", "def pool_backward(dA, A_prev, kernel_shape, stride=(1, 1), mode='max'):\n sh, sw = stride\n kh, kw = kernel_shape\n m, h_prev, w_prev, c_prev = A_prev.shape\n dm, h_new, w_new, c_new = dA.shape\n dA_prev = np.zeros(A_prev.shape)\n for i in range(m):\n for j in range(h_new):\n for k in range(w_new):\n jsh = j * sh\n ksw = k * sw\n for ll in range(c_new):\n pool = A_prev[i, jsh: jsh + kh, ksw: ksw + kw, ll]\n if mode == 'max':\n maxp = np.amax(pool)\n mask = np.zeros(kernel_shape)\n np.place(mask, pool == maxp, 1)\n dA_prev[i, jsh: jsh + kh, ksw: ksw + kw, ll] += \\\n mask * dA[i, j, k, ll]\n else:\n mask = np.ones(kernel_shape)\n dA_prev[i, jsh: jsh + kh, ksw: ksw + kw, ll] += \\\n mask * dA[i, j, k, ll] / kh / kw\n return dA_prev", "def _poputil_remap_deduce_layer_backward(op, grads):\n return grads", "def loss(self, X, y=None):\n\n # In dev testing, the loss fnc stops at \"scores\" , unfollowed by \"softmax\" probability prediction.\n # In real testing, \"self.predict()\" needs to be implemented in Solver() class.\n \n if y is None:\n for bn_param in self.bn_params:\n bn_param[\"mode\"] = \"test\"\n\n\n W1, b1 = self.params['W1'], self.params['b1']\n gamma1, beta1 = self.params[\"sbnGamma1\"], self.params[\"sbnBeta1\"]\n bn_param1 = self.bn_params[0]\n\n W2, b2 = self.params['W2'], self.params['b2']\n gamma2, beta2 = self.params[\"sbnGamma2\"], self.params[\"sbnBeta2\"]\n bn_param2 = self.bn_params[1]\n\n W3, b3 = self.params['W3'], self.params['b3']\n gamma3, beta3 = self.params[\"bnGamma3\"], self.params[\"bnBeta3\"]\n bn_param3 = self.bn_params[2]\n\n W4, b4 = self.params['W4'], self.params['b4']\n \n # pass conv_param to the forward pass for the convolutional layer\n conv_param = self.conv_param\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = self.maxpool_params\n\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. 
#\n ############################################################################\n \n scores = None \n cache = {}\n # def conv_sbn_relu_forward(x, w, b, gamma, beta, conv_param, bn_param): return out, cache;\n out, cache[\"layer1\"] = layer_utils.conv_sbn_relu_forward(X, W1, b1, gamma1, beta1, conv_param, bn_param1) \n out, cache[\"layer2\"] = layer_utils.conv_sbn_relu_forward(out, W2, b2, gamma2, beta2, conv_param, bn_param2)\n\n # def max_pool_forward_fast(x, pool_param): return out, cache;\n out, cache[\"maxpool\"] = fast_layers.max_pool_forward_fast(out, pool_param)\n\n # def affine_bn_relu_forward(x, w, b, gamma, beta, bn_param): return out, cache;\n \n out, cache[\"layer3\"] = layer_utils.affine_bn_relu_forward(out, W3, b3, gamma3, beta3, bn_param3)\n\n # def affine_forward(x, w, b): return out, cache;\n scores, cache[\"layer4\"] = layers.affine_forward(out, W4, b4)\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n ############################################################################\n \n loss, grads = 0, {}\n\n # def softmax_loss(x, y): return loss, dscore;\n loss, dscores = layers.softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3) + np.sum(W4 * W4))\n\n # def affine_backward(dout, cache): return dx, dw, db;\n dout, dW4, db4 = layers.affine_backward(dscores, cache[\"layer4\"]) \n\n # def affine_bn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW3, db3, dgamma3, dbeta3 = layer_utils.affine_bn_relu_backward(dout, cache[\"layer3\"])\n\n # print cache[\"layer3\"]\n\n # def max_pool_backward_fast(dout, cache): return max_pool_backward_im2col(dout, real_cache);\n # def max_pool_backward_im2col(dout, cache): return dx;\n dout = fast_layers.max_pool_backward_fast(dout, cache[\"maxpool\"])\n\n # def conv_sbn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW2, db2, dgamma2, dbeta2 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer2\"])\n _, dW1, db1, dgamma1, dbeta1 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer1\"])\n\n # reg\n grads['W4'], grads['b4'] = dW4 + self.reg * W4, db4\n \n grads['W3'], grads['b3'] = dW3 + self.reg * W3, db3\n grads[\"bnGamma3\"], grads[\"bnBeta3\"] = dgamma3, dbeta3\n\n grads['W2'], grads['b2'] = dW2 + self.reg * W2, db2\n grads[\"sbnGamma2\"], grads[\"sbnBeta2\"] = dgamma2, dbeta2\n\n grads['W1'], grads['b1'] = dW1 + self.reg * W1, db1\n grads[\"sbnGamma1\"], grads[\"sbnBeta1\"] = dgamma1, dbeta1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def conv_backward_naive(dout, cache):\n dx, dw, db = None, None, None\n ###########################################################################\n # TODO: Implement the convolutional backward pass. 
#\n ###########################################################################\n #Extract variables from cache.\n x,w,b,conv_param = cache\n stride = conv_param['stride']\n pad = conv_param['pad']\n #Extract shapes(lots of dimensions can become buggy)\n N,F,out_height,out_width = dout.shape\n #Save filter dimensions.\n HH,WW = w.shape[2],w.shape[3]\n #Start by computing gradient of the bias.(always the simplest one)\n db = np.sum(np.sum(np.sum(dout,axis = 3),axis = 2),axis = 0)\n dw = np.zeros_like(w)\n dx = np.zeros_like(x)\n #Start computing gradient of w and x.(Naive implementation)\n #Go over each filter in w.\n for i in range(F):\n #Go over each training example.\n for j in range(N):\n curr_x = x[j,:,:,:]\n #Get current gradient of activation map for j filter on i training example.\n curr_dout = dout[j,i,:,:]\n a = 0;b = 0\n #print(\"HERE\",curr_x.shape)\n #print(\"Stride:\",stride)\n for t in range(0,curr_x.shape[1] - WW + 1,stride):\n for k in range(0,curr_x.shape[2] - HH + 1,stride):\n #print(\"t: %d k: %d WW:%d HH:%d \" % (t,k,WW,HH))\n dw[i,:,:,:] += curr_dout[a,b] * curr_x[:,t:(t + WW),k:(k + HH)]\n dx[j,:,t:(t + WW),k:(k + HH)] += curr_dout[a,b] * w[i,:,:,:]\n if(b == dout.shape[3] - 1):\n a += 1\n b = 0\n else:\n b += 1\n #Remove padding.\n dx = dx[:,:,pad : (dx.shape[2] - pad),pad: (dx.shape[3] - pad)] \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx, dw, db", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n # Set train/test mode for batchnorm params and dropout param since they\n # behave differently during training and testing.\n if self.dropout_param is not None:\n self.dropout_param['mode'] = mode \n if self.use_batchnorm:\n for bn_param in self.bn_params:\n bn_param[mode] = mode\n\n scores = None\n \n ### forward pass ###\n L = self.num_layers\n past_caches = [0 for i in range(L)]\n \n if self.use_dropout:\n dropout_caches = [0 for i in range(L)]\n \n out = X\n if self.use_batchnorm:\n for i in range(L-1):\n\n out, past_caches[i] = affine_batch_relu_forward(out, self.params['W' + str(i+1)],\n self.params['b' + str(i+1)], \n self.params['gamma' + str(i+1)],\n self.params['beta' + str(i+1)],\n self.bn_params[i])\n if self.use_dropout:\n out, dropout_caches[i] = dropout_forward(out, self.dropout_param)\n else:\n for i in range(L-1):\n\n out, past_caches[i] = affine_relu_forward(out, self.params['W' + str(i+1)],\n self.params['b' + str(i+1)])\n if self.use_dropout:\n out, dropout_caches[i] = dropout_forward(out, self.dropout_param)\n \n scores, past_caches[L-1] = affine_forward(out, self.params['W' + str(L)],\n self.params['b' + str(L)])\n \n ### backpropagation ###\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n loss, grads = 0.0, {}\n \n loss_l2 = 0\n \n loss, dx = softmax_loss(scores, y)\n for i in range(L-1): \n W = 'W' + str(i+1)\n loss_l2 += np.sum(self.params[W]*self.params[W])\n loss_l2 *= 0.5 * self.reg\n loss += loss_l2\n \n W_final = 'W'+str(L)\n b_final = 'b'+str(L)\n dx, grads[W_final], grads[b_final] = affine_backward(dx, past_caches[L-1])\n grads[W_final] += self.reg * self.params[W_final]\n \n if self.use_batchnorm:\n for i in range(L-1):\n ind = L-1-i\n W = 'W'+str(ind)\n b = 'b'+str(ind)\n gamma = 'gamma' + str(ind)\n beta = 'beta' + str(ind)\n \n if self.use_dropout:\n dx = dropout_backward(dx, dropout_caches[-i-2])\n\n dx, grads[W], 
grads[b], grads[gamma], grads[beta] = affine_batch_relu_backward(dx, past_caches[-i-2])\n grads[W] += self.reg * self.params[W]\n\n else:\n for i in range(L-1):\n ind = L-1-i\n W = 'W'+str(ind)\n b = 'b'+str(ind)\n \n if self.use_dropout:\n dx = dropout_backward(dx, dropout_caches[-i-2])\n\n dx, grads[W], grads[b] = affine_relu_backward(dx, past_caches[-i-2])\n grads[W] += self.reg * self.params[W]\n\n return loss, grads", "def _pool_step(\n X,\n pool_size, #TODO(mmd): Better name\n pooler = tf.nn.max_pool,\n):\n # TODO(mmd): Why all the expansion squeezing necessary?\n x = tf.expand_dims(x, 3) # num_samples x num_features x num_filters_in x 1\n x = pooler(x, ksize=[1,pool_size,1,1], strides=[1,pool_size,1,1], padding='SAME')\n #tf.maximum\n return tf.squeeze(x, [3]) # num_samples x num_features / p x num_filters", "def conv_relu_pool_forward(x, w, b, conv_param, pool_param):\n a, conv_cache = conv_forward_fast(x, w, b, conv_param)\n s, relu_cache = relu_forward(a)\n out, pool_cache = max_pool_forward_fast(s, pool_param)\n cache = (conv_cache, relu_cache, pool_cache)\n return out, cache", "def conv_relu_pool_forward(x, w, b, conv_param, pool_param):\n a, conv_cache = conv_forward_fast(x, w, b, conv_param)\n s, relu_cache = relu_forward(a)\n out, pool_cache = max_pool_forward_fast(s, pool_param)\n cache = (conv_cache, relu_cache, pool_cache)\n return out, cache", "def backward_pass(self, grad):\n pass", "def backward_and_step(self, loss):\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n \n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n \n scores = None \n cache = {}\n # def conv_relu_pool_forward(x, w, b, conv_param, pool_param): return out, cache;\n out, cache['layer1'] = layer_utils.conv_relu_pool_forward(X, W1, b1, conv_param, pool_param) \n # def affine_relu_forward(x, w, b): return out, cache;\n out, cache['layer2'] = layer_utils.affine_relu_forward(out, W2, b2)\n # def affine_forward(x, w, b): return out, cache;\n scores, cache['layer3'] = layers.affine_forward(out, W3, b3)\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! 
#\n ############################################################################\n \n loss, grads = 0, {}\n\n # def softmax_loss(x, y): return loss, dscore;\n loss, dscores = layers.softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3))\n\n # def affine_backward(dout, cache): return dx, dw, db;\n dout, dW3, db3 = layers.affine_backward(dscores, cache['layer3']) \n # def affine_relu_backward(dout, cache): return dx, dw, db;\n dout, dW2, db2 = layer_utils.affine_relu_backward(dout, cache['layer2'])\n # def conv_relu_pool_backward(dout, cache): return dx, dw, db;\n dout, dW1, db1 = layer_utils.conv_relu_pool_backward(dout, cache['layer1'])\n\n # reg\n grads['W3'], grads['b3'] = dW3 + self.reg * W3, db3\n grads['W2'], grads['b2'] = dW2 + self.reg * W2, db2\n grads['W1'], grads['b1'] = dW1 + self.reg * W1, db1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def _pool(prev_layer, layer_name):\n with tf.name_scope(layer_name):\n return tf.nn.max_pool(prev_layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')", "def L_model_backward(AL, Y, caches):\n pass", "def backward(self, out_grad, input):\n raise NotImplementedError", "def _AffReluDrop_Backprop(self, dscores, cache):\n grads = {}\n loss = None\n #Last Softmax Layer\n ##Add L2 Regularization loss\n loss = 0.5 * self.reg * np.sum(self.params['W{0}'.format(self.num_layers)]**2)\n ##Calculate grads for last Affine\n dhid, grads['W{0}'.format(self.num_layers)], grads['b{0}'.format(self.num_layers)] =\\\n affine_backward(dscores, cache[-1])\n grads['W{0}'.format(self.num_layers)] += self.reg * self.params['W{0}'.format(self.num_layers)]\n\n for i in range(self.num_layers-1, 0, -1): #hidden layers\n thiscache = cache[i]\n ##L2 Reg. loss\n loss += 0.5 * self.reg * np.sum(self.params['W{0}'.format(i)]**2)\n ##Calculate grads for [{affine-relu-drop} X (L-1)]\n dhid = dropout_backward(dhid, thiscache['drop'])\n dhid, grads['W{0}'.format(i)], grads['b{0}'.format(i)] = \\\n affine_relu_backward(dhid, thiscache['affine_relu']) \n grads['W{0}'.format(i)] += self.reg * self.params['W{0}'.format(i)]\n\n return grads, loss", "def backward_deconvnet_relu(x):\n def grad(dy):\n return tf.nn.relu(dy)\n return tf.nn.relu(x), grad", "def conv_relu_backward_naive(dout, cache):\n\tconv_cache, relu_cache = cache\n\tda = relu_backward(dout, relu_cache)\n\tdx, dw, db = conv_backward_naive(da, conv_cache)\n\treturn dx, dw, db", "def prop_max_pool(self, activation, relevance, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1]):\n act = tf.expand_dims(activation, 3) # N x M x F x 1\n z = tf.nn.max_pool(act, ksize, strides, padding='SAME') + self.epsilon\n with self.model.graph.as_default():\n rel = tf.expand_dims(relevance, 3)\n s = rel / z\n c = gen_nn_ops.max_pool_grad_v2(act, z, s, ksize, strides, padding='SAME')\n tmp = c * act\n return tf.squeeze(tmp, [3])", "def conv_backward_naive(dout, cache):\n dx, dw, db = None, None, None\n #############################################################################\n # TODO: Implement the convolutional backward pass. 
#\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dx, dw, db", "def pooling_layer_backprop(self, bwd_in, fwd_in):\n img_w, img_h, img_c = fwd_in.shape\n output = np.zeros(fwd_in.shape)\n\n for c in range(img_c):\n in_x = out_x = 0\n while in_x + self._kernel_size <= img_w:\n in_y = out_y = 0\n while in_y + self._kernel_size <= img_h:\n current = fwd_in[in_x:in_x+self._kernel_size, in_y:in_y+self._kernel_size, c]\n (x, y) = np.unravel_index(np.nanargmax(current), current.shape)\n output[in_x+x, in_y+y, c] = bwd_in[out_x, out_y, c]\n in_y += self._kernel_size\n out_y += 1\n in_x += self._kernel_size\n out_x += 1\n return output", "def _poputil_remap_layer_backward(op, grads):\n return grads", "def backward(self, grad_output):\n raise NotImplementedError", "def relu_forward(x):\n #raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n out=np.maximum(0,x)\n cache=x\n return out,cache", "def backward_D(self):\n base_function._unfreeze(self.net_D)\n #print(self.input_P2.shape, self.img_gen.shape)\n self.loss_dis_img_gen = self.backward_D_basic(self.net_D, self.input_P2, self.img_gen)", "def forward_backward_prop(data, labels, params, dimensions):\n\n ### Unpack network parameters (do not modify)\n ofs = 0\n Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])\n\n activation = []\n\n W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H))\n ofs += Dx * H\n b1 = np.reshape(params[ofs:ofs + H], (1, H))\n ofs += H\n W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))\n ofs += H * Dy\n b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))\n\n ### Forward propagation\n activation.append(data)\n\n # Hidden layer inputs: (N, Dx) * (Dx, H) -> N x H\n z = np.dot(activation[-1], W1) + b1 \n # Activations, inputs to the final layer. 
\n activation.append(sigmoid(z)) # output of the hidden layer, activation\n # Final layer outputs: ( N x H ) * ( H, Dy) -> (N, Dy)\n z = np.dot(activation[-1], W2) + b2\n activation.append( softmax(z) )\n\n # Cross-entropy cost\n\n y_p = activation[-1]\n activation = activation[:-1] # remove activation data (output)\n\n cost = -np.sum(labels * np.log(y_p))\n \n error = []\n \n ### backward propagation\n sigma = (y_p - labels)\n error.append(sigma)\n\n gradb2 = np.sum(error[-1], axis=0)\n gradW2 = np.dot(activation[-1].T, error[-1])\n\n #\n sigma = np.dot(W2, error[-1].T)\n sigma = sigma.T * sigmoid_grad(activation[-1])\n activation = activation[:-1] # remove activation data ( hidden layer )\n\n error.append(sigma)\n\n gradb1 = np.sum(error[-1], axis=0)\n gradW1 = np.dot(activation[-1].T, error[-1])\n\n\n ### Stack gradients (do not modify)\n grad = np.concatenate((gradW1.flatten(), gradb1.flatten(), \n gradW2.flatten(), gradb2.flatten()))\n \n return cost, grad", "def backward(self, inputs, grad_loss_input):\n raise NotImplementedError", "def backward_G(self):\n mask = self.mask*0.5 + 0.5\n\n self.loss_G_SH = self.criterionS(self.pr_SH*mask, self.gt_SH*mask) * self.opt.lambda_S\n self.loss_G = self.loss_G_SH\n\n if not self.opt.no_brightness:\n self.loss_G_BA = self.criterionBA(self.pr_BA*mask, self.gt_BA*mask) * self.opt.lambda_BA\n self.loss_G_BC = 0\n for i in range(25):\n gt_BC = self.gt_BC[i][:, :2]\n bc_num = int(self.gt_BC[i][0, 3].item())\n pr_BC = self.pr_BC[i]\n loss_G_BC = util.min_loss_BC_NoBatch(pr_BC, gt_BC, bc_num, self.criterionBC)\n loss_G_BC = loss_G_BC * self.opt.lambda_BC / 25.0\n self.loss_G_BC += loss_G_BC\n\n loss_B = self.loss_G_BA + self.loss_G_BC\n self.loss_G += loss_B\n\n # Third, LTM Regularization\n if self.opt.reg_LTM:\n ltm_mean = torch.mean(self.ltm, dim=0, keepdim=True) # [1, 75, 256, 256]\n ltm_mean = ltm_mean.expand(self.ltm.size(0), ltm_mean.size(1), ltm_mean.size(2), ltm_mean.size(3)) # [25, 75, 256, 256]\n self.loss_LTMReg = self.criterionReg(self.ltm, ltm_mean) * self.opt.lambda_regLTM\n self.loss_G += self.loss_LTMReg\n\n\n self.loss_G.backward()", "def loss(self, X, y=None, justLoss=False):\n # N = X.shape[0]\n # mode = 'test' if y is None else 'train'\n scores = None\n\n W1, b1 = self.params['W1'], self.params['b1']\n # W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n\n conv_param = {'stride': 1, 'pad': 0}\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\n #######################################################################\n # TODO: Implement the forward pass for the convolutional neural net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n #######################################################################\n\n conv1, conv_cache = conv_forward(X, W1, b1, conv_param)\n relu1, relu_cache1 = relu_forward(conv1)\n\n # conv2, conv_cache2 = conv_forward(relu1, W2, b2, conv_param)\n # relu2, relu_cache2 = relu_forward(conv2)\n\n scores, maxpool_cache = max_pool_forward(relu1, pool_param)\n scores, forward_cache = fc_forward(scores, W3, b3)\n \n\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n #######################################################################\n # TODO: Implement the backward pass for the convolutional neural net, #\n # storing the loss and gradients in the loss and grads variables. #\n # Compute data loss using softmax, and make sure that grads[k] holds #\n # the gradients for self.params[k]. 
#\n loss, dscores = softmax_loss(scores, y)\n\n if justLoss:\n return loss\n # print(loss)\n\n\n dx_3, grads['W3'], grads['b3'] = fc_backward(dscores, forward_cache)\n dx_3 = max_pool_backward(dx_3, maxpool_cache)\n\n # dx_2 = relu_backward(dx_3, relu_cache2)\n # dx_2, grads['W2'], grads['b2'] = conv_backward(dx_3, conv_cache2)\n\n dx = relu_backward(dx_3, relu_cache1)\n dx, grads['W1'], grads['b1'] = conv_backward(dx, conv_cache)\n \n \n\n return loss, grads", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n scores = None\n ############################################################################\n # Implementing the forward pass for the fully-connected net, computing #\n # the class scores for X and storing them in the scores variable. #\n ############################################################################\n\n l_input = X.copy()\n out = []\n cache = []\n for i in range(self.num_layers - 1):\n # layerwise compute the forward pass and store outputs in out list\n key = ['W' + str(i+1), 'b' + str(i+1)]\n lout, lcache = affine_sigmoid_forward(l_input, self.params[key[0]], self.params[key[1]])\n out.append(lout)\n cache.append(lcache)\n l_input = lout\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n scores, lcache = affine_forward(out[self.num_layers - 2], self.params[key[0]], self.params[key[1]])\n cache.append(lcache)\n \n # regularization parameter compute by summing square of all weight vectors\n R = 0\n for i in range(1, self.num_layers + 1):\n key = 'W' + str(i)\n R += np.sum(np.power(self.params[key], 2))\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n loss, grads = 0.0, {}\n\n ########################\n # Backward pass to compute the loss and gradients\n ########################\n\n loss, dscore = softmax_loss(scores, y)\n # Apply regularization of the loss \n loss = loss + 0.5 * self.reg * R\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n dx, grads[key[0]], grads[key[1]] = affine_backward(dscore, cache[self.num_layers - 1])\n grads[key[0]] += self.reg * self.params[key[0]] \n\n for i in range(self.num_layers - 1, 0, -1):\n key = ['W' + str(i), 'b' + str(i)]\n dx, grads[key[0]], grads[key[1]] = affine_sigmoid_backward(dx, cache[i-1])\n # Apply regularization to the gradients\n grads[key[0]] += self.reg * self.params[key[0]]\n\n return loss, grads", "def relu_forward(x):\r\n cache = x\r\n out = np.maximum(0, x)\r\n return out, cache", "def L_model_backward(AL, Y, caches):\n grads = {}\n L = len(caches) # the number of layers\n m = AL.shape[1]\n Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL\n\n dAL = -(np.divide(Y,AL)-np.divide(1-Y,1-AL))\n \"\"\"\n cache = caches[-1]\n grads[\"dA\"+str(L)],grads[\"dW\"+str(L)],grads[\"db\"+str(L)] = linear_activation_backward(dAL,cache,activation = 'sigmoid')\n\n for i in reversed(range(L-1)):\n grads[\"dA\"+str(i+1)],grads[\"dW\"+str(i+1)],grads[\"db\"+str(i+1)] = linear_activation_backward(grads[\"dA\"+str(i+2)],caches[i],activation = 'relu')\n \"\"\"\n\n current_cache = caches[-1]\n grads[\"dA\" + str(L)], grads[\"dW\" + str(L)], grads[\"db\" + str(L)] = linear_backward(sigmoid_backward(dAL, current_cache[1]),current_cache[0])\n\n for l in reversed(range(L - 1)):\n # lth layer: (RELU -> LINEAR) gradients.\n # Inputs: \"grads[\"dA\" + str(l + 2)], caches\". Outputs: \"grads[\"dA\" + str(l + 1)] , grads[\"dW\" + str(l + 1)] , grads[\"db\" + str(l + 1)]\n ### START CODE HERE ### (approx. 
5 lines)\n current_cache = caches[l]\n dA_prev_temp, dW_temp, db_temp = linear_backward(sigmoid_backward(dAL, current_cache[1]), current_cache[0])\n grads[\"dA\" + str(l + 1)] = dA_prev_temp\n grads[\"dW\" + str(l + 1)] = dW_temp\n grads[\"db\" + str(l + 1)] = db_temp\n ### END CODE HERE ###\n\n return grads", "def on_iter_backward(self, runner):\n runner.optimizer.zero_grad()\n runner.loss.backward()\n runner.optimizer.step()", "def _AffLayerRelu_Backprop(self, dscores, cache):\n grads = {}\n loss = None\n #Last Softmax Layer\n ##Add L2 Regularization loss\n loss = 0.5 * self.reg * np.sum(self.params['W{0}'.format(self.num_layers)]**2)\n ##Calculate grads for last Affine\n dhid, grads['W{0}'.format(self.num_layers)], grads['b{0}'.format(self.num_layers)] =\\\n affine_backward(dscores, cache[-1])\n grads['W{0}'.format(self.num_layers)] += self.reg * self.params['W{0}'.format(self.num_layers)]\n\n for i in range(self.num_layers-1, 0, -1): #hidden layers\n ##L2 Reg. loss\n loss += 0.5 * self.reg * np.sum(self.params['W{0}'.format(i)]**2)\n ##Calculate grads for [{affine-Batchnorm-relu} X (L-1)]\n dhid = relu_backward(dhid, cache[i]['relu'])\n dhid, grads['gamma{0}'.format(i)], grads['beta{0}'.format(i)] = \\\n layernorm_backward(dhid, cache[i]['layernorm'])\n dhid, grads['W{0}'.format(i)], grads['b{0}'.format(i)] = \\\n affine_backward(dhid, cache[i]['affine']) \n grads['W{0}'.format(i)] += self.reg * self.params['W{0}'.format(i)]\n\n return grads, loss", "def pool_backward(dA, A_prev, kernel_shape, stride=(1, 1), mode='max'):\n m, h_new, w_new, c_new = dA.shape\n m, h_prev, w_prev, c_prev = A_prev.shape\n kh, kw = kernel_shape\n sh, sw = stride\n\n dA_prev = np.zeros_like(A_prev, dtype=dA.dtype)\n\n for z in range(m):\n for y in range(h_new):\n for x in range(w_new):\n for k in range(c_new):\n images = A_prev[z, y * sh:(kh+y*sh), x * sw:(kw+x*sw), k]\n tmp_dA = dA[z, y, x, k]\n if mode == 'max':\n z_mask = np.zeros(kernel_shape)\n v_max = np.max(images)\n np.place(z_mask, images == v_max, 1)\n dA_prev[z, y * sh:(kh + y * sh),\n x * sw:(kw+x*sw), k] += z_mask * tmp_dA\n\n elif mode == 'avg':\n avg = tmp_dA / kh / kw\n dA_prev[z, y * sh:(kh + y * sh),\n x * sw:(kw+x*sw),\n k] += np.ones(kernel_shape) * avg\n return dA_prev", "def backward_G(self):\n self.loss_G.backward()", "def loss(self, X, y=None):\n\t\tW1, b1 = self.params['W1'], self.params['b1']\n\t\tW2, b2 = self.params['W2'], self.params['b2']\n\t\tW3, b3 = self.params['W3'], self.params['b3']\n\t\t\n\t\t# pass conv_param to the forward pass for the convolutional layer\n\t\tfilter_size = W1.shape[2]\n\t\tconv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n\t\t# pass pool_param to the forward pass for the max-pooling layer\n\t\tpool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\t\tscores = None\n\t\t############################################################################\n\t\t# TODO: Implement the forward pass for the three-layer convolutional net, #\n\t\t# computing the class scores for X and storing them in the scores\t\t\t\t\t #\n\t\t# variable.\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\tz1, cache1 = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)\n\t\tz2, cache2 = affine_relu_forward(z1, W2, b2)\n\t\ty3, cache3 = affine_forward(z2, W3, b3)\n\t\tscores = y3\n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR 
CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\t\n\t\tif y is None:\n\t\t\treturn scores\n\t\t\n\t\tloss, grads = 0, {}\n\t\t############################################################################\n\t\t# TODO: Implement the backward pass for the three-layer convolutional net, #\n\t\t# storing the loss and gradients in the loss and grads variables. Compute #\n\t\t# data loss using softmax, and make sure that grads[k] holds the gradients #\n\t\t# for self.params[k]. Don't forget to add L2 regularization!\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\tloss, dout = softmax_loss(scores, y)\n\t\tloss += self.reg * 0.5 * (np.power(self.params['W3'], 2).sum() + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W2'], 2).sum() + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W1'], 2).sum())\n\n\t\tdx3, grads['W3'], grads['b3'] = affine_backward(dout, cache3)\n\t\tdx2, grads['W2'], grads['b2'] = affine_relu_backward(dx3, cache2)\n\t\tdx1, grads['W1'], grads['b1'] = conv_relu_pool_backward(dx2, cache1)\n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\t\n\t\treturn loss, grads", "def relu_forward(x):\n out = None\n out = np.maximum(0.0, x)\n cache = x\n return out, cache", "def _AffBatchRelu_Backprop(self, dscores, cache):\n grads = {}\n loss = None\n #Last Softmax Layer\n ##Add L2 Regularization loss\n loss = 0.5 * self.reg * np.sum(self.params['W{0}'.format(self.num_layers)]**2)\n ##Calculate grads for last Affine\n dhid, grads['W{0}'.format(self.num_layers)], grads['b{0}'.format(self.num_layers)] =\\\n affine_backward(dscores, cache[-1])\n grads['W{0}'.format(self.num_layers)] += self.reg * self.params['W{0}'.format(self.num_layers)]\n\n for i in range(self.num_layers-1, 0, -1): #hidden layers\n ##L2 Reg. 
loss\n loss += 0.5 * self.reg * np.sum(self.params['W{0}'.format(i)]**2)\n ##Calculate grads for [{affine-Batchnorm-relu} X (L-1)]\n dhid = relu_backward(dhid, cache[i]['relu'])\n dhid, grads['gamma{0}'.format(i)], grads['beta{0}'.format(i)] = \\\n batchnorm_backward_alt(dhid, cache[i]['batchnorm'])\n dhid, grads['W{0}'.format(i)], grads['b{0}'.format(i)] = \\\n affine_backward(dhid, cache[i]['affine']) \n grads['W{0}'.format(i)] += self.reg * self.params['W{0}'.format(i)]\n\n return grads, loss", "def back_propagation(self, y_output: np.ndarray) -> None:\n next_layer = NullLayer()\n for layer in reversed(self.layers):\n layer.propagate(y_output, next_layer.delta, next_layer.w)\n next_layer = layer", "def train(input, label, conv, maxpool, softmax, lr=0.005):\n # Forward\n output, loss, accuracy = forward(input, label, conv, maxpool, softmax)\n\n gradient = np.zeros(10)\n gradient[label] = -1 / output[label]\n\n # Backprop\n gradient = softmax.backprop(gradient, lr)\n gradient = maxpool.backprop(gradient)\n gradient = conv.backprop(gradient, lr)\n\n return loss, accuracy", "def conv_bn_relu_backward(dout, cache):\n conv_cache, sbn_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dan, dgamma, dbeta = spatial_batchnorm_backward(da, sbn_cache)\n dx, dw, db = conv_backward_fast(dan, conv_cache)\n return dx, dw, db, dgamma, dbeta", "def backward(last_layer: str) -> Callable:\n\n def closure() -> Tuple[Optional[torch.Tensor], torch.Tensor]:\n optimizer.zero_grad()\n output = model(data)\n if last_layer == \"output\":\n output.backward(torch.ones_like(target))\n return None, output\n elif last_layer == 'loss':\n loss = compute_loss(output - target)\n loss.backward()\n return loss, output\n else:\n assert False, 'last layer must be \"output\" or \"loss\"'\n\n return closure", "def backward(last_layer: str) -> Callable:\n\n def closure() -> Tuple[Optional[torch.Tensor], torch.Tensor]:\n optimizer.zero_grad()\n output = model(data)\n if last_layer == \"output\":\n output.backward(torch.ones_like(target))\n return None, output\n elif last_layer == 'loss':\n loss = compute_loss(output - target)\n loss.backward()\n return loss, output\n else:\n assert False, 'last layer must be \"output\" or \"loss\"'\n\n return closure", "def _backward(loss):\n\n loss.backward()", "def backward(\n self, X: np.ndarray, y: np.ndarray, lr: float, reg: float = 0.0\n ) -> float:\n y_hat = self.forward(X)\n\n y_one_hot = self.one_hot_encode(y)\n loss = CrossEntropy.forward(y_one_hot, y_hat)\n\n d_layer = CrossEntropy.backward(y, y_hat)\n\n w_grads = []\n b_grads = []\n\n for idx, layer in reversed(list(enumerate(self.layers))):\n # Not output layer\n if (idx + 1) < len(self.layers):\n next_layer = self.layers[idx + 1]\n\n d_layer = d_layer.dot(next_layer.w.T)\n d_layer = layer.activation_func.backward(d_layer, layer.activated_out)\n\n d_w = layer.linear_in.T.dot(d_layer) + 2 * reg * layer.w\n d_b = np.sum(d_layer, axis=0)\n\n w_grads.insert(0, d_w)\n b_grads.insert(0, d_b)\n\n self.optimizer.step(self.layers, w_grads, b_grads, lr)\n\n if self.norm_weights:\n w_norm = max(np.linalg.norm(l.w) for l in self.layers) / len(self.layers)\n b_norm = max(np.linalg.norm(l.w) for l in self.layers) / len(self.layers)\n for layer in self.layers:\n layer.w /= w_norm\n layer.b /= b_norm\n\n return loss", "def max_pool(bottom, ksize, strides, name):\n with tf.variable_scope(name):\n pool = tf.nn.max_pool(bottom, ksize=ksize, strides=strides, padding='SAME')\n print_activation(pool)\n\n # visitable pool layer\n 
variable_summaries(pool, name + '/pool')\n return pool", "def _max_pool(self, bottom, name='max_pool'):\n return tf.nn.max_pool(\n bottom,\n ksize=[1, 3, 1, 1],\n strides=[1, 3, 1, 1],\n padding='SAME', name=name)", "def rnn_backward(dh, cache):\n dx, dh_prev, dWx, dWh, db = None, None, None, None, None\n ##############################################################################\n # TODO: Implement the backward pass for a vanilla RNN running an entire #\n # sequence of data. You should use the rnn_step_backward function that you #\n # defined above. #\n ##############################################################################\n \"\"\"\n x, next_h, prev_h, Wx, Wh, b = cache\n dz = (1-next_h*next_h)*dnext_h\n # THIS ERROR IS SPREAD AMONG THE\n # np.dot(x, Wx) + np.dot(prev_h, Wh) + b)\n dx = np.dot(dz,Wx.T)\n dprev_h = np.dot(dz,Wh.T)\n db = np.sum(dz,axis=0)\n dWx = np.dot(x.T,dz)\n dWh = np.dot(prev_h.T,dz)\n #d(tanh) = 1- tanh*tanh\n \"\"\"\n #pdb.set_trace()\n # dh is not result of forward prop\n # but\n N,T,H = dh.shape\n tmp_x, tmp_next_h, tmp_prev_h, tmp_Wx, tmp_Wh, tmp_b = cache[T-1]\n D = tmp_x.shape[1]\n\n\n dx = np.zeros((N,T,D))\n dh_prev = np.zeros((N,H))\n dWx = np.zeros((D,H))\n dWh = np.zeros((H,H))\n db = np.zeros((H))\n\n for i in reversed(list(range(0,T))):\n # current gradient at timestep is the upstream gradient (provided as input)\n # this may be coming from the Y as in the min_char_rnn.py (see line 59)\n # + downstream gradient provided by rnn_step_backward.\n dh_curr = dh[:,i,:] + dh_prev\n dx_, dh_prev, dWx_, dWh_, db_ = rnn_step_backward(dh_curr, cache[i])\n dWx += dWx_\n dWh += dWh_\n db += db_\n dx[:,i,:]=dx_\n\n\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n return dx, dh_prev, dWx, dWh, db", "def max_pool(x,\n k_h,\n k_w,\n s_h,\n s_w,\n name,\n padding=\"VALID\"):\n with tf.name_scope(name):\n outputs = tf.nn.max_pool(x, [1, k_h, k_w, 1], [1, s_h, s_w, 1], padding)\n # Return layer's output\n return outputs", "def backward(self, outputs, labels):\n # Layers & shape\n depth = len(self.layer_dimensions) - 1\n # num_classes, batch_size = outputs.shape\n batch_size, num_classes = outputs.shape\n coefficient = 1 / batch_size\n # 1/ First case : last layer -> output\n layer_a = \"a_\" + str(depth - 1)\n a = self._cache[layer_a]\n Jz = outputs - labels\n # Weights gradients\n dw = coefficient * np.dot(a.T, Jz)\n db = coefficient * np.sum(Jz, axis=0)\n self._grad[\"dw_\" + str(depth)] = dw\n self._grad[\"db_\" + str(depth)] = db\n # 2/ Second case : inside the layers\n for i in range(depth - 1, 0, -1):\n # Get the weights and biases\n layer_w = \"w_\" + str(i + 1)\n layer_a = \"a_\" + str(i - 1)\n layer_z = \"z_\" + str(i)\n w = self._parameters[layer_w]\n a = self._cache[layer_a]\n z = self._cache[layer_z]\n # Gradients\n Jz = self.activation_hidden.backward(z) * np.dot(Jz, w.T)\n db = coefficient * np.sum(Jz, axis=0)\n dw = coefficient * np.dot(a.T, Jz)\n self._grad[\"dw_\" + str(i)] = dw\n self._grad[\"db_\" + str(i)] = db", "def backward(ctx, grad_output):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n # Retrieve saved tensors and constants\n gamma, ivar, mean, input = ctx.saved_tensors\n eps = ctx.saved_tensors\n\n # Check which inputs need gradients\n input_needs_grad, gamma_needs_grad, beta_needs_grad = ctx.needs_input_grad\n\n # Get the batch size (=N)\n N, _ = 
grad_output.shape\n\n # reconstruct the input_norm\n input_norm = (input - mean) * ivar\n grand_input_norm = grad_output * gamma\n\n ##### Gradient wrt beta #####\n grad_beta = grad_output.sum(dim=0) if beta_needs_grad else None\n\n #### Gradient wrt gamma ####\n grad_gamma = (input_norm*grad_output).sum(dim=0) if gamma_needs_grad else None\n \n #### Gradient wrt input ####\n term1 = N*grand_input_norm \n term2 = torch.sum(grand_input_norm, dim=0)\n term3 = input_norm*torch.sum(grand_input_norm*input_norm, dim=0)\n grad_input = (1. / N) * ivar * (term1 - term2 - term3) if input_needs_grad else None\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n # return gradients of the three tensor inputs and None for the constant eps\n return grad_input, grad_gamma, grad_beta, None", "def backward(self, gradient):\n raise NotImplementedError()", "def backward(cls, grad_out, activated_out):\n raise Exception(\"Unimplemented\")", "def backward(cls, grad_out, activated_out):\n raise Exception(\"Unimplemented\")", "def _backprop(self, y_train, X_train, A_hidden, Z_hidden, A_out, Z_out, batch_idx):\n\n # This is the derivative assuming our costfunction is 0.5*two_norm(A_out - y)**2\n # This results in different backpropagation \n \n error_out = A_out - y_train[batch_idx].reshape(len(y_train[batch_idx]), 1)\n \n # Since we are in the regression case with a linear ouput funct.\n act_derivative_out = 1\n\n delta_out = error_out*act_derivative_out\n\n grad_w_out = np.dot(A_hidden[-1].T, delta_out)\n grad_b_out = np.sum(delta_out, axis=0)\n\n # Updating the output weights \n self.W_out = self.W_out - self.eta * grad_w_out\n self.b_out = self.b_out - self.eta * grad_b_out\n \n \n # Looping over all the hidden layers except one\n # If the layer only have one layer it doesn't go into this while loop \n \n i = 0\n while (i < self.n_hidden_layers-1):\n # Index moving backward in the layers.\n #print(\"this should only be one loop\")\n layer_ind = self.n_hidden_layers - 1 - i\n #print(\"layer_ind: : \", layer_ind)\n act_derivative_h = self.activate(Z_hidden[layer_ind], self.activation, deriv=True)\n \n if (i == 0):\n error_prev = np.dot(delta_out, self.W_out.T) * act_derivative_h\n else:\n #print(\"np.shape(error_prev)\", np.shape(error_prev))\n error_prev = np.dot(error_prev, self.W_h[layer_ind+1].T) * act_derivative_h\n \n grad_w_h = np.dot(A_hidden[layer_ind - 1].T, error_prev)\n grad_b_h = np.sum(error_prev, axis=0)\n \n self.W_h[layer_ind] = self.W_h[layer_ind] - self.eta * grad_w_h\n self.b_h[layer_ind] = self.b_h[layer_ind] - self.eta * grad_b_h\n i += 1\n \n \n act_derivative_h = self.activate(Z_hidden[0], self.activation, deriv=True) \n \n # Case with one hidden layer doesn't enter the while loop.\n if( self.n_hidden_layers == 1):\n error_last = np.dot(delta_out, self.W_out.T) * act_derivative_h\n else:\n error_last = np.dot(error_prev, self.W_h[layer_ind].T) * act_derivative_h\n\n grad_w_h = np.dot(X_train[batch_idx].T, error_last)\n grad_b_h = np.sum(error_last, axis = 0)\n\n self.W_h[0] = self.W_h[0] - self.eta * grad_w_h\n self.b_h[0] = self.b_h[0] - self.eta * grad_b_h\n\n return None", "def bprop_pool(self, layer, I, O, argmax=None, alpha=1.0, beta=0.0):\n assert layer.sizeI == O.size\n assert layer.sizeO == I.size\n if layer.op == \"max\":\n assert layer.sizeO == argmax.size\n primitives = c_longlong(layer.dnnPrimitives.ctypes.data)\n self.mklEngine.MaxPooling_bprop(I.get_prim(), O.get_prim(),\n primitives, layer.initOk_b)\n layer.initOk_b = 1\n O.shape5D = layer.dimI", 
"def back_propagate(self,y):\n if y.ndim > 1: #case of multioutput prediction\n m = np.shape(y)[0] #number of samples\n n = np.shape(y)[1] #number of output elements\n \n #calculating gradients for each layer\n for layer, activation in zip(reversed(range(len(self.architecture))), self.activations[::-1]):\n \n if layer == len(self.architecture)-1: #identifies output layer and does output layer specific calculations\n dCda_L = self.derivatives(function='mse', y_pred = self.all_data[f'A{layer}'], y= y)\n da_LdZ_L = self.derivatives(np.sum(self.all_data[f'Z{layer}'], axis=0)*(1/m),activation)\n delta_L = np.multiply(dCda_L,da_LdZ_L)\n self.all_data[f'dCda_{layer}'] = dCda_L\n self.all_data[f'da_{layer}dZ_{layer}'] = da_LdZ_L\n self.all_data[f'delta_{layer}'] = delta_L\n else: #for other layers\n da_LdZ_l = self.derivatives(np.sum(self.all_data[f'Z{layer}'], axis=0)*(1/m),activation)\n self.all_data[f'da_{layer}dZ_{layer}'] = da_LdZ_l\n delta_l = np.multiply(np.dot(self.all_data[f'delta_{layer+1}'], (self.weights_and_biases[f'W{layer+1}']).T), da_LdZ_l) \n self.all_data[f'delta_{layer}'] = delta_l\n \n dCdW_l = np.outer(np.sum(self.all_data[f'A{layer-1}'],axis=0)*(1/m),self.all_data[f'delta_{layer}'])\n dCdb_l = self.all_data[f'delta_{layer}']\n\n ##saving calculated data\n self.all_data[f'dCdW{layer}'] = dCdW_l\n self.all_data[f'dCdb{layer}'] = dCdb_l\n self.parameter_gradients[f'dCdW{layer}'] = dCdW_l\n self.parameter_gradients[f'dCdb{layer}'] = dCdb_l\n \n else: #calculations for single dataset(eg for SGD)\n m = 1\n n = len(y)\n\n ##calculating gradients for each layer\n for layer, activation in zip(reversed(range(len(self.architecture))), self.activations[::-1]):\n \n if layer == len(self.architecture)-1:\n #dCda_L = (self.all_data[f'A{layer}'] - y)*(1/(m*n))*2\n dCda_L = self.derivatives(function='mse', y_pred = self.all_data[f'A{layer}'], y=y)\n da_LdZ_L = self.derivatives(self.all_data[f'Z{layer}'],activation)\n delta_L = np.multiply(dCda_L,da_LdZ_L)\n self.all_data[f'dCda_{layer}'] = dCda_L\n self.all_data[f'da_{layer}dZ_{layer}'] = da_LdZ_L\n self.all_data[f'delta_{layer}'] = delta_L\n else:\n da_LdZ_l = self.derivatives(self.all_data[f'Z{layer}'],activation)\n self.all_data[f'da_{layer}dZ_{layer}'] = da_LdZ_l\n delta_l = np.multiply(np.dot(self.all_data[f'delta_{layer+1}'], (self.weights_and_biases[f'W{layer+1}']).T), da_LdZ_l) \n self.all_data[f'delta_{layer}'] = delta_l\n\n dCdW_l = np.outer(self.all_data[f'A{layer-1}'],self.all_data[f'delta_{layer}'])\n dCdb_l = self.all_data[f'delta_{layer}']\n\n #saving data\n self.all_data[f'dCdW{layer}'] = dCdW_l\n self.all_data[f'dCdb{layer}'] = dCdb_l\n self.parameter_gradients[f'dCdW{layer}'] = dCdW_l\n self.parameter_gradients[f'dCdb{layer}'] = dCdb_l", "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n x = self.pool1(F.relu(self.batch1(self.conv1(x))))\n x = self.pool2(F.relu(self.batch2(self.conv2(x))))\n x = F.relu(self.batch3a(self.conv3a(x)))\n x = self.pool3(F.relu(self.batch3b(self.conv3b(x))))\n x = F.relu(self.batch4a(self.conv4a(x)))\n x = self.pool4(F.relu(self.batch4b(self.conv4b(x))))\n x = F.relu(self.batch5a(self.conv5a(x)))\n x = self.pool5(F.relu(self.batch5b(self.conv5b(x))))\n x = self.avgpool(x)\n x = x.reshape(x.shape[0], -1)\n out = self.fc1(x)\n\n# raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def backward(ctx, grad_output):\n\n # This is a pattern that is very convenient - at the top of 
backward\n # unpack saved_tensors and initialize all gradients w.r.t. inputs to\n # None. Thanks to the fact that additional trailing Nones are\n # ignored, the return statement is simple even when the function has\n # optional inputs.\n # input, weight, bias = ctx.saved_variables\n\n return grad_output", "def backward_pass(self, delta, zs, activations, nabla_b, nabla_w):\n for l in range(2, self.num_layers):\n delta = np.dot(self.weights[-l + 1].transpose(), delta) * sigmoid_prime(zs[-l])\n nabla_b[-l] = delta\n nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())\n return nabla_b, nabla_w", "def backward(self, loss, update_hp_grads=True, clear_lp_grads=False, **bwd_kwargs):\n self.clear_lp_grads()\n loss.backward(**bwd_kwargs)\n\n if update_hp_grads:\n self.update_hp_grads(clear_lp_grads=clear_lp_grads)", "def TransitionDown(inputs, n_filters, dropout_p=0.2):\n\n l = BN_ReLU_Conv(inputs, n_filters, filter_size=1, dropout_p=dropout_p)\n l = Pool2DLayer(l, 2, mode='max')\n\n return l\n # Note : network accuracy is quite similar with average pooling or without BN - ReLU.\n # We can also reduce the number of parameters reducing n_filters in the 1x1 convolution", "def forward(ctx, input, hard=False, tau=1):\n y_soft = F.gumbel_softmax(input, hard=hard, tau=tau)\n ctx.save_for_backward(y_soft)\n if hard:\n _, k = y_soft.max(-1)\n # this bit is based on\n # https://discuss.pytorch.org/t/stop-gradients-for-st-gumbel-softmax/530/5\n y_hard = logits.new_zeros(*shape).scatter_(-1, k.view(-1, 1), 1.0)\n # this cool bit of code achieves two things:\n # - makes the output value exactly one-hot (since we add then\n # subtract y_soft value)\n # - makes the gradient equal to y_soft gradient (since we strip\n # all other gradients)\n y = y_hard - y_soft.detach() + y_soft\n else:\n y = y_soft\n return y", "def conv_relu_backward(dout, cache):\n conv_cache, relu_cache = cache\n da = layers.relu_backward(dout, relu_cache)\n dx, dw, db = layers.conv_backward_fast(da, conv_cache)\n return dx, dw, db" ]
[ "0.78717184", "0.7527569", "0.74360836", "0.742322", "0.74153745", "0.74050575", "0.736715", "0.73291767", "0.71188295", "0.71188295", "0.7104715", "0.7095607", "0.7016401", "0.69276273", "0.6922222", "0.68295985", "0.67844594", "0.67377377", "0.6726962", "0.6720057", "0.67109764", "0.6651964", "0.6643144", "0.6625892", "0.658501", "0.65720695", "0.6517257", "0.6510932", "0.6487874", "0.647887", "0.6465031", "0.6460259", "0.6445781", "0.6439334", "0.6406169", "0.63766384", "0.6330127", "0.63297695", "0.6328837", "0.63010955", "0.62973094", "0.62934357", "0.62865365", "0.62865365", "0.6283529", "0.6275666", "0.6273552", "0.6257604", "0.6256648", "0.6252558", "0.6247873", "0.6230411", "0.6225366", "0.62243587", "0.6218961", "0.62043536", "0.61997664", "0.61947536", "0.6188839", "0.61874247", "0.61685", "0.6151619", "0.6150696", "0.614977", "0.6135455", "0.61268544", "0.6117661", "0.61165255", "0.61132777", "0.610621", "0.61056674", "0.6103426", "0.6099024", "0.6097415", "0.6090848", "0.6088591", "0.60852927", "0.6084353", "0.6084353", "0.6068261", "0.606294", "0.60570675", "0.6057011", "0.60569793", "0.6038408", "0.60352796", "0.6027642", "0.60238457", "0.6020695", "0.6020695", "0.6018237", "0.6015591", "0.59918314", "0.5982988", "0.5980608", "0.59771186", "0.5971987", "0.5969664", "0.5965196", "0.5963282" ]
0.73897725
6
Computes the loss and gradient for binary SVM classification.
def svm_loss(x, y):
    N = x.shape[0]
    x = np.squeeze(x)
    loss = np.sum(((1-x*y)>0)*(1-x*y))/N
    dx = ((1-x*y)>0)*(-y)/N
    return loss, dx
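The target implementation above is the binary hinge formulation: for raw scores x and labels y in {-1, +1}, the loss is L = (1/N) * sum_i max(0, 1 - y_i * x_i), and its gradient with respect to x_i is -y_i / N wherever the margin is violated (1 - y_i * x_i > 0) and 0 otherwise. Below is a minimal sketch that restates that function and checks the analytic gradient against a forward finite difference; the concrete scores, labels, and eps value are illustrative assumptions, and the labels are taken to already be encoded as -1/+1.

import numpy as np

def svm_loss(x, y):
    # Binary hinge loss L = (1/N) * sum_i max(0, 1 - y_i * x_i), y_i in {-1, +1},
    # with gradient dL/dx_i = -y_i / N on violated margins and 0 elsewhere.
    N = x.shape[0]
    x = np.squeeze(x)
    margin = 1 - x * y
    loss = np.sum((margin > 0) * margin) / N
    dx = (margin > 0) * (-y) / N
    return loss, dx

# Tiny sanity check on a 4-sample batch (values chosen arbitrarily for illustration).
x = np.array([0.3, -1.2, 2.0, 0.1])
y = np.array([1, -1, 1, -1])          # assumed already in {-1, +1}
loss, dx = svm_loss(x, y)

eps = 1e-6
num_dx = np.zeros_like(x)
for i in range(x.shape[0]):
    bumped = x.copy()
    bumped[i] += eps
    num_dx[i] = (svm_loss(bumped, y)[0] - loss) / eps

print(loss)     # 0.45 for this batch
print(dx)       # [-0.25  0.    0.    0.25]
print(num_dx)   # matches dx away from the hinge point where 1 - y*x == 0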
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def svm_loss(x, y):\n loss, dx = None, None\n ###########################################################################\n # TODO: Implement loss and gradient for multiclass SVM classification. #\n # This will be similar to the svm loss vectorized implementation in #\n # cs231n/classifiers/linear_svm.py. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n y_temp = np.ones((x.shape[0], x.shape[1])) # 1로 구성된 x와 같은 쉐입의 매트릭스를 만든다\n #print(y_temp)\n y_score = x[np.arange(x.shape[0]), y] # 정답레이블의 스코어로만 구성된 하나의 컬럼 벡터를 만든다\n y_score = np.reshape(y_score, (x.shape[0], 1)) # 브로드캐스팅을 위해 리쉐입 해준다\n y_temp[np.arange(x.shape[0]), y] = 0 # 1로 구성된 템프매트릭스의 정답 레이블에 해당되는 인덱스에 0을 할당한다\n #print(y_temp)\n loss_temp = (x - y_score) - 1\n loss_temp = (-loss_temp * y_temp) / x.shape[0]\n loss = (np.sum(loss_temp))\n #print(loss_temp)\n\n #print(np.sum(loss_temp, axis = 1))\n \n temp = loss_temp * x.shape[0]\n temp[loss_temp > 0] = 1\n row_sum = np.sum(temp, axis = 1)\n temp[np.arange(x.shape[0]), y] = -row_sum.T\n dx = -temp\n\n dx /= x.shape[0]\n\n\n #print(dx)\n\n\n\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return loss, dx", "def svm_loss(W, X, y, classes, reg):\n # compute the loss and the gradient\n # num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n #############################################################################\n # Implementation of a SVM loss, storing the result in loss. #\n #############################################################################\n scores = X.dot(W)\n correct_class_scores = scores[np.arange(num_train), y-1]\n margin = np.transpose(scores) - correct_class_scores + 1 # delta = 1\n margin[y-1, np.arange(num_train)] = 0 \n\n # values greater than zeros in margin - calculating max(0, margin)\n gt_zero = np.maximum(np.zeros((margin.shape)), margin)\n\n loss = np.sum(gt_zero)\n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train. \n loss /= num_train\n # And regularization to the loss.\n loss += 0.5 * reg * np.sum(W * W)\n\n #############################################################################\n # Implementation the gradient for the SVM loss, storing the result in dW. #\n # #\n #############################################################################\n\n # classifiers having loss > 0\n gt_zero[gt_zero > 0] = 1\n\n # Calculating indexes for the necessary subtractions\n images_sum = np.sum(gt_zero, axis = 0)\n\n # Subtracting the derivative\n gt_zero[y-1, range(num_train)] = -images_sum[range(num_train)]\n\n # updating the gradients\n dW = np.transpose(gt_zero.dot(X))\n\n # Normalizing the gradient\n dW /= num_train\n\n # Adding regularization to the gradieant.\n dW += reg * W\n\n return loss, dW", "def svm_loss_vectorized(W, X, y, reg):\n loss = 0.0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n delta = 1 # margin of the SVM\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the structured SVM loss, storing the #\n # result in loss. 
#\n # 1) Dot product of weight and data matrix\n XW = np.dot(W,X)\n # 2) get correct class scores using y \n correct_class=XW[y,np.arange(X.shape[1])]\n # 3) find margins by using element wise maximum function\n #print np.matrix(correct_class).shape\n mar=np.maximum(0,XW-np.matrix(correct_class) + delta)\n #print mar.shape\n # Make correct classes 0\n mar[y,np.arange(X.shape[1])]=0\n #print mar.shape\n # get loss by summing and dividing by n\n loss = np.sum(mar)\n loss /= X.shape[1]\n # adjust by regularization strength\n loss += 0.5 * reg * np.sum(np.square(W))\n \n \n \n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n \n #############################################################################\n # TODO: #\n # Implement a vectorized version of the gradient for the structured SVM #\n # loss, storing the result in dW. #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n # create a binary matrix \n binary_mat=mar\n binary_mat[mar>0]=1\n \n # sum of all incorrect classes \n #print binary_mat.shape\n sum=np.sum(binary_mat,axis=0)\n \n # y coordinate decreases and hence negative \n binary_mat[y,np.arange(X.shape[1])]= -sum\n \n dW = (np.dot(binary_mat,X.T))\n dW = dW / X.shape[1]\n dW = dW + reg*W \n pass\n\n \n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def svm_loss_vectorized(W, X, y, reg):\n num_classes = W.shape[1]\n num_train = X.shape[0]\n #loss = 0.0 \n loss = 0.0\n scores = np.zeros((1,num_classes))\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the structured SVM loss, storing the #\n # result in loss. 
#\n #############################################################################\n \n # lines begin with double \"#\" are the last version of code!!!!!\n \n ##for i in xrange(num_train):\n #XX = np.tile(X[i,:],(num_classes,1)) # try to use broadcasting\n #scores = np.sum(np.multiply(XX,W.T), axis = 1)\n ## scores = np.sum(np.multiply(X[i,:],W.T), axis = 1)\n \n ## if i ==1: print scores\n \n #loss += np.sum(scores - scores[y[i]]) + num_classes -1\n #http://stackoverflow.com/questions/2900084/counting-positive-elements-in-a-list-with-python-list-comprehensions\n ## scores+=1\n ## scores[y[i]]-=1 \n #however, this is sum over index, not values, glaube ich \n #loss+= sum(x < 0 for x in (scores-scores[y[i]]))\n ## loss+= (scores-scores[y[i]])[scores-scores[y[i]]>0].sum()\n #pass\n ############################################\n # construct a zero loop version\n ############################################\n scores2D = np.zeros((num_train, num_classes)) #used to store dotted scores\n scores1D = np.zeros((num_train,1)) #used to store corrected scores\n #index1D = np.zeros((1,num_classes))\n #index1D = range(num_classes) \n #scores1D = y[index1D]\n \n scores2D = np.dot(X,W) \n ##for i in xrange(num_train):\n ## scores1D[i,0]=scores2D[i,y[i]]-1 #find the correct scores and fill them into scores1D, the value -1 is because: si-sj+1\n ## scores2D[i,y[i]]-=1 # we want at corrected score voxel, the value should be 0, correct score -1 - \n #(correct score -1) = 0\n #####################################\n #for loop replacement###\n indexInsert = np.arange(num_train)\n scores1D[indexInsert,0] = scores2D[indexInsert,y[indexInsert]] -1 #using array indexing\n scores2D[indexInsert,y[indexInsert]] -=1\n \n ##################################### \n \n #scores2D = X.dot(W)\n #http://stackoverflow.com/questions/9497290/how-would-i-sum-a-multi-dimensional-array-in-the-most-succinct-python\n #rewrite summation\n #loss += (scores2D-scores1D)[scores2D-scores1D >0].sum()\n #temp = scores2D-np.tile (scores1D, (1,num_classes)) # for each score minus the corrected score\n temp = scores2D-scores1D #broadcasting!!\n #print temp[1,:]\n temp= temp.clip(min=0) \n #loss += sum(map(sum, (temp)[temp>0]))\n #loss += sum(map(sum, (temp)))\n #loss += (temp)[temp >0].sum()\n loss += sum(sum(x) for x in temp) #sum them up\n #loss -= num_train # minus 1 is because in each train, due to the plus 1 above , correct score - correct \n # score +1 = 1, but it should be 0, therefore, i deduce them at the last minute \n # ( then I made this also in the for loop to meet intuitive)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n loss /= num_train\n loss += 0.5 * reg * np.sum(W * W)\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the gradient for the structured SVM #\n # loss, storing the result in dW. #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. 
#\n #############################################################################\n #tempBool = np.divide(temp, temp)\n #tempBool = tempBool.clip(max=1,min=0)\n #http://stackoverflow.com/questions/19666626/replace-all-elements-of-python-numpy-array-that-are-greater-than-some-value\n tempBool = np.copy(temp) # temp = scores2D-scores1D , temp= temp.clip(min=0)\n # temp is already the every score minus the correct labeled score\n tempBool[tempBool>0] = 1 # for every element, when it is positive, set it to one (for weighting)\n for j in xrange(num_train):\n tempBool[j,y[j]] =-1*sum(tempBool[j,:]) # calculate how many final scores, max(~~,0) are more than 0, add the number to the correct\n # label element, because it is the times that the corrected scores be used\n dW += np.reshape (X[j,:],(X.shape[1],1))*tempBool[j,:] # broadcasting, out-product\n #pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n dW/= num_train\n dW += reg*W\n \n return loss, dW", "def svm_loss_vectorized(W, X, y, reg):\n loss = 0.0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n delta = 1 # margin of the SVM\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the structured SVM loss, storing the #\n # result in loss. #\n #############################################################################\n pass\n dim, num_train = X.shape\n # print X.dtype\n num_classes = np.max(y) + 1 # assume y takes values 0...K-1 where K is number of classes\n # print \"Num Classes: \", num_classes\n if W is None:\n # lazily initialize W\n W = np.random.randn(num_classes, dim) * 0.0001\n\n loss_image_arr = np.empty([num_train, num_classes])\n\n # Information about various dimensions\n # print \"Num Dimensions: \", dim, \"Num Samples: \", num_train, \"Num Classes: \", num_classes\n\n score_matrix = np.matmul(W, X)\n for i in range(num_train):\n\n # Sanity Check for the sizes of he matrices after multiplication\n # All rows in score_matrix represents the score of an image in a class\n # print \"Weight Matrix Shape: \", self.W.shape, \"Score Matrix Shape: \", score_matrix.shape\n\n for j in range(num_classes):\n if (j!=y[i]):\n loss_image_arr[i, j] = (max(0, score_matrix[j, i] - score_matrix[y[i], i] + delta))\n\n reg_loss = reg * np.sum(np.square(W))\n loss = np.sum(loss_image_arr)/num_train + reg_loss\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n \n #############################################################################\n # TODO: #\n # Implement a vectorized version of the gradient for the structured SVM #\n # loss, storing the result in dW. #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. 
#\n #############################################################################\n pass\n binary_matrix = loss_image_arr>0\n # print \"Binary Matrix: \", binary_matrix.shape, \"X_batch: \", X_batch.shape\n\n dW = -np.transpose(np.matmul(X, binary_matrix))\n dW = dW/num_train\n # print \"Iteration -- \", \"Loss: \", loss_iter , \"Gradient Shape: \", dW.shape, \"Weight Shape: \", self.W.shape\n\n \n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def svm_loss_naive(W, X, y, reg):\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n c = np.array([0, 1])\n pred_class = []\n for i in range(num_train):\n scores = X[i].dot(W)\n pred_class.append(c[np.argmax(scores)])\n #print('scores size:',scores.shape)\n correct_class_score = scores[y[i]]\n for j in range(num_classes):\n if j == y[i]:\n continue\n margin = scores[j] - correct_class_score + 1 # note delta = 1\n if margin > 0:\n loss += margin\n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n\n # Add regularization to the loss.\n loss += reg * np.sum(W * W)\n print(pred_class)\n\n return loss, dW, pred_class", "def svm_loss_vectorized(W, X, y, reg):\n loss = 0.0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the structured SVM loss, storing the #\n # result in loss. #\n #############################################################################\n num_classes = W.shape[1]\n num_train = X.shape[0]\n scores = np.dot(X, W)\n correct_class_score = np.choose(y, scores.T).reshape(-1, 1)\n thresh = np.maximum(np.zeros(scores.shape), scores - correct_class_score + 1)\n thresh[np.arange(num_train), y] = 0\n loss = np.sum(thresh)\n\n loss /= num_train\n loss += 0.5 * reg * np.sum(W * W)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the gradient for the structured SVM #\n # loss, storing the result in dW. #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n binary = thresh\n binary[binary > 0] = 1\n\n row_sum = np.sum(binary, axis=1)\n binary[np.arange(num_train), y] -= row_sum\n dW = np.dot(X.T, binary)\n\n dW /= num_train\n dW += reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def svm_loss_vectorized(W, X, y, reg):\n loss = 0.0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n delta = 1.0\n num_train = X.shape[1]\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the structured SVM loss, storing the #\n # result in loss. 
#\n #############################################################################\n scores = W.dot(X)\n idx = range(X.shape[1])\n correct_score = scores[y, idx]\n\n # print scores[y[0], 0], correct_score[0]\n \n correct_score = np.tile(correct_score, (10,1))\n loss = np.sum(np.maximum(np.zeros((W.shape[0], X.shape[1])), scores - correct_score + delta))\n loss -= X.shape[1] * delta\n loss /= X.shape[1]\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the gradient for the structured SVM #\n # loss, storing the result in dW. #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n # Calculate 0, loss maximum\n # take out all the non-zero entries\n # multiply with the training examples matrix transpose.\n # Add the resulting ans to dW\n maximum_mask = np.maximum(np.zeros((W.shape[0], X.shape[1])), scores - correct_score + delta)\n maximum_mask[y, idx] = 0\n\n maximum_mask[maximum_mask != 0] = 1\n \n sum_columnwise = np.sum(maximum_mask, axis=0)\n # replace correct entry with sum of columns\n maximum_mask[y, idx] = -sum_columnwise[range(num_train)]\n\n # Here we are doing two things at once, first we are calculating sum of all 1 entries in row\n # and then subtract that many number of times as sum of ones across column.\n dW = maximum_mask.dot(X.T)\n dW /= num_train\n dW += reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def svm_loss_naive(W, X, y, reg):\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n for i in xrange(num_train):\n scores = X[i].dot(W)\n correct_class_score = scores[y[i]]\n for j in xrange(num_classes):\n if j == y[i]:\n continue\n margin = scores[j] - correct_class_score + 1 # note delta = 1\n if margin > 0:\n loss += margin\n dW[:,j] += X[i,:].T\n dW[:,y[i]] -= X[i,:].T\n \n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n dW/= num_train\n\n # Add regularization to the loss.\n loss += 0.5 * reg * np.sum(W * W)\n dW += reg*W\n\n #############################################################################\n # TODO: #\n # Compute the gradient of the loss function and store it dW. #\n # Rather that first computing the loss and then computing the derivative, #\n # it may be simpler to compute the derivative at the same time that the #\n # loss is being computed. As a result you may need to modify some of the #\n # code above to compute the gradient. 
#\n #############################################################################\n\n\n return loss, dW", "def svm_loss_vectorized(theta, X, y, reg):\n J = 0.0\n dtheta = np.zeros(theta.shape) # initialize the gradient as zero\n delta = 1.0\n\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the structured SVM loss, storing the #\n # result in variable J. #\n #############################################################################\n \n # compute the loss function\n\n K = theta.shape[1]\n m = X.shape[0]\n\n scores = X.dot(theta)\n correct_class_score = scores[np.arange(m),y]\n margin = np.maximum(0, scores - correct_class_score[np.newaxis].T + delta)\n margin[np.arange(m), y] = 0\n J = np.sum(margin)\n\n J /= m\n J += 0.5 * reg * np.sum(theta * theta)\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the gradient for the structured SVM #\n # loss, storing the result in dtheta. #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n\n num_yi = np.sum(margin>0 , axis=1)\n temp = np.zeros(margin.shape) \n temp[margin>0] = 1\n temp[np.arange(m), y] = -num_yi\n dtheta = np.dot(X.T,temp)\n\n dtheta = dtheta/m\n dtheta = dtheta + reg*theta\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return J, dtheta", "def svm_loss_naive(W, X, y, reg):\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n for i in range(num_train):\n scores = X[i].dot(W)\n correct_class_score = scores[y[i]]\n for j in range(num_classes):\n if j == y[i]:\n continue\n margin = scores[j] - correct_class_score + 1 # note delta = 1\n if margin > 0:\n loss += margin\n dW[:, y[i]] -= X[i].transpose()\n dW[:, j] += X[i].transpose() # chain rule\n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n dW /= num_train\n\n # Add regularization to the loss.\n loss += 0.5 * reg * np.sum(W * W)\n dW += reg * W\n\n #############################################################################\n # TODO: #\n # Compute the gradient of the loss function and store it dW. #\n # Rather that first computing the loss and then computing the derivative, #\n # it may be simpler to compute the derivative at the same time that the #\n # loss is being computed. As a result you may need to modify some of the #\n # code above to compute the gradient. 
#\n #############################################################################\n\n return loss, dW", "def svm_loss_naive(W, X, y, reg):\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n # compute the loss and the gradient\n num_classes = W.shape[0]\n num_train = X.shape[1]\n loss = 0.0\n for i in xrange(num_train):\n scores = W.dot(X[:, i])\n correct_class_score = scores[y[i]]\n for j in xrange(num_classes):\n if j == y[i]:\n continue\n margin = scores[j] - correct_class_score + 1 # note delta = 1\n if margin > 0:\n loss += margin\n dW[j] += X[:, i]\n dW[y[i]] -= X[:, i]\n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n\n # Add regularization to the loss.\n loss += 0.5 * reg * np.sum(W * W)\n\n #############################################################################\n # TODO: #\n # Compute the gradient of the loss function and store it dW. #\n # Rather that first computing the loss and then computing the derivative, #\n # it may be simpler to compute the derivative at the same time that the #\n # loss is being computed. As a result you may need to modify some of the #\n # code above to compute the gradient. #\n #############################################################################\n\n\n return loss, (dW / num_train)", "def binary_svm_loss(theta, X, y, C):\n\n m, d = X.shape\n grad = np.zeros(theta.shape)\n J = 0\n\n ############################################################################\n # TODO #\n # Implement the binary SVM hinge loss function here #\n # 4 - 5 lines of vectorized code expected #\n ############################################################################\n J=0.5*np.sum(theta**2)/m\n J=J+C*np.sum(np.maximum(0,1-np.multiply(y,(np.dot(X,theta)))))/m\n \n grad=theta/m\n temp_1=np.dot(X,theta)\n temp_2=np.multiply(y,temp_1)\n\n temp_3=y[temp_2<1]\n temp_4=X[temp_2<1,:]\n temp_5=np.dot(temp_4.T,temp_3)\n grad=grad-temp_5*C/m\n\n\n# for j in range(d):\n# \tgrad[j]=float(theta[j]/m)\n# \tfor k in range(m):\n#\t \tif (y[k]*(np.dot(theta,X[k,:]))<1):\n#\t \t\tgrad[j]=grad[j]-float(C*y[k]*X[k,j]/m)\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return J, grad", "def svm_loss_vectorized(theta, X, y, reg):\n J = 0.0\n dtheta = np.zeros(theta.shape) # initialize the gradient as zero\n delta = 1.0\n\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the structured SVM loss, storing the #\n # result in variable J. #\n # 8-10 lines of code #\n #############################################################################\n\n K = theta.shape[1] # number of classes\n m = X.shape[0] # number of examples\n\n h = np.dot(X, theta)\n hy = np.choose(y, h.T).reshape(-1, 1)\n l = h - hy + delta\n margins = np.maximum(l, 0.0)\n margins[np.arange(m), y] = 0.0\n\n J = np.sum(margins)\n J /= m\n J += 0.5 * reg * np.sum(theta * theta)\n \n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the gradient for the structured SVM #\n # loss, storing the result in dtheta. 
#\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n\n g = (margins > 0) * 1\n g[np.arange(m), y] = -np.sum(g, axis = 1)\n \n dtheta = np.dot(X.T, g)\n dtheta /= m\n dtheta += reg * theta\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return J, dtheta", "def binary_svm_loss(theta, X, y, C):\n\n m, d = X.shape\n grad = np.zeros(theta.shape)\n J = 0\n\n ############################################################################\n # TODO #\n # Implement the binary SVM hinge loss function here #\n # 4 - 5 lines of vectorized code expected #\n ############################################################################\n h = np.dot(X, theta)\n J = 1.0 / 2 / m * np.sum(theta**2) + 1.0 * C / m * np.sum(np.max([np.zeros(m), 1 - y * h], axis = 0))\n\n grad = 1.0 / m * theta + 1.0 * C / m * np.dot(X.T, -y * (y * h < 1))\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return J, grad", "def svm_loss_forloop(W, X, y, reg, delta=1):\n\n ################################################################################\n # You implementation #\n # Use the ahove svm_loss_bias_forloop implementation as reference #\n ################################################################################\n\n # initialize the returned results\n loss = 0.0\n d_W = np.zeros(W.shape)\n\n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n\n for i in xrange(num_train):\n # compute the classification scores for a single image\n scores = X[i].dot(W)\n correct_class_score = scores[y[i]]\n for j in xrange(num_classes):\n # compute the loss for this image\n if j == y[i]:\n continue\n margin = scores[j] - correct_class_score + delta\n if margin > 0:\n loss += margin\n # compute the gradient for this image\n d_W[:, j] += X[i, :].T\n d_W[:, y[i]] -= X[i, :].T\n\n # Right now the loss is a sum over all training examples\n # We need it to be an average instead so we divide by num_train.\n loss /= num_train\n # Add regularization to the loss.\n #no reg on bias\n loss += 0.5 * reg * np.sum(W[:-1,:] * W[:-1,:])\n\n # Do the same for d_W and d_b\n d_W /= num_train\n d_W[:-1,:] += reg * W[:-1,:]\n\n\n return loss, d_W", "def svm_loss_naive(theta, X, y, reg):\n\n delta = 1.0\n dtheta = np.zeros(theta.shape) # initialize the gradient as zero\n\n # compute the loss function\n\n K = theta.shape[1]\n m = X.shape[0]\n J = 0.0\n for i in xrange(m):\n\tscores = X[i,:].dot(theta)\n\tcorrect_class_score = scores[y[i]]\n\tfor j in xrange(K):\n\t\tif j == y[i]:\n\t\t\tcontinue\n\t\tmargin = max(0,scores[j] - correct_class_score + delta)\n\t\tJ += margin\n\t\tif margin > 0 and j!=y[i]:\t\t\n\t\t\tdtheta[:,j] = dtheta[:,j]+X[i,:]\n\t\t\tdtheta[:,y[i]] = dtheta[:,y[i]]-X[i,:]\n\n\n # Right now the loss is a sum over all training examples, but we want it\n # To be an average instead so we divide by num_train.\n J /= m\n dtheta = dtheta/m\n # Add regularization to the loss.\n J += 0.5 * reg * np.sum(theta * theta)\n dtheta =dtheta + reg*theta\n\n #############################################################################\n # TODO: #\n # 
Compute the gradient of the loss function and store it dtheta. #\n # Rather that first computing the loss and then computing the derivative, #\n # it may be simpler to compute the derivative at the same time that the #\n # loss is being computed. As a result you may need to modify some of the #\n # code above to compute the gradient. #\n #############################################################################\n\n\n return J, dtheta", "def svm_loss_vectorized(W, X, y, reg, delta=1):\n loss = 0.0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n #############################################################################\n # Understand this implementation #\n #############################################################################\n # Hint: check how numpy broadcasting and advanced indexing are used\n # https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html\n # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html\n # This allows selection of arbitrary items in the array based on their N-dimensional index. Each integer array represents a number of indexes into that dimension.\n\n # Get dims\n D = X.shape[1]\n num_classes = W.shape[1]\n num_train = X.shape[0]\n scores = X.dot(W)\n\n correct_scores = scores[np.arange(num_train), y].reshape(-1, 1) # using the fact that all elements in y are < C == num_classes\n mat = scores - correct_scores + delta \n mat[np.arange(num_train), y] = 0 # accounting for the j=y_i term we shouldn't count (subtracting 1 makes up for it since w_j = w_{y_j} in this case)\n \n # Compute max\n thresh = np.maximum(np.zeros((num_train, num_classes)), mat)\n # Compute loss as double sum\n loss = np.sum(thresh)\n loss /= num_train\n \n # Add regularization\n loss += 0.5 * reg * np.sum(W * W)\n\n # Binarize into integers\n binary = thresh\n binary[thresh > 0] = 1\n\n row_sum = np.sum(binary, axis=1)\n binary[range(num_train), y] = -row_sum[range(num_train)]\n dW = np.dot(X.T, binary)\n\n # Divide\n dW /= num_train\n\n # Regularize\n dW += reg*W\n \n return loss, dW", "def svm_loss(x, y):\n x = np.squeeze(x)\n N = x.shape[0]\n yt = y\n yt[y==0]=-1\n tmp = 1-yt*x\n mask = np.ones_like(tmp)\n mask[tmp<=0] = 0\n tmp = tmp*mask\n loss = np.sum(tmp)/N\n \n dx = -yt*mask/N\n # dx = np.reshape(dx,[dx.shape[0],1])\n return loss, dx", "def svm_loss(x, y):\n\n x = x.reshape((-1,1))\n y = y.reshape((-1,1))\n N,_ = x.shape\n \n y_p = np.where(y == 1,1,-1)\n \n losses = np.maximum(0,1-(x*y_p))\n loss = np.sum(losses)/N\n dx = np.where(losses > 0, 1, 0)*(-y_p)/N\n dx = dx.reshape((-1,))\n\n return loss, dx", "def svm_loss_bias_forloop(W, b, X, y, reg, delta = 1): \n # initialize the returned results\n loss = 0.0\n d_W = np.zeros(W.shape) \n d_b = np.zeros(b.shape)\n\n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n \n for i in xrange(num_train):\n # compute the classification scores for a single image\n scores = X[i].dot(W) + b\n correct_class_score = scores[y[i]]\n for j in xrange(num_classes):\n # compute the loss for this image\n if j == y[i]:\n continue\n margin = scores[j] - correct_class_score + delta \n if margin > 0:\n loss += margin\n # compute the gradient for this image\n d_W[:, j] += X[i, :].T\n d_b[j] += 1\n d_W[:, y[i]] -= X[i, :].T\n d_b[y[i]] -= 1\n \n # Right now the loss is a sum over all training examples\n # We need it to be an average instead so we divide by num_train.\n loss /= num_train \n # Add regularization to the loss.\n loss += 0.5 * reg * np.sum(W * W)\n \n # Do the same for d_W 
and d_b\n d_W /= num_train\n d_W += reg*W\n\n d_b /= num_train\n \n return loss, d_W, d_b", "def svm_loss(x, y):\n N = x.shape[0]\n correct_class_scores = x[np.arange(N), y]\n margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)\n margins[np.arange(N), y] = 0\n loss = np.sum(margins) / N\n num_pos = np.sum(margins > 0, axis=1)\n dx = np.zeros_like(x)\n dx[margins > 0] = 1\n dx[np.arange(N), y] -= num_pos\n dx /= N\n return loss, dx", "def svm_loss_naive(theta, X, y, reg):\n\n K = theta.shape[1] # number of classes\n m = X.shape[0] # number of examples\n\n J = 0.0\n dtheta = np.zeros(theta.shape) # initialize the gradient as zero\n delta = 1.0\n\n #############################################################################\n # TODO: #\n # Compute the loss function and store it in J. #\n # Do not forget the regularization term! #\n # code above to compute the gradient. #\n # 8-10 lines of code expected #\n #############################################################################\n\n for i in xrange(m):\n h = np.dot(X[i,:], theta)\n hy = h[y[i]]\n for j in xrange(K):\n if j == y[i]:\n continue\n l = h[j] - hy + delta\n if l > 0:\n J += l\n dtheta[:, j] += X[i, :]\n dtheta[:, y[i]] -= X[i, :]\n\n J /= m\n dtheta /= m\n J += 0.5 * reg * np.sum(theta * theta)\n dtheta += reg * theta\n\n\n #############################################################################\n # TODO: #\n # Compute the gradient of the loss function and store it dtheta. #\n # Rather that first computing the loss and then computing the derivative, #\n # it may be simpler to compute the derivative at the same time that the #\n # loss is being computed. As a result you may need to modify some of the #\n # code above to compute the gradient. #\n #############################################################################\n\n return J, dtheta", "def loss_and_grad(self, X, y):\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n grad = np.zeros_like(self.W)\n grad_tmp = np.zeros_like(self.W)\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and the gradient. 
Store the gradient\n # as the variable grad.\n # ================================================================ #\n \n exp_a = np.zeros((num_classes,num_train))\n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n \n #if i==0:\n grada = np.zeros(X.shape[1])\n \n for j in range(num_classes):\n if j != y[i]:\n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) \n else: \n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) - X[i,:].T \n\n grad += grad_tmp\n loss += Loss \n \n pass\n\n\n loss /= num_train\n grad /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def svm_loss(x, y):\n N = x.shape[0]\n correct_class_scores = x[np.arange(N), y]\n margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)\n margins[np.arange(N), y] = 0\n loss = np.sum(margins) / N\n num_pos = np.sum(margins > 0, axis=1)\n dx = np.zeros_like(x)\n dx[margins > 0] = 1\n dx[np.arange(N), y] -= num_pos\n dx /= N\n return loss, dx", "def compute_gradient_and_loss(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n max_sj = -999\n argmax_sj = -1\n local_loss = 0.0\n for j in xrange(num_classes): # for every class \n if j != y[i]: # don't take the correct ground truth index\n if s[j] > max_sj:\n max_sj = s[j]\n argmax_sj = j\n\n term = 1 + max_sj - s_y # max term with Delta = 1, according to Hinge loss formula \n \n if term > 0:\n local_loss = term\n \n loss += local_loss\n \n for j in xrange(num_classes): # for every class \n if j != y[i]: # don't take the correct ground truth index\n if opt == 0: # compute gradient only if opt == 0\n if j == argmax_sj:\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n \n \n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n# dW += reg * deriv_abs(W) #dW[:,-1]\n# else:\n# dW += 2 * reg * W # l2 derivative formula \n dW[:-1,:] += reg * np.sign((W[:-1,:])) #dW[:,-1]\n else:\n dW[:-1,:] += 2 * reg * W[:-1,:] # l2 derivative formula \n return loss, dW\n else:\n 
return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n ############################################################################# ", "def svm_loss(scores, y):\r\n\r\n N = scores.shape[0]\r\n\r\n # Compute svm data loss\r\n correct_class_scores = scores[range(N), y]\r\n margins = np.maximum(0.0, scores - correct_class_scores[:, None] + 1.0)\r\n margins[range(N), y] = 0.0\r\n loss = np.sum(margins) / N\r\n\r\n # Compute gradient off loss function w.r.t. scores\r\n num_pos = np.sum(margins > 0, axis=1)\r\n dscores = np.zeros(scores.shape)\r\n dscores[margins > 0] = 1\r\n dscores[range(N), y] -= num_pos\r\n dscores /= N\r\n\r\n return loss, dscores", "def computeGradient(self, X, y, w):\n n = len(X)\n if self.loss == 'linear':\n gradient = -2 * np.dot(X.T, (y - X.dot(w)))\n elif self.loss == 'logistic':\n g = self.logistic(X, w)\n gradient = -2 * np.dot(X.T, (y - g) * g * (1 - g))\n elif self.loss == 'perceptron':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n index = ((np.dot(X, w) >= 0).astype(int) != y)\n usedX = X[index[:, 0]]\n usedY = newY[index[:, 0]]\n gradient = -np.dot(usedX.T, usedY)\n elif self.loss == 'svm':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n index = (np.dot(X, w) * newY < 1)\n usedX = X[index[:, 0]]\n usedY = newY[index[:, 0]]\n gradient = 2 * w - self.C * np.dot(usedX.T, usedY)\n gradient[0] = gradient[0] + 2 * w[0]\n\n return gradient", "def compute_gradient_and_loss1(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n for j in xrange(num_classes): # for every class\n if j != y[i]: # don't take the correct ground truth index\n term = s[j] - s_y + 1 # max term with Delta = 1, according to Hinge loss formula\n if term > 0: # trick: take only the term > 0, equal to max(0,...) 
formula\n loss += term # add the possitive term \n if opt == 0: # compute gradient only if opt == 0\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n dW += reg * deriv_abs(W)\n else:\n dW += 2 * reg * W # l2 derivative formula\n \n return loss, dW\n else:\n return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################", "def compute_loss(self):", "def compute_gradient_and_loss2(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n max_sj = -999\n argmax_sj = -1\n local_loss = 0.0\n for j in xrange(num_classes): # for every class \n if j == y[i]: # don't take the correct ground truth index\n continue\n if s[j] > max_sj:\n max_sj = s[j]\n argmax_sj = j\n\n term = 1 + max_sj - s_y # max term with Delta = 1, according to Hinge loss formula \n\n for j in xrange(num_classes): # for every class \n if j == y[i]: # don't take the correct ground truth index\n continue\n if term > 0: # trick: take only the term > 0, equal to max(0,...) 
formula\n local_loss = term # add the positive term \n if opt == 0: # compute gradient only if opt == 0\n if j == argmax_sj:\n dW[:, j] += X[i] # this is the analytical gradient, case j != y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n \n loss += local_loss \n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n dW[:,-1] += reg * deriv_abs(W[:,-1]) #dW[:,-1]\n else:\n dW[:,-1] += 2 * reg * W[:,-1] # l2 derivative formula\n \n return loss, dW\n else:\n return loss, None", "def loss(self, X, y=None, reg=0.0):\r\n Ws = self.weights\r\n bs = self.biases\r\n N, D = X.shape # number of samples, number of features per sample\r\n\r\n # Compute the forward pass\r\n self.activations = []\r\n for i in xrange(len(Ws)): # for each set of weights\r\n W,b = Ws[i], bs[i]\r\n if i == 0:\r\n H = np.dot(X,W) + b\r\n else:\r\n H = np.dot(self.activations[-1],W) + b\r\n if i < len(Ws) - 1: # if we're computing hidden activations, apply nonlinear function\r\n H = (H > 0) * (H) + (H < 0) * (H/100.0)\r\n self.activations.append(H)\r\n scores = self.activations[-1]\r\n \r\n # If there's no labels provided, stop here\r\n if y is None:\r\n return scores\r\n\r\n # Compute the loss\r\n exped_scores = np.exp(scores)\r\n sums = np.sum(exped_scores,axis=1)\r\n # softmax classifier loss\r\n data_loss = (-1.0/N) * np.sum(np.log(exped_scores[range(N),y.astype(int)] / sums))\r\n\r\n # loss due to regularization\r\n reg_loss = 0\r\n for i in xrange(len(Ws)):\r\n reg_loss += np.sum(Ws[i]**2)\r\n reg_loss *= reg*(0.5)\r\n\r\n loss = data_loss + reg_loss\r\n \r\n # Compute gradients\r\n weights_grads = []\r\n biases_grads = []\r\n activation_grads = []\r\n for i in xrange(len(Ws)):\r\n weights_grads.append(np.copy(Ws[i]))\r\n biases_grads.append(np.copy(bs[i]))\r\n activation_grads.append(np.copy(self.activations[i]))\r\n\r\n DlossDscores = np.array(exped_scores / (N * np.matrix(sums).T))\r\n DlossDscores[range(N),y.astype(int)] -= (1.0/N)\r\n \r\n for i in xrange(len(Ws)-1,-1,-1):\r\n if i == 0:\r\n weights_grads[0] = np.dot(X.T, activation_grads[0]) + reg*Ws[0]\r\n biases_grads[0] = np.dot(np.ones((1,N)), activation_grads[0])[0]\r\n elif i == len(Ws)-1:\r\n H = self.activations[i-1]\r\n weights_grads[i] = np.dot(H.T, DlossDscores) + reg*Ws[i]\r\n biases_grads[i] = np.dot(np.ones((1,N)), DlossDscores)[0]\r\n dH = np.dot(DlossDscores, Ws[i].T)\r\n activation_grads[i-1] = dH\r\n else:\r\n H = self.activations[i-1]\r\n dH_out = activation_grads[i]\r\n 
weights_grads[i] = np.dot(H.T, dH_out) + reg*Ws[i]\r\n biases_grads[i] = np.dot(np.ones((1,N)), dH_out)[0]\r\n dH = np.dot(dH_out, Ws[i].T)\r\n dH = dH * (H > 0) + dH/100.0 * (H < 0)\r\n activation_grads[i-1] = dH\r\n \r\n grads = {}\r\n grads['weights'] = weights_grads\r\n grads['biases'] = biases_grads\r\n\r\n return loss, grads", "def ovo_classifier(X_train, y_train, X_val, y_val):\n beta_matrix = []\n #obj_matrix =[obj(np.zeros(X_train.shape[1]), 1, X_train, y_train, h=0.5)]\n obj_matrix = []\n final_matrix = []\n final_matrix_test =[]\n for i in np.unique(y_train):\n for j in np.unique(y_train):\n if i < j:\n print(i,j)\n # Slicing X train and y train into each pair wise comparison\n # location scale y labels into [-1,1] for loss calculation\n sliced_X_train = X_train[np.ix_(np.bitwise_or(y_train == i, y_train == j))]\n sliced_y_train = ((y_train[np.ix_(np.bitwise_or(y_train == i, y_train == j))]-min(i,j))/(max(i,j)-min(i,j)))*2-1\n \n # generating index for cross validation indexing\n index = np.random.randint(low=0,high=3,size=sliced_X_train.shape[0])\n print('starting cross validation...', i, j)\n avg_error, set_of_lambda = fold3_CV_for_lambda(x=sliced_X_train, y=sliced_y_train, index=index)\n optimal_lambda = set_of_lambda[np.argmin(avg_error)]\n print('starting mylinearsvm...', i, j)\n beta_vals, obj_vals = mylinearsvm(beta = np.zeros(sliced_X_train.shape[1]),\n \t lambd = optimal_lambda,\n x = sliced_X_train,\n y = sliced_y_train,\n step_size_init=1)\n \n # Store classifier and objective value from each iteration\n beta_matrix.append(np.copy(beta_vals[-1]))\n obj_matrix.append(np.copy(obj_vals))\n\n print('starting prediction...', i, j)\n # Predict y_val from X_val using the trained classifiers\n pred = 2*(beta_vals.dot(X_val.T)>0)-1\n y_hat = np.zeros(X_val.shape[0])\n y_hat[pred[-1]==1] = j\n y_hat[pred[-1]==-1] = i\n final_matrix.append(np.copy(y_hat))\n\n beta_matrix = np.array(beta_matrix)\n obj_matrix = np.array(obj_matrix)\n print('beta matrix shape:', beta_matrix.shape)\n print('obj matrix shape:', obj_matrix.shape)\n\n prediction_matrix = np.array(final_matrix)\n \n y_val_pred = []\n for f in range(X_val.shape[0]):\n data = pd.DataFrame(prediction_matrix[:,f].astype(int))\n # Use mode to find the most frequent class\n assignment = data.mode()\n # In the case of a tie, use random int generator for the indexing of a random mode\n if assignment.shape[0] > 1:\n assign = assignment.loc[np.random.randint(assignment.shape[0]),0]\n else:\n assign = assignment.loc[0,0]\n y_val_pred.append(assign)\n \n misclassification_error = np.mean(np.array(y_val_pred) != y_val)\n print('misclassification_error:', misclassification_error)\n \n return np.array(beta_matrix), np.array(obj_matrix), np.array(y_val_pred), misclassification_error", "def trainSVM(dataSet, epochs, C, rho):\n \n D = len(dataSet[0][0]);\n w = np.zeros(D);\n t = 0;\n \n # run for some epochs, over every training point in random order on each epoch \n for epoch in range(epochs): \n\n random.shuffle(dataSet); \n for [x, y] in dataSet:\n \n # compute learning rate for this itr\n r = rho/(1 + rho*t/C); \n \n # compute gradient on single example\n if y*np.dot(w, x) <= 1:\n grad = w - C*y*x;\n else:\n grad = w;\n \n # update weight vector\n w = w - r*grad;\n \n t = t + 1;\n \n return w;", "def train(self, features, labels, optimizer, loss_scale=None):\n loss, gradients = self.compute_gradients(\n features,\n labels,\n optimizer,\n loss_scale=loss_scale,\n )\n optimizer.apply_gradients(list(zip(gradients, 
self.trainable_weights)))\n return loss", "def fast_loss_and_grad(self, X, y):\n loss = 0.0\n grad = np.zeros(self.W.shape) # initialize the gradient as zero\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and gradient WITHOUT any for loops.\n # ================================================================ #\n \n num_train = X.shape[0]\n num_classes = self.W.shape[0]\n \n# # vectorized loss calculation #\n class_scores_matrix = np.dot(self.W,X.T) # calculating class scores matrix (C x m): rows are class scores transposes\n class_scores_matrix -= np.max(class_scores_matrix) # considering the possible issue for numerical instability and account for it\n exp_a = np.exp(class_scores_matrix) # calculating the exponents\n \n# y_exp = np.array(exp_a[y, np.arange(0, class_scores_matrix.shape[1])])\n# #print(exp_a[:,:3])\n# #print(y[:3])\n# #print(y_exp[:3])\n \n# tt = np.sum(exp_a,axis=0)\n# tt2 = np.divide(tt,y_exp)\n# print(num_train)\n# tt3 = np.power(tt2,1/num_train)\n# loss = np.log(np.prod(tt3))\n \n \n \n \n (C, D) = self.W.shape\n N = X.shape[0]\n\n scores = np.dot(self.W, X.T)\n scores -= np.max(scores) # shift by log C to avoid numerical instability\n\n y_mat = np.zeros(shape = (C, N))\n y_mat[y, range(N)] = 1\n\n # matrix of all zeros except for a single wx + log C value in each column that corresponds to the\n # quantity we need to subtract from each row of scores\n correct_wx = np.multiply(y_mat, scores)\n\n # create a single row of the correct wx_y + log C values for each data point\n sums_wy = np.sum(correct_wx, axis=0) # sum over each column\n\n exp_scores = np.exp(scores)\n sums_exp = np.sum(exp_scores, axis=0) # sum over each column\n result = np.log(sums_exp)\n\n result -= sums_wy\n\n loss = np.sum(result)\n loss /= num_train\n \n \n # vectorized gradient calculation #\n exp_a_sum = np.sum(exp_a,axis=0)\n\n y_mat_corres = np.zeros(shape = (num_classes, num_train))\n y_mat_corres[y, range(num_train)] = 1\n sum_exp_scores = np.sum(exp_a, axis=0) \n sum_exp_scores = 1.0 / exp_a_sum # division by sum over columns\n exp_a *= sum_exp_scores\n grad = np.dot(exp_a, X)\n grad -= np.dot(y_mat_corres, X)\n grad /= num_train\n \n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def l2_loss_vectorized(self, W, X, y, reg):\n loss = 0.0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n num_train = X.shape[0]\n num_of_classes = W.shape[1]\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the perceptron loss, storing the #\n # result in loss and the gradient in dW #\n #############################################################################\n\n\n scores = X.dot(W) - y\n\n loss = np.mean(0.5 * (scores**2))\n\n grad = np.empty_like(W)\n grad = X.T.dot(scores)\n dW = grad\n dW /= num_train\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def _calc_loss(self, fvs, labels, w, b):\n\n loss = 0.5 * self.lda * (np.linalg.norm(w) ** 2)\n tmp = sum(map(lambda x, y: (x - y) ** 2, fvs.dot(w) + b, labels))\n loss += tmp / fvs.shape[0]\n\n return loss", "def svm():", "def compute_loss(self, features, mode, params, precomputed):\n raise 
NotImplementedError(\"Model does not implement loss.\")", "def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def model(X, Y, word_to_vec_map, learning_rate = 0.01, num_iterations = 400):\n \n # Get a valid word contained in the word_to_vec_map \n any_word = list(word_to_vec_map.keys())[0]\n \n # Initialize cost. It is needed during grading\n cost = 0\n \n # Define number of training examples\n m = Y.shape[0] # number of training examples\n n_y = len(np.unique(Y)) # number of classes \n n_h = word_to_vec_map[any_word].shape[0] # dimensions of the GloVe vectors \n \n # Initialize parameters using Xavier initialization\n W = np.random.randn(n_y, n_h) / np.sqrt(n_h)\n b = np.zeros((n_y,))\n \n # Convert Y to Y_onehot with n_y classes\n Y_oh = convert_to_one_hot(Y, C = n_y) \n \n # Optimization loop\n for t in range(num_iterations): # Loop over the number of iterations\n for i in range(m): # Loop over the training examples\n \n ### START CODE HERE ### (≈ 4 lines of code)\n # Average the word vectors of the words from the i'th training example\n # def sentence_to_avg(sentence, word_to_vec_map): # return avg\n avg = sentence_to_avg(X[i], word_to_vec_map)\n\n # Forward propagate the avg through the softmax layer. \n # You can use np.dot() to perform the multiplication.\n z = np.dot(W, avg) + b\n a = softmax(z)\n\n # Compute cost using the i'th training label's one hot representation and \"A\" (the output of the softmax)\n cost = - np.sum(Y_oh[i] * a)\n ### END CODE HERE ###\n \n # Compute gradients \n dz = a - Y_oh[i]\n dW = np.dot(dz.reshape(n_y,1), avg.reshape(1, n_h))\n db = dz\n\n # Update parameters with Stochastic Gradient Descent\n W = W - learning_rate * dW\n b = b - learning_rate * db\n \n if t % 100 == 0:\n print(\"Epoch: \" + str(t) + \" --- cost = \" + str(cost))\n pred = predict(X, Y, W, b, word_to_vec_map) #predict is defined in emo_utils.py\n\n return pred, W, b", "def compute_loss(W, b, x, y, config):\n\n # Lazy import of propper model\n if config.model_type == \"linear_svm\":\n from utils.linear_svm import model_loss\n elif config.model_type == \"logistic_regression\":\n from utils.logistic_regression import model_loss\n else:\n raise ValueError(\"Wrong model type {}\".format(\n config.model_type))\n\n loss, loss_c, pred = model_loss(W, b, x, y)\n loss += config.reg_lambda * l2_loss(W)\n\n return loss, loss_c, pred", "def train(self, X, y, learning_rate=1e-3, reg=1e-5, num_iters=100,\n batch_size=200, verbose=False):\n num_train, dim = X.shape\n num_classes = np.max(y) + 1 # assume y takes values 0...K-1 where K is number of classes\n if self.W is None:\n self.W = np.random.randn(dim, num_classes) * 0.001\n\n # Run stochastic gradient descent to optimize W\n loss_history = []\n for it in xrange(num_iters):\n batch_ind = np.random.choice(X.shape[0],batch_size, replace=True)\n X_batch = X[batch_ind]\n y_batch = y[batch_ind]\n\n # Step One: Implement Stochastic\n #########################################################################\n # Sample batch_size elements from the training data and their #\n # corresponding labels to use in this round of gradient descent. 
#\n # Store the data in X_batch and their corresponding labels in #\n # y_batch; after sampling X_batch should have shape (batch_size, dim) #\n # and y_batch should have shape (batch_size,) #\n # #\n # Hint: Use np.random.choice to generate indices. Sampling with #\n # replacement is faster than sampling without replacement. #\n #########################################################################\n\n # Step Two: Implement Gradient\n # Simply call self.loss (which calls svm_loss_vectorized) to evaluate loss and gradient\n loss, dW = self.loss(X_batch,y_batch,reg)\n loss_history.append(loss)\n\n # Step Three: Implement Descent\n # Simply update the weights using the gradient and the learning rate. #\n self.W -= dW*learning_rate\n\n if verbose and it % 100 == 0:\n print('iteration %d / %d: loss %f' % (it, num_iters, loss))\n\n return loss_history", "def compute_loss_and_gradients(self, X, y):\n # TODO Compute loss and fill param gradients\n # by running forward and backward passes through the model", "def train(features, targets, weights, bias):\n # see gradient_descent for explanation\n epochs = 100\n learning_rate = 0.1\n\n picture_nb = 2\n\n # Print current accuracy. How many people have been classified as sick/healthy correctly?\n predictions = predict(features, weights, bias)\n print(\"Accuracy: \", np.mean(predictions == targets))\n\n for epoch in range(epochs):\n if epoch % 10 == 0:\n # get normalized scores\n predictions = activation(pre_activation(features, weights, bias))\n # compare with targets to see how bad our algorithm is\n print(\"Cost = %s\" % cost(predictions, targets))\n # Replot graph. Check in create_dataset for explanation of parameters\n if picture_nb == 2:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='red')\n elif picture_nb == 11:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='green')\n else:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='orange')\n picture_nb+=1\n\n # Initialize gradients\n # weights_gradients is 2D array with 2 values\n weights_gradients = np.zeros(weights.shape)\n bias_gradient = 0\n # Go through each row\n for feature, target in zip(features, targets):\n # Compute prediction\n z = pre_activation(feature, weights, bias)\n # Get normalized score\n y = activation(z)\n # Update gradients based on formulas established before. Look at gradient_descent to understand what we\n # are doing. Also, the formulas are below, just before the call of the function train.\n weights_gradients += (y - target) * derivative_activation(z) * feature\n # no multiplication of feature because it does not depend on some coordinates.\n bias_gradient += (y - target) * derivative_activation(z)\n\n # Update variables. These are the lines that result the cost to get reduced.\n weights = weights - learning_rate * weights_gradients\n bias = bias - learning_rate * bias_gradient\n\n # Print final accuracy. 
How many people have been classified as sick/healthy correctly?\n predictions = predict(features, weights, bias)\n print(\"Accuracy: \", np.mean(predictions == targets))\n\n plt.scatter(features[:, 0], features[:, 1], s=40, c=targets, cmap=plt.cm.Spectral)\n plt.savefig(\"DataPointsLineEvolution.png\")\n # legend for understanding\n plt.legend(['Original division', 'New division', 'New division', 'New division', 'New division', 'New division',\n 'New division', 'New division', 'New division', 'Final division'], loc='upper left')\n # save picture of data points drawn.\n plt.savefig(\"DataPointsLineEvolutionLegend.png\")", "def gradient_descent(self, x, y):\n # Initialize weights vector\n self.weights = np.zeros(len(x[0]))\n\n # Storing number of training example in a variable \n n = len(x)\n\n # Initiate variables to keep track of the current and smallest loss recorded\n lowest_loss = sys.float_info.max\n current_loss = sys.float_info.max\n\n # Initiate variables to keep track of step sizes\n norm = sys.float_info.max\n smallest_norm = sys.float_info.max\n\n # Initiate list variable that stores all previous weights\n prev_weights = []\n\n # Initiate list that stores all the errors. \n errors = []\n \n # Variable to keep track of the number of iterations that returns a bigger loss than current loss\n k_loss_iteration = 1\n\n # Learning loop\n for i in range(self.max_iter):\n\n # Append current weights\n prev_weights.append(np.array(self.weights))\n \n # Minimizing Loss Function Error by adjusting weights using Gradient Descent\n self.weights += self.learning_rate * (sum([x[i] * (y[i] - self.logistic_function(self.weights.dot(x[i]))) for i in range(n)]) - 2 * self.l2 * self.weights)\n\n # Compute the error of the Cost Function and store it in a list\n current_loss = self.cost(x,y)\n\n if len(errors) > 1 and current_loss > errors[-1]:\n k_loss_iteration += 1\n else: \n k_loss_iteration = 1\n\n errors.append(current_loss)\n \n # Track smallest loss\n if current_loss < lowest_loss:\n lowest_loss = current_loss\n\n # Compute the L2 Norm of the difference between current weights and previous weights\n norm = np.linalg.norm(self.weights - prev_weights[-1])\n\n # Track smallest step size and set it as error threshold\n if norm < smallest_norm:\n smallest_norm = norm\n\n # If this L2 norm is smaller than the error_threshold it means that it converged, hence we can break. 
In other words, repeat until the step size is too small\n if self.error_threshold != None and norm < self.error_threshold:\n print(\"Converged after {} iterations!\".format(i))\n break\n\n # stop if error hasn't gone down in k iterations\n if k_loss_iteration >= 10:\n print(k_loss_iteration + \" iterations of loss not decreasing on {}th itertion.\".format(i))\n break\n\n # Log final weights\n print(\"Final norm: \" + str(norm) + \"\\nSmallest step size recorded: \" + str(smallest_norm) + \"\\nFinal error: \" + str(current_loss) + \"\\nLowest error recorded: \" + str(lowest_loss) + \"\\nNumber of epochs: \" + str(len(errors)) + \"\\nFinal weights: \" + str(self.weights))", "def loss(self, X, y=None):\n\n # In dev testing, the loss fnc stops at \"scores\" , unfollowed by \"softmax\" probability prediction.\n # In real testing, \"self.predict()\" needs to be implemented in Solver() class.\n \n if y is None:\n for bn_param in self.bn_params:\n bn_param[\"mode\"] = \"test\"\n\n\n W1, b1 = self.params['W1'], self.params['b1']\n gamma1, beta1 = self.params[\"sbnGamma1\"], self.params[\"sbnBeta1\"]\n bn_param1 = self.bn_params[0]\n\n W2, b2 = self.params['W2'], self.params['b2']\n gamma2, beta2 = self.params[\"sbnGamma2\"], self.params[\"sbnBeta2\"]\n bn_param2 = self.bn_params[1]\n\n W3, b3 = self.params['W3'], self.params['b3']\n gamma3, beta3 = self.params[\"bnGamma3\"], self.params[\"bnBeta3\"]\n bn_param3 = self.bn_params[2]\n\n W4, b4 = self.params['W4'], self.params['b4']\n \n # pass conv_param to the forward pass for the convolutional layer\n conv_param = self.conv_param\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = self.maxpool_params\n\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n \n scores = None \n cache = {}\n # def conv_sbn_relu_forward(x, w, b, gamma, beta, conv_param, bn_param): return out, cache;\n out, cache[\"layer1\"] = layer_utils.conv_sbn_relu_forward(X, W1, b1, gamma1, beta1, conv_param, bn_param1) \n out, cache[\"layer2\"] = layer_utils.conv_sbn_relu_forward(out, W2, b2, gamma2, beta2, conv_param, bn_param2)\n\n # def max_pool_forward_fast(x, pool_param): return out, cache;\n out, cache[\"maxpool\"] = fast_layers.max_pool_forward_fast(out, pool_param)\n\n # def affine_bn_relu_forward(x, w, b, gamma, beta, bn_param): return out, cache;\n \n out, cache[\"layer3\"] = layer_utils.affine_bn_relu_forward(out, W3, b3, gamma3, beta3, bn_param3)\n\n # def affine_forward(x, w, b): return out, cache;\n scores, cache[\"layer4\"] = layers.affine_forward(out, W4, b4)\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! 
#\n ############################################################################\n \n loss, grads = 0, {}\n\n # def softmax_loss(x, y): return loss, dscore;\n loss, dscores = layers.softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3) + np.sum(W4 * W4))\n\n # def affine_backward(dout, cache): return dx, dw, db;\n dout, dW4, db4 = layers.affine_backward(dscores, cache[\"layer4\"]) \n\n # def affine_bn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW3, db3, dgamma3, dbeta3 = layer_utils.affine_bn_relu_backward(dout, cache[\"layer3\"])\n\n # print cache[\"layer3\"]\n\n # def max_pool_backward_fast(dout, cache): return max_pool_backward_im2col(dout, real_cache);\n # def max_pool_backward_im2col(dout, cache): return dx;\n dout = fast_layers.max_pool_backward_fast(dout, cache[\"maxpool\"])\n\n # def conv_sbn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW2, db2, dgamma2, dbeta2 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer2\"])\n _, dW1, db1, dgamma1, dbeta1 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer1\"])\n\n # reg\n grads['W4'], grads['b4'] = dW4 + self.reg * W4, db4\n \n grads['W3'], grads['b3'] = dW3 + self.reg * W3, db3\n grads[\"bnGamma3\"], grads[\"bnBeta3\"] = dgamma3, dbeta3\n\n grads['W2'], grads['b2'] = dW2 + self.reg * W2, db2\n grads[\"sbnGamma2\"], grads[\"sbnBeta2\"] = dgamma2, dbeta2\n\n grads['W1'], grads['b1'] = dW1 + self.reg * W1, db1\n grads[\"sbnGamma1\"], grads[\"sbnBeta1\"] = dgamma1, dbeta1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def loss(self, X, y=None, reg=0.0):\n\n self.layers = []\n layers = self.layers\n layers.append(X)\n\n # Unpack variables from the params dictionary\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n N, D = X.shape\n H, C = W2.shape\n\n # Compute the forward pass\n scores = None\n #############################################################################\n # TODO: Perform the forward pass, computing the class scores for the input. #\n # Store the result in the scores variable, which should be an array of #\n # shape (N, C). #\n #############################################################################\n mid = np.maximum(0, X.dot(W1) + b1.reshape(1, -1)) # activation\n scores = mid.dot(W2) + b2.reshape(1, -1)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n # If the targets are not given then jump out, we're done\n if y is None:\n return scores\n\n # Compute the loss\n loss = None\n #############################################################################\n # TODO: Finish the forward pass, and compute the loss. This should include #\n # both the data loss and L2 regularization for W1 and W2. Store the result #\n # in the variable loss, which should be a scalar. Use the Softmax #\n # classifier loss. 
So that your results match ours, multiply the #\n # regularization loss by 0.5 #\n #############################################################################\n exp_score = np.exp(scores)\n exp_score_sum = exp_score.sum(axis=1)\n correct_score = exp_score[np.arange(N), y]\n probability = (correct_score / exp_score_sum).reshape(-1, 1)\n loss = -np.log(probability).sum()\n\n loss /= N\n loss += 0.5 * reg * (np.sum(W1 * W1) + np.sum(W2 * W2))\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n # Backward pass: compute gradients\n grads = {}\n #############################################################################\n # TODO: Compute the backward pass, computing the derivatives of the weights #\n # and biases. Store the results in the grads dictionary. For example, #\n # grads['W1'] should store the gradient on W1, and be a matrix of same size #\n #############################################################################\n des = np.tile((-correct_score / np.square(exp_score_sum)).reshape(-1, 1), (1, C))\n des[np.arange(N), y] += 1.0 / exp_score_sum\n dsoftmax = des * (-np.ones((mid.shape[0], 1)) / probability) * np.exp(scores)\n\n # W2\n grads['W2'] = mid.T.dot(dsoftmax)\n grads['W2'] /= N\n grads['W2'] += reg * W2\n\n # b2\n grads['b2'] = np.ones_like(b2.reshape(1, -1)) * dsoftmax\n grads['b2'] = np.mean(grads['b2'], axis=0).reshape(-1)\n\n # W1\n binary = np.zeros_like(mid)\n binary[mid > 0] = 1\n grads['W1'] = X.T.dot(binary * dsoftmax.dot(W2.T)) # chain rule, compute dmid/dW1 * dscore/dmid * dsoftmax\n grads['W1'] /= N\n grads['W1'] += reg * W1\n\n # b1\n grads['b1'] = np.ones_like(b1.reshape(1, -1)) * binary * dsoftmax.dot(W2.T)\n grads['b1'] = np.mean(grads['b1'], axis=0).reshape(-1)\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, grads", "def learn(self):\n Xt = np.append(np.ones((self.X.shape[0], 1)), self.X, axis=1)\n Yt = self.Y * 2 - 1\n\n w = np.ones(Xt.shape[1]) # avoiding random init, for debugging\n lw = [[] for k in range(len(w))]\n \n for iter in range(self.max_steps):\n P = Yt * np.dot(Xt, w)\n M = np.where(P <= 0)[0] # indices of misclassified datapoints\n\n if len(M) == 0: \n self.logger.debug(\"Found linearly separable hyperplane!\")\n break\n\n if self.is_stochastic:\n # just pick one randomly from M\n M = [M[random.randint(0, len(M)-1)]]\n\n grad = -1 * np.sum((Yt[M] * Xt[M].T), axis=1) / len(M)\n\n if self.reg_constant > 0:\n grad += self.reg_constant * w\n \n eta = self.step_size * 10000 / (10000 + iter)\n \n w = w - grad * eta\n \n if iter % 100 == 0:\n for k in range(len(w)):\n lw[k].append(w[k])\n \n if iter % 1000 == 0:\n self.logger.debug(\"Iter %s:\\t %f %f %f\" %(iter, w[0], w[1], w[2]))\n \n self.logger.debug(\"Iterations: %s\" %(iter))\n\n# x_range = range(len(lw[0]))\n# fig = plt.figure()\n# ax1 = fig.add_subplot(111) \n# for j, lwn in enumerate(lw):\n# if j % 3 >= 2: # plot an arbitrary subset of features\n# a = w[j]\n# ax1.plot(x_range, [(x-a) for x in lwn], label=str(j))\n# \n# plt.xlabel(\"Iteration\")\n# plt.ylabel(\"Feature weight\")\n# plt.show()\n \n #self.logger.debug(\"%s\" % np.array2string(w, precision=2, separator=','))\n \n self.w = w", "def svm_model_fn(features, labels, mode, params):\n\n feature_columns = [layers.real_valued_column(i) for i in 
features.keys()]\n example_id_column(features)\n\n weight_column_name = params.get(\"weight_column_name\")\n\n head = head_lib.binary_svm_head(\n weight_column_name=weight_column_name,\n enable_centered_bias=False)\n\n optimizer = sdca_optimizer.SDCAOptimizer(\n example_id_column=\"index\",\n num_loss_partitions=params[\"num_loss_partitions\"],\n symmetric_l1_regularization=params[\"l1_regularization\"],\n symmetric_l2_regularization=params[\"l2_regularization\"])\n\n chief_hook = linear._SdcaUpdateWeightsHook()\n update_weights_hook = chief_hook\n\n if not isinstance(optimizer, sdca_optimizer.SDCAOptimizer):\n raise ValueError(\"Optimizer must be of type SDCAOptimizer\")\n\n if isinstance(head,\n head_lib._BinarySvmHead): # pylint: disable=protected-access\n loss_type = \"hinge_loss\"\n elif isinstance(head,\n head_lib._BinaryLogisticHead): # pylint:\n # disable=protected-access\n loss_type = \"logistic_loss\"\n elif isinstance(head,\n head_lib._RegressionHead): # pylint:\n # disable=protected-access\n assert head.logits_dimension == 1, (\"SDCA only applies for \"\n \"logits_dimension=1.\")\n loss_type = \"squared_loss\"\n else:\n raise ValueError(\"Unsupported head type: {}\".format(head))\n\n parent_scope = \"linear\"\n\n with variable_scope.variable_op_scope(\n features.values(), parent_scope) as scope:\n features = features.copy()\n features.update(layers.transform_features(features, feature_columns))\n logits, columns_to_variables, bias = (\n layers.weighted_sum_from_feature_columns(\n columns_to_tensors=features,\n feature_columns=feature_columns,\n num_outputs=1,\n scope=scope))\n\n linear._add_bias_column(feature_columns, features, bias,\n columns_to_variables)\n\n def _train_op_fn(unused_loss):\n global_step = contrib_variables.get_global_step()\n sdca_model, train_op = optimizer.get_train_step(columns_to_variables,\n weight_column_name,\n loss_type, features,\n labels, global_step)\n if update_weights_hook is not None:\n update_weights_hook.set_parameters(sdca_model, train_op)\n return train_op\n\n model_fn_ops = head.create_model_fn_ops(\n features=features,\n labels=labels,\n mode=mode,\n train_op_fn=_train_op_fn,\n logits=logits)\n if update_weights_hook is not None:\n return model_fn_ops._replace(\n training_chief_hooks=(model_fn_ops.training_chief_hooks +\n [update_weights_hook]))\n return model_fn_ops", "def SVM_SMO(self, train_data, label, C):\n num_data, num_feature = train_data.shape\n iter_i = 0\n intercept = 0\n alpha_vec = np.zeros((num_data, 1))\n iter_num = 1000\n while iter_i <= iter_num:\n alpha_changed = 0\n for i in range(num_data):\n gX_i = float(np.inner((alpha_vec.reshape(-1) * label),\n (np.dot(train_data, train_data.iloc[i, :].T)))) + intercept\n E_i = gX_i - float(label[i])\n if (((label[i] * E_i < -1e-3) and (alpha_vec[i] < C))\n or ((label[i] * E_i > 1e-3) and (alpha_vec[i] > 0))):\n j = int(self.find_new_index(i,num_data))\n gX_j = float(np.inner((alpha_vec.reshape(-1) * label),\n (np.dot(train_data, train_data.iloc[j, :].T)))) + intercept\n E_j = gX_j - float(label[j])\n alpha_old_i = alpha_vec[i].copy()\n alpha_old_j = alpha_vec[j].copy()\n\n if label[i] != label[j]:\n L = np.maximum(0, alpha_old_j - alpha_old_i)\n H = np.minimum(C, C + alpha_old_j - alpha_old_i)\n else:\n L = np.maximum(0, alpha_old_j + alpha_old_i - C)\n H = np.minimum(C, alpha_old_j + alpha_old_i)\n\n if L == H:\n continue\n\n x1 = train_data.iloc[i, :]\n x2 = train_data.iloc[j, :]\n eta = 2 * np.inner(x1, x2) - np.inner(x1, x1) - np.inner(x2, x2)\n alpha_new_j = alpha_old_j - 
label[j] * (E_i - E_j) / eta\n\n if alpha_new_j > H:\n alpha_new_j = H\n elif alpha_new_j < L:\n alpha_new_j = L\n\n alpha_vec[j] = alpha_new_j\n if np.abs(float(alpha_new_j - alpha_old_j)) < 1e-4:\n continue\n alpha_new_i = alpha_old_i + label[i] * label[j] * (alpha_old_j - alpha_new_j)\n\n # intercept decision\n intercept_new_1 = (intercept - E_i - label[i] * (alpha_new_i - alpha_old_i) * np.inner(x1, x1)\n - label[j] * (alpha_new_j - alpha_old_j) * np.inner(x1, x2))\n intercept_new_2 = (intercept - E_j - label[i] * (alpha_new_i - alpha_old_i) * np.inner(x1, x2)\n - label[j] * (alpha_new_j - alpha_old_j) * np.inner(x2, x2))\n\n if (alpha_new_i > 0) and (alpha_new_i < C):\n intercept = intercept_new_1\n elif (alpha_new_j > 0) and (alpha_new_j < C):\n intercept = intercept_new_2\n else:\n intercept = (intercept_new_1 + intercept_new_2) / 2\n alpha_vec[i] = alpha_new_i\n alpha_changed += 1\n\n if alpha_changed == 0:\n iter_i += 1\n else:\n iter_i = 0\n\n W = np.dot(np.multiply(alpha_vec, label[:,np.newaxis]).T, train_data)\n W = W[0]\n w = -W[0]/W[1]\n margin = 1 / np.sqrt(np.sum(W ** 2))\n margin = np.sqrt(1 + w ** 2) * margin\n intercept = intercept/W[1]\n return(alpha_vec, w, intercept, margin)", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n # print \"dW's shape\", dW.shape\n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax.ipynb loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n # For every training image\n for train_image in xrange(num_train):\n # Multiply the weights by the image to get the scores\n scores = X[train_image].dot(W)\n # print(scores)\n # And then get the correct score\n correct_label = y[train_image]\n correct_score = scores[correct_label]\n # TODO: Right up to here\n # And then get the score of every other classifier\n all_scores = np.sum(scores)\n # Add a normalizing factor for numeric stability\n normalizing_constant = np.max(scores)\n scores -= normalizing_constant\n correct_score -= normalizing_constant\n #Calculating the softmax values\n softmax = np.exp(correct_score)/np.sum(np.exp(scores))\n\n # print(\"Correct score softmax\",softmax)\n\n # And calculating the loss\n loss += -1*np.log(softmax)\n # print loss\n #TODO: Loss computation is also correct\n\n # And calculating the gradient\n\n # First, update the Weight matrix with the correct example's derivative\n dW[:,correct_label] += (softmax-1)*np.transpose(X[train_image])\n\n # Then do the same for the wrong cases\n incorrect_labels = [x for x in xrange(num_classes) if x != correct_label]\n # Now, update the weights\n for label_index in incorrect_labels:\n #Calculating the softmax for a wrong label\n incorrect_label_softmax = np.exp(scores[label_index])/(np.sum(np.exp(scores)))\n # Calculating the derivative\n necessary_weight = incorrect_label_softmax*np.transpose(X[train_image])\n # Updating the weights\n dW[:,label_index] += necessary_weight\n\n\n # Divide the loss\n loss /= num_train\n dW /= num_train\n\n # Now, do regularization\n loss += 0.5*reg*np.sum(W*W)# Penalize big weights\n dW += reg*W\n\n\n\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def loss(self, X, y):\n\n # Initialize the loss to zero.\n loss = 0.0\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n exp_a = np.zeros((num_classes,num_train))\n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the normalized softmax loss. 
Store it as the variable loss.\n # (That is, calculate the sum of the losses of all the training \n # set margins, and then normalize the loss by the number of \n # training examples.)\n # ================================================================ #\n \n \n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n\n #p[:,i] = exp_a[:,i]/np.sum(exp_a[:,i]) # p now is a valid probability matrix\n #print(p[:,i])\n\n loss += Loss \n #print(Loss,i) \n \n pass\n loss /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss", "def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def ex_2_b(x_train, y_train, x_test, y_test):\n ###########\n ## TODO:\n ## Train SVMs with polynomial kernels for different values of the degree\n ## (Remember to set the 'coef0' parameter to 1)\n ## and plot the variation of the training and test scores with polynomial degree using 'plot_score_vs_degree' func.\n ## Plot the decision boundary and support vectors for the best value of degree\n ## using 'plot_svm_decision_boundary' function\n ###########\n degrees = range(1, 21)\n\n test_scores = np.array([])\n train_scores = np.array([])\n best_svm = None\n best_test_score = 0\n\n for deg in degrees:\n clf = svm.SVC(kernel='poly', degree=deg, coef0=1)\n clf.fit(x_train, y_train)\n\n test_score = clf.score(x_test, y_test)\n\n if test_score > best_test_score:\n best_test_score = test_score\n best_svm = clf\n\n test_scores = np.append(test_scores, test_score)\n train_scores = np.append(train_scores, clf.score(x_train, y_train))\n\n plot_score_vs_degree(train_scores, test_scores, degrees)\n\n plot_svm_decision_boundary(clf, x_train, y_train, x_test, y_test)", "def loss(self, X, labels):\n features = self.get_conv_features(X)\n loss = blah\n return loss", "def SVM_train(Ktrain,y,lbda_vec):\r\n n = Ktrain.shape[0]\r\n for idx, lbda in enumerate(lbda_vec): \r\n C = 1/(2*lbda*n)\r\n P = matrix(Ktrain, tc=\"d\")\r\n q = - matrix(y,tc=\"d\")\r\n G = matrix( np.concatenate( (np.diagflat(y) , -np.diagflat(y) ), axis=0 ),tc=\"d\" )\r\n h1 = C * np.ones((n,1))\r\n h2 = np.zeros((n,1)) \r\n h = matrix(np.concatenate((h1,h2),axis=0))\r\n\r\n solvers.options['show_progress'] = False\r\n \r\n sol = solvers.qp(P,q,G,h) \r\n a = np.asarray(sol['x'])\r\n\r\n #alpha is sparse\r\n a[np.where(np.abs(a) < 1e-4)] = 0\r\n y_svm = np.dot(Ktrain,a)\r\n\r\n print(\"Précision pour lambda = \" + str(lbda) + \" :\", accuracy(y_svm,y))", "def _compute_loss(self, predictions, targets, **params):\n pass", "def loss(self, labels, input_data):\n\n pred, out = self.inference(input_data)\n loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels, out), 
name=\"loss\") + \\\n tf.losses.get_regularization_loss()\n return loss, pred", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train=X.shape[0]\n num_class=W.shape[1]\n num_feature=X.shape[1]\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n for i in range(num_train):\n #W*Xi C*1\n x=np.exp(np.dot(W.T,X[i,:]))\n denominator=np.sum(x)\n numerator=x[y[i]]\n loss-=np.log(numerator/denominator)\n #numerator and denominator\n #for j in range(num_class):\n normalize_score=x/denominator\n nm=np.reshape(normalize_score, (num_class, 1))\n \n #CxD\n dscore=nm.dot(np.reshape(X[i,:],(1,num_feature)))\n #print(dscore.shape)\n\n dscore[y[i],:]-=X[i,:]\n dW+=dscore.T\n\n loss/=num_train\n dW = dW/num_train + reg*W\n #\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def loss(self, X, labels):\n features = self.get_conv_feats(X)\n loss = blah\n return loss", "def step(self, x, y, learning_rate=1e-3):\n \n # Input transformation\n \"\"\"\n Input is represented with M-dimensional vectors\n We convert them to (N, M) matrices such that columns are one-hot \n representations of the input\n \"\"\"\n x = self.one_hot(x, self.N)\n y = self.one_hot(y, self.N)\n\n \n # Forward propagation\n \"\"\"\n Returns\n -------\n embedding: array\n (D, M) matrix where columns are word embedding from U matrix\n logits: array\n (N, M) matrix where columns are output logits\n prob: array\n (N, M) matrix where columns are output probabilities\n \"\"\"\n \n ### YOUR CODE HERE ###\n #Omran:\n #U and V of dimension (D, N) and (N, D) respectively\n\n embedding = np.dot(self.U, x)\n logits = np.dot(self.V, embedding)\n prob = self.softmax(logits,0)# take care of the axis, I am not quite sure how you will implement it\n \n assert embedding.shape == (self.D, x.shape[1])\n assert logits.shape == (self.N, x.shape[1])\n assert prob.shape == (self.N, x.shape[1])\n \n \n # Loss calculation\n \"\"\"\n Returns\n -------\n loss: int\n Cross-entropy loss using true values and probabilities\n \"\"\"\n \n ### YOUR CODE HERE ###\n loss = self.loss(y, prob)\n \n # Backward propagation\n \"\"\"\n Returns\n -------\n d_U: array\n (N, D) matrix of partial derivatives of loss w.r.t. U\n d_V: array\n (D, N) matrix of partial derivatives of loss w.r.t. V\n \"\"\"\n \n ### YOUR CODE HERE ###\n #I am not quite sure of this!!\n \n# difference = np.sum(np.subtract(prob, y), axis=1)\n difference = prob - y\n d_V = difference @ embedding.T\n# print(self.N, self.D)\n# print(difference.shape)\n# print(d_V.shape)\n d_U = (self.V.T @ difference) @ x.T\n# d_U = self.V.T @ np.outer(difference, x)\n \n assert d_V.shape == (self.N, self.D)\n assert d_U.shape == (self.D, self.N)\n \n \n # Update the parameters\n \"\"\"\n Updates the weights with gradient descent such that W_new = W - alpha * dL/dW, \n where alpha is the learning rate and dL/dW is the partial derivative of loss w.r.t. 
\n the weights W\n \"\"\"\n \n ### YOUR CODE HERE ###\n self.V = self.V - learning_rate * d_V\n self.U = self.U - learning_rate * d_U\n\n return loss, d_U, d_V", "def fit(self, X, Y):\n X = X.toarray() # convert X to ndarray\n Y = Y.to_numpy() # convert Y to numpy array\n Y[Y == 0] = -1 # convert all zeros to -1, the SVM works with -1 and 1 values\n\n self.w = np.zeros(X.shape[1])\n self.b = 0\n\n for iter in range(self.iterations):\n X, Y = shuffle(X, Y)\n for idx, x_i in enumerate(X):\n dw, db = self.compute_gradients(x_i, Y[idx])\n self.update_gradients(dw, db)", "def model_loss(inp, fake, real_label, fake_label):\n \n \n Dreal,realcls,R1 = gradpen(inp)\n [Dfake,fakecls] = D(fake)\n # 1. Adversarial loss\n \n glabel = tf.ones_like(Dfake)#tf.random.uniform((Dfake.shape), 1-LN, 1)\n dlabelr = tf.ones_like(Dreal)#tf.random.uniform((Dreal.shape), 1-LN, 1)\n dlabelf = tf.zeros_like(Dfake)#tf.random.uniform((Dfake.shape), 0, LN)\n \n \n \n # D has no sigmoid activation: \"from_logits=True\"\n real_loss = tf.keras.losses.binary_crossentropy(\n dlabelr, Dreal, from_logits=True)\n real_loss = tf.reduce_mean(real_loss)\n \n fake_loss = tf.keras.losses.binary_crossentropy(\n dlabelf, Dfake, from_logits=True)\n fake_loss = tf.reduce_mean(fake_loss)\n \n Dadv = 0.5*(real_loss+fake_loss)\n \n Gadv = tf.keras.losses.binary_crossentropy(\n glabel, Dfake, from_logits=True)\n Gadv = tf.reduce_mean(Gadv)\n \n # 2. Classification loss\n \n Dcls = tf.keras.losses.binary_crossentropy(real_label, realcls, from_logits=True)\n Dcls = tf.reduce_mean(Dcls)\n \n Gcls = tf.keras.losses.binary_crossentropy(fake_label, fakecls, from_logits=True)\n Gcls = tf.reduce_mean(Gcls)\n \n # 3. Total loss\n \n Dloss = Dadv + (GAMMA/2)*R1 + LAMBDA_CLS*Dcls\n \n Gloss = Gadv + LAMBDA_CLS*Gcls\n \n return (Dloss, Dadv, Dcls, R1), (Gloss, Gadv, Gcls)", "def losses(self):\n # compute all kinds of losses \n\n # 1. Logits losses for classification \n\n # 2. 
regression loss for bbox \n\n return classification_loss, bbox_reg_loss", "def softmaxCostAndGradient(predicted, target, outputVectors, dataset):\n\n ### YOUR CODE HERE\n scores = outputVectors.dot(predicted.T) # shape = (V, 1)\n y_hat = softmax(scores)\n cost = -scores[target] + np.log(np.sum(np.exp(scores)))\n one_hot_target = np.zeros_like(y_hat)\n one_hot_target[target] = 1\n grad = np.outer((y_hat - one_hot_target), predicted)\n gradPred = outputVectors.T.dot(y_hat - one_hot_target)\n \n '''\n final_predicted = predicted.dot(outputVectors.T)\n probability = softmax(final_predicted)\n cost = -np.log(probability[target])\n \n one_hot_target = np.zeros_like(probability)\n one_hot_target[target] += 1\n dlogits = probability - one_hot_target\n grad = np.outer(predicted, dlogits).T\n gradPred = outputVectors.T.dot(dlogits)\n '''\n ### END YOUR CODE\n\n return cost, gradPred, grad", "def softmax_loss_vectorized(W, X, y, reg):\n num_train = X.shape[0]\n num_class = W.shape[1]\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n scores = X.dot(W)\n temp_matrix = np.zeros(scores.shape)\n \n max_each_row = np.max(scores,axis=1).reshape(-1,1)\n scores -= max_each_row\n summation = np.sum(np.exp(scores),axis=1).reshape(-1,1)\n scores = np.exp(scores)\n scores = np.divide(scores,summation)\n temp_matrix[range(num_train),list(y)] =-1\n scores += temp_matrix\n dW = X.T.dot(scores) / num_train + 2*reg*W \n log_summation = np.log(summation)\n vector = scores[range(num_train),list(y)].reshape(-1,1) \n L = -vector+ log_summation \n loss = np.sum(L)/num_train + reg*np.sum(W*W)\n \n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n \n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def cross_entropoy_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n ############################################################################\n # TODO: Compute the cross-entropy loss and its gradient without explicit #\n # loops. Store the loss in loss and the gradient in dW. If you are not #\n # careful here, it is easy to run into numeric instability. Don't forget #\n # the regularization! 
#\n ############################################################################\n num_train_sample = X.shape[0] \n num_class = W.shape[1]\n # matrix of score of all samples and all class\n p_score = X.dot(W)\n # normalize\n p_score -= np.max(p_score,axis = 1,keepdims = True)\n # vector\n sum_score = np.sum(np.exp(p_score), axis=1, keepdims=True)\n # element-wise division\n score_i = np.exp(p_score)/sum_score\n # loss = -log(P(y)) y is all sample label, P(y) is their scores\n loss = np.sum(-np.log(score_i[np.arange(num_train_sample), y]))\n\n ind = np.zeros_like(score_i)\n ind[np.arange(num_train_sample), y] = 1\n # X:n*m W:m*k score: n*k formular to find X*score---x transpose dot score\n dW = X.T.dot(score_i - ind)\n\n loss /= num_train_sample\n loss += 0.5 * reg * np.sum(W * W)\n dW /= num_train_sample\n # dW += reg*W\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, dW", "def train(self, trainingData, trainingLabels, validationData, validationLabels):\n from sklearn import svm\n \n \"*** YOUR CODE HERE ***\"\n self.sklearn_svm = svm.SVC(C=5, kernel='rbf', gamma=0.005, decision_function_shape='ovo')\n self.sklearn_svm.fit(trainingData, trainingLabels)", "def loss_gradient(self, x, y):\n x_preproc = self._apply_processing(x)\n x_defences, y_defences = self._apply_defences(x_preproc, y, fit=False)\n\n # Adjust the shape of y for loss functions that do not take labels in one-hot encoding\n if self._reduce_labels:\n y_defences = np.argmax(y_defences, axis=1)\n\n grads = self._loss_grads([x_defences, y_defences])[0]\n grads = self._apply_defences_gradient(x_preproc, grads)\n grads = self._apply_processing_gradient(grads)\n assert grads.shape == x_preproc.shape\n\n return grads", "def main():\n\n if len(sys.argv) < 4 or len(sys.argv) > 5:\n print 'Usage: classifier.py data_dimension train_set_path test_set_path [option: add_bias]'; \n return; \n\n # create sets of possible hyperparameter values\n setC = {0.001, 0.01, 0.1, 1, 10, 25, 100}; # trade off regularizer and error minimization\n setRho = {0.001, 0.01, 0.1, 1}; # learning rate for gradient descent \n hyperparams = [setC, setRho];\n \n # create svm classifier for selected data\n dataDim = int(sys.argv[1]);\n trainPath = str(sys.argv[2]);\n testPath = str(sys.argv[3]);\n if len(sys.argv) == 5:\n c = Classifier('svm', hyperparams, dataDim, testPath, trainPath, addBias=True);\n else:\n c = Classifier('svm', hyperparams, dataDim, testPath, trainPath);\n \n print 'Classifier type: ', c.type, \\\n '\\nTraining set: ', trainPath, \\\n '\\nTest set: ', testPath;\n \n print 'Determining hyperparameters to use...';\n c.learnHyperparams(report=1);\n \n print 'Training classifier...';\n c.train();\n \n print 'Performing inference on test set...';\n c.test(); \n \n print '\\nREPORT:', \\\n '\\nUsing hyperparameters: ', c.theta, \\\n '\\nLearned weight vector: ', c.w, \\\n '\\nPrediction accuracy on test set: ', c.accuracy * 100, ' percent';", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n num_train = X.shape[0]\n # print(\"num_train:\", num_train)\n num_classes = W.shape[1]\n # print(\"num_classes:\", num_classes)\n \n scores = X.dot(W) # scores is N*D x D*C -> N*C \n log_c = np.max(scores, axis=1).T\n scores -= log_c[:,None]\n correct_class_score = scores[np.arange(num_train),y]\n exp_scores = np.exp(scores)\n sum_exp_scores = np.sum(np.exp(scores), axis=1)\n proportion = np.exp(correct_class_score) / sum_exp_scores\n loss -= np.sum(np.log(proportion))\n \n # calculating dW = (p - (c = correct c ? 1 : 0)) * x\n correct_class_one_hot = np.zeros_like(scores)\n correct_class_one_hot[np.arange(num_train),y] += 1\n p = np.exp(scores) / sum_exp_scores[:,None] - correct_class_one_hot # N*C / N:1 -> N*C\n dW += X.T.dot(p) # D*N x N*C -> D*C\n\n loss /= num_train\n loss += 0.5 * reg * np.sum(W * W) \n dW /= num_train\n dW += reg * W\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, dW", "def softmaxCostAndGradient(predicted, target, outputVectors, dataset):\n\n ### YOUR CODE HERE\n y_hat = softmax(np.dot(outputVectors,predicted))\n y = np.zeros(outputVectors.shape[0])\n y[target] = 1.0\n\n cost = -np.log(y_hat[target])\n gradPred = np.dot(outputVectors.T,y_hat - y)\n grad = np.outer(y_hat - y,predicted)\n ### END YOUR CODE\n\n return cost, gradPred, grad", "def _loss_gradient(x0, x1, b, w, lam, weights=None):\n nvars = len(w)\n\n # initialize + regularization term\n loss = 0.5 * lam * np.sum(w ** 2)\n gradient = np.zeros(nvars + 1) # first position is b\n gradient[1:] = lam * w\n\n # we need prediction for x\n pred_x_0_1 = [LogisticRegression._sigmoid(x0, b, w), LogisticRegression._sigmoid(x1, b, w)]\n\n # the log likelihood\n log_like_x_0_1 = [np.log(1.0 - pred_x_0_1[0]),\n np.log(pred_x_0_1[1])]\n\n # also need the error for gradient.\n error = [pred_x_0_1[0],\n pred_x_0_1[1] - 1]\n\n if weights is None:\n loss += -np.sum(log_like_x_0_1[1]) - np.sum(log_like_x_0_1[0])\n gradient[0] += np.sum(error[0]) + np.sum(error[1]) # * 1 for bias term \n for k in range(nvars):\n gradient[k + 1] += np.sum(error[0] * x0[:, k]) + np.sum(error[1] * x1[:, k])\n else:\n loss += -np.sum(weights[1] * log_like_x_0_1[1]) - np.sum(weights[0] * log_like_x_0_1[0])\n gradient[0] += np.sum(error[0] * weights[0]) + np.sum(error[1] * weights[1])\n for k in range(nvars):\n gradient[k + 1] += ( np.sum(weights[0] * error[0] * x0[:, k]) +\n np.sum(weights[1] * error[1] * x1[:, k]) )\n return loss, gradient", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n scores = None\n ############################################################################\n # Implementing the forward pass for the fully-connected net, computing #\n # the class scores for X and storing them in the scores variable. 
#\n ############################################################################\n\n l_input = X.copy()\n out = []\n cache = []\n for i in range(self.num_layers - 1):\n # layerwise compute the forward pass and store outputs in out list\n key = ['W' + str(i+1), 'b' + str(i+1)]\n lout, lcache = affine_sigmoid_forward(l_input, self.params[key[0]], self.params[key[1]])\n out.append(lout)\n cache.append(lcache)\n l_input = lout\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n scores, lcache = affine_forward(out[self.num_layers - 2], self.params[key[0]], self.params[key[1]])\n cache.append(lcache)\n \n # regularization parameter compute by summing square of all weight vectors\n R = 0\n for i in range(1, self.num_layers + 1):\n key = 'W' + str(i)\n R += np.sum(np.power(self.params[key], 2))\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n loss, grads = 0.0, {}\n\n ########################\n # Backward pass to compute the loss and gradients\n ########################\n\n loss, dscore = softmax_loss(scores, y)\n # Apply regularization of the loss \n loss = loss + 0.5 * self.reg * R\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n dx, grads[key[0]], grads[key[1]] = affine_backward(dscore, cache[self.num_layers - 1])\n grads[key[0]] += self.reg * self.params[key[0]] \n\n for i in range(self.num_layers - 1, 0, -1):\n key = ['W' + str(i), 'b' + str(i)]\n dx, grads[key[0]], grads[key[1]] = affine_sigmoid_backward(dx, cache[i-1])\n # Apply regularization to the gradients\n grads[key[0]] += self.reg * self.params[key[0]]\n\n return loss, grads", "def loss(data, y_pred):\n # TODO: Try using points other than the training data points for the divergence calculation.\n y_true = data[:,:2]\n p1 = data[:,2:5]\n p2 = data[:,5:8]\n p3 = data[:,8:11]\n p4 = data[:,11:14]\n\n ### Calculate divergence using model predictions:\n\n # Step 1: Use the model to calculate predicted wind field in the surrounding points p1, p2, p3 and p4.\n y_pred_p1 = model(p1)\n y_pred_p2 = model(p2)\n y_pred_p3 = model(p3)\n y_pred_p4 = model(p4)\n\n # Step 2: Calculate the partial derivatives with a three-point centered difference.\n scale_x = self.scaler_data.scale_[0] #scale-factor for x\n scale_y = self.scaler_data.scale_[1] #scale-factor for y\n\n dudx = (y_pred_p1[:, 0] - y_pred_p3[:, 0]) / (p1[:,0] - p3[:,0]) # <- pj = transformed data\n dvdy = (y_pred_p2[:, 1] - y_pred_p4[:, 1]) / (p2[:,1] - p4[:,1]) # <- pj = transformed data\n\n # Step 3: Calculate the divergence.\n divergence = ( dudx / scale_x + dvdy / scale_y ) * np.mean([scale_x, scale_y])\n #tf.print(K.mean(K.abs(divergence)))\n\n # Step 4: Calculate and return total loss.\n return K.mean(K.square(y_true - y_pred)) + gamma*K.mean(K.square(divergence))", "def softmax_classifier(W, input, label, lamda):\n\n ############################################################################\n # TODO: Put your code here\n\n loss = 0.0\n num_train = input.shape[0]\n num_classes = W.shape[1]\n\n score = np.dot(input, W) # (N,C)\n prediction = np.argmax(score, axis=1)\n score -= np.max(score, axis=1, keepdims=True)\n\n # # cross entropy loss\n # # take exponent of the score and normalized with sum of all exponents.\n probs = np.exp(score) # (N,C)\n e_y = np.sum(np.multiply(probs,label), axis=1) # (N,) probability for correct class\n e_sum = np.sum(probs, axis=1) # (N,) sum of probability over all classes\n\n # implementation of loss equivalent l_i = -f_y_i + log sum_j(e^(f_j))\n # loss = np.sum(-np.log(e_y/e_sum)) # sum of 
-log across all samples.\n # loss /= num_train # average loss\n loss = np.sum(-1 * e_y) + np.sum(np.log(e_sum))\n loss /= num_train\n\n loss += lamda * np.sum(W * W) # regularization \n\n # Gradient\n delta_score = probs / e_sum.reshape(num_train,1) # (N,C)\n delta_score -= label # (NxC)\n gradient = np.dot(input.T, delta_score)\n gradient /= num_train\n gradient += lamda * 2 * W\n\n ############################################################################\n\n return loss, gradient, prediction", "def train_step(self, batch):\n user, pos, neg = batch\n with tf.GradientTape() as t:\n\n # Clean Inference\n xu_pos, gamma_u, gamma_pos, emb_pos_feature, theta_u, beta_pos = \\\n self(inputs=(user, pos), training=True)\n xu_neg, _, gamma_neg, _, _, beta_neg = self(inputs=(user, neg), training=True)\n\n result = tf.clip_by_value(xu_pos - xu_neg, -80.0, 1e8)\n loss = tf.reduce_sum(tf.nn.softplus(-result))\n\n # Regularization Component\n reg_loss = self.reg * tf.reduce_sum([tf.nn.l2_loss(gamma_u),\n tf.nn.l2_loss(gamma_pos),\n tf.nn.l2_loss(gamma_neg),\n tf.nn.l2_loss(theta_u)]) * 2 \\\n + self.reg * tf.nn.l2_loss(beta_pos) * 2 \\\n + self.reg * tf.nn.l2_loss(beta_neg) * 2 / 10 \\\n + self.reg * tf.reduce_sum([tf.nn.l2_loss(self.E), tf.nn.l2_loss(self.Bp)]) * 2\n\n # Loss to be optimized\n loss += reg_loss\n\n params = [\n self.Bi,\n self.Gu,\n self.Gi,\n self.Tu,\n self.E,\n self.Bp\n ]\n\n grads = t.gradient(loss, params)\n self.optimizer.apply_gradients(zip(grads, params))\n\n return loss.numpy()", "def ex_2_c(x_train, y_train, x_test, y_test):\n ###########\n ## TODO:\n ## Train SVMs with RBF kernels for different values of the gamma\n ## and plot the variation of the test and training scores with gamma using 'plot_score_vs_gamma' function.\n ## Plot the decision boundary and support vectors for the best value of gamma\n ## using 'plot_svm_decision_boundary' function\n ###########\n gammas = np.arange(0.01, 2, 0.02)\n\n test_scores = np.array([])\n train_scores = np.array([])\n best_svm = None\n best_test_score = 0\n\n for gamma in gammas:\n clf = svm.SVC(kernel='rbf', gamma=gamma)\n clf.fit(x_train, y_train)\n\n test_score = clf.score(x_test, y_test)\n\n if test_score > best_test_score:\n best_test_score = test_score\n best_svm = clf\n\n test_scores = np.append(test_scores, test_score)\n train_scores = np.append(train_scores, clf.score(x_train, y_train))\n\n plot_score_vs_gamma(train_scores, test_scores, gammas)\n\n plot_svm_decision_boundary(clf, x_train, y_train, x_test, y_test)", "def cross_entropoy_loss_naive(W, X, y, reg):\n # pylint: disable=too-many-locals\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n ############################################################################\n # TODO: Compute the cross-entropy loss and its gradient using explicit #\n # loops. Store the loss in loss and the gradient in dW. If you are not #\n # careful here, it is easy to run into numeric instability. Don't forget #\n # the regularization! 
#\n ############################################################################\n num_train_sample = X.shape[0] #row of train data\n num_class = W.shape[1] #column of weight, plane,horse..\n for i in range(num_train_sample):\n p_score = X[i].dot(W) #a row of score corresponding to each class\n p_score -= np.max(p_score) #normalize, highest is 1\n\n ###compute softmax loss\n # sum of scores corresponding to different classes of a sample \n sum_score = np.sum(np.exp(p_score)) \n # each class's score over sum_score of a sample \n score_i = lambda k: np.exp(p_score[k]) / sum_score\n # for the correct label in each sample, find softmax loss over sum\n # iteration make loss sum up all samples\n loss = loss - np.log(score_i(y[i]))\n\n for k in range(num_class):\n p_k = score_i(k)\n # gradient of softmax\n dW[:, k] += (p_k - (k == y[i])) * X[i]\n\n loss /= num_train_sample\n loss += 0.5 * reg * np.sum(W * W)\n dW /= num_train_sample\n dW += reg*W\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, dW", "def train(self, trainingData, trainingLabels, validationData, validationLabels ):\n import sklearn\n from sklearn import svm\n\n \"*** YOUR CODE HERE ***\"\n self.sklearn_classifier = svm.SVC(C=2, gamma=0.025, decision_function_shape='ovo', tol=0.015)\n self.sklearn_classifier.fit(trainingData, trainingLabels)", "def compute_loss_and_gradients(self, X, y):\n # Before running forward and backward pass through the model,\n # clear parameter gradients aggregated from the previous pass\n # TODO Set parameter gradient to zeros\n # Hint: using self.params() might be useful!\n self.fulllayer1.W.grad = np.zeros_like(self.fulllayer1.W.grad)\n self.fulllayer1.B.grad = np.zeros_like(self.fulllayer1.B.grad)\n self.fulllayer2.W.grad = np.zeros_like(self.fulllayer2.W.grad)\n self.fulllayer2.B.grad = np.zeros_like(self.fulllayer2.B.grad)\n\n\n # TODO Compute loss and fill param gradients\n # by running forward and backward passes through the model\n res = self.fulllayer1.forward(X)\n res2 = self.reglayer1.forward(res)\n res3 = self.fulllayer2.forward(res2)\n\n loss, grad = softmax_with_cross_entropy(res3, y)\n\n back3 = self.fulllayer2.backward(grad)\n back2 = self.reglayer1.backward(back3)\n back = self.fulllayer1.backward(back2)\n \n # After that, implement l2 regularization on all params\n # Hint: self.params() is useful again!\n\n for params in self.params().keys():\n # print(params)\n # print(self.params()[params].value)\n loc_loss, loc_grad = l2_regularization(self.params()[params].value, self.reg)\n loss += loc_loss\n self.params()[params].grad += loc_grad\n\n return loss", "def train(self, X, y, batch_size=5, num_epochs=10, alpha=0.1, gamma=0.9, learning=\"Delta\"):\n rem = int(np.ceil(len(X[0])/batch_size))\n for epoch in range(num_epochs):\n art = 0;\n for sample in range(rem):\n end = art + batch_size\n\n # Get a sample (column from X and Y) where the size of the sample is given by the batch size\n sampleX = X[:, art : end]\n sampleY = y[:, art : end]\n #print (sampleX)\n\n # Get the prediction\n results = self.predict(sampleX)\n art += batch_size\n\n if learning == \"Delta\" or learning == \"delta\":\n # Calculate e\n e = np.subtract(sampleY, results)\n\n # Calculate e dot p, where p is the input matrix\n ep = np.dot(e, np.transpose(sampleX))\n\n # Multiply this new matrix by the scalar alpha\n aep = np.multiply(alpha, ep)\n\n # Calculate the new weights 
along with the bias\n self.weights = np.add(self.weights, aep)\n \n elif learning == \"Filtered\" or learning == \"filtered\":\n\n # Calculate e dot p, where p is the input matrix\n ep = np.dot(sampleY, np.transpose(sampleX))\n\n # Multiply this new matrix by the scalar alpha\n aep = np.multiply(alpha, ep)\n\n # Multiply the old weights by some scalar gamma\n gw = np.multiply(1 - gamma, self.weights)\n\n self.weights = np.add(gw, aep)\n\n elif learning == \"Unsupervised_hebb\" or learning == \"unsupervised_hebb\":\n # Add a row of one's to the top of the input matrix\n #newX = np.vstack((np.array([1 for column in range(sampleX.shape[1])]), sampleX))\n\n # Calculate e dot p, where p is the input matrix\n ep = np.dot(results, np.transpose(sampleX))\n\n # Multiply this new matrix by the scalar alpha\n aep = np.multiply(alpha, ep)\n\n # Calculate the new weights along with the bias\n self.weights = np.add(self.weights, aep)", "def loss(self, X, y=None, justLoss=False):\n # N = X.shape[0]\n # mode = 'test' if y is None else 'train'\n scores = None\n\n W1, b1 = self.params['W1'], self.params['b1']\n # W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n\n conv_param = {'stride': 1, 'pad': 0}\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\n #######################################################################\n # TODO: Implement the forward pass for the convolutional neural net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n #######################################################################\n\n conv1, conv_cache = conv_forward(X, W1, b1, conv_param)\n relu1, relu_cache1 = relu_forward(conv1)\n\n # conv2, conv_cache2 = conv_forward(relu1, W2, b2, conv_param)\n # relu2, relu_cache2 = relu_forward(conv2)\n\n scores, maxpool_cache = max_pool_forward(relu1, pool_param)\n scores, forward_cache = fc_forward(scores, W3, b3)\n \n\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n #######################################################################\n # TODO: Implement the backward pass for the convolutional neural net, #\n # storing the loss and gradients in the loss and grads variables. #\n # Compute data loss using softmax, and make sure that grads[k] holds #\n # the gradients for self.params[k]. 
#\n loss, dscores = softmax_loss(scores, y)\n\n if justLoss:\n return loss\n # print(loss)\n\n\n dx_3, grads['W3'], grads['b3'] = fc_backward(dscores, forward_cache)\n dx_3 = max_pool_backward(dx_3, maxpool_cache)\n\n # dx_2 = relu_backward(dx_3, relu_cache2)\n # dx_2, grads['W2'], grads['b2'] = conv_backward(dx_3, conv_cache2)\n\n dx = relu_backward(dx_3, relu_cache1)\n dx, grads['W1'], grads['b1'] = conv_backward(dx, conv_cache)\n \n \n\n return loss, grads", "def blrObjFunction(params, *args):\r\n train_data, labeli = args\r\n \r\n n_data = train_data.shape[0];\r\n n_feature = train_data.shape[1];\r\n error = 0;\r\n error_grad = np.zeros((n_feature+1,1));\r\n \r\n ##################\r\n # YOUR CODE HERE #\r\n ##################\r\n \r\n # Step-1: Compute yn\r\n w = params.reshape((n_feature+1, 1))\r\n wt = w.transpose();\r\n xn = train_data;\r\n xn_with_bias = np.array([])\r\n xn_with_bias_list = [] \r\n for x in range (0, n_data):\r\n xn_with_bias_list.append(np.append ([1], xn[x]))\r\n xn_with_bias = np.vstack(xn_with_bias_list)\r\n yn = sigmoid(np.dot (xn_with_bias, w));\r\n \r\n # Step-2: Compute Error\r\n tn = labeli;\r\n sum1 = tn * np.log (yn);\r\n sum2 = (1-tn) * np.log ((1- yn));\r\n errorr = 0\r\n for i in range (0, n_data):\r\n errorr = errorr + sum1[i] + sum2[i];\r\n errorr = -1 * errorr;\r\n error = errorr[0] \r\n \r\n # Step-3: Compute Error gradient\r\n yn_minus_tn = yn - tn;\r\n xn_with_bias\r\n error_grad = np.dot(xn_with_bias.transpose(), yn_minus_tn);\r\n error_grad = np.squeeze(np.asarray(error_grad));\r\n print (error)\r\n return error, error_grad", "def train(prob: SvmProblem):\n # Define variables\n x = prob.X\n y = prob.Y\n C = prob.C\n C2 = prob.gamma\n\n # Swap params, so SVM solves X* with correct params\n xk = prob.xkernel\n xsk = prob.xskernel\n\n prob.C = C2\n prob.xkernel = xsk\n\n svm = SVM()\n xstar_clf = svm.train(prob.Xstar, prob)\n\n # Get distance to decision boundary\n xi_star = np.zeros(prob.num)\n for i in range(prob.num):\n output = (1 - prob.Y[i] * (xstar_clf.f(prob.Xstar[i])))\n xi_star[i] = max(0, output)\n\n # Replace swapped out params so modified SVM solves X with correct params\n prob.C = C\n prob.xkernel = xk\n\n # Define the inputs to CVXOPT - See Appendix G.4\n P = prob.yi_yj * prob.xi_xj\n q = -np.ones((prob.num, 1))\n G1 = -np.eye(prob.num)\n G2 = np.eye(prob.num)\n G3 = xi_star.reshape(1, -1)\n G = np.vstack((G1, G2))\n G = np.vstack((G, G3))\n h1 = np.zeros(prob.num).reshape(-1, 1)\n h2 = np.repeat((1 + prob.delta) * C, prob.num).reshape(-1, 1)\n h3 = sum(xi_star) * C\n h = np.vstack((h1, h2))\n h = np.vstack((h, h3))\n A = y.reshape(1, -1)\n b = np.zeros(1)\n\n P = matrix(P, tc='d')\n q = matrix(q, tc='d')\n G = matrix(G, tc='d')\n h = matrix(h, tc='d')\n A = matrix(A, tc='d')\n b = matrix(b, tc='d')\n\n # Solve optimization problem using CVXOPT\n solvers.options['show_progress'] = False\n sol = solvers.qp(P, q, G, h, A, b)\n alphas = np.array(sol['x'])\n\n # Get the bias\n bacond1 = (alphas > 1e-8)\n bacond2 = (alphas <= (1 + prob.delta) * C)\n bcond = np.array([a and b for a, b in zip(bacond1, bacond2)]).flatten()\n\n yS = y[bcond]\n xS = x[bcond]\n aS = alphas[bcond]\n\n sum_total = 0\n for s in range(len(yS)):\n inner_total = 0\n for m in range(len(yS)):\n am = aS[m]\n ym = yS[m]\n xm_xs = prob.xkernel(xS[m], xS[s])\n inner_total += am * ym * xm_xs\n sum_total += yS[s] - inner_total\n\n bias = sum_total / len(yS)\n\n # Populate Classifier object to be returned\n clf = Classifier()\n clf.b = bias\n clf.alphas = alphas\n clf.xs = 
x\n clf.ys = y\n clf.kern = prob.xkernel\n return clf", "def loss_total(self, mask):\n\n def loss(y_true, y_pred):\n\n # Compute predicted image with non-hole pixels set to ground truth\n y_comp = mask * y_true + (1-mask) * y_pred\n\n # Compute the vgg features. \n if self.vgg_device:\n with tf.device(self.vgg_device):\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n else:\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n \n # Compute loss components\n l1 = self.loss_valid(mask, y_true, y_pred)\n l2 = self.loss_hole(mask, y_true, y_pred)\n l3 = self.loss_perceptual(vgg_out, vgg_gt, vgg_comp)\n l4 = self.loss_tv(mask, y_comp)\n l5 = - 0.5 * K.sum(1 + self.z_log_var -self.cl - K.square(self.z_mean)/K.exp(self.cl) - K.exp(self.z_log_var)/K.exp(self.cl))\n # Return loss function\n return l1 + 6*l2 + 0.05*l3 + 0.1*l4 +l5 \n return loss", "def update_weights(self, X: np.ndarray, y: np.ndarray, learning_rate: float, reg_coeff: float):\n ################################################################################\n # TODO: Compute the gradient of loss computed above w.r.t the svm weights. #\n # and then update self.w with the computed gradient. #\n # (don't forget learning rate and reg_coeff in update rule) #\n # Don't forget L2-regularization term in your implementation! #\n ################################################################################\n\n # write your code here\n N = len(X)\n gradient = ((-1 * np.sum(((X.T * y).T)[np.where((1 - np.matmul(X, self.weights) * y) > 0)], axis=0)) / N) + reg_coeff * self.weights\n self.weights -= learning_rate * gradient\n\n\n ################################################################################\n # END OF YOUR CODE #\n ################################################################################", "def softmax_loss_vectorized(W, X, y, reg):\n\n #############################################################################\n # TODO: Compute the softmax.ipynb loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n train_images = X.shape[0]\n # Store all the scores in a matrix\n all_scores = np.dot(X,W)\n #First, calculate the normalizing constant for numeric stability\n constant = np.max(all_scores,axis=1)\n normalized_scores = np.transpose(np.subtract(np.transpose(all_scores),constant))\n\n #Then, calculate softmax for the correct scores\n exp_scores = np.exp(all_scores)\n # First, keep track of the sum of values per row\n exp_sum = np.sum(exp_scores,axis=1)\n\n # Finally, calculate the softmax score for every entry\n softmax_scores = np.transpose(exp_scores)/exp_sum # useful when computing gradient\n softmax_scores = np.transpose(softmax_scores)\n # And then, compute the loss\n loss_score = softmax_scores[range(train_images),y]\n loss_score = -1 * np.log(loss_score) #taking the logarithm\n loss += np.sum(loss_score)\n\n #Normalize and regularize the loss\n loss /= train_images\n loss += 0.5*reg*np.sum(W*W)\n\n #Finally, calculate a vectorized gradient\n\n # Calculate the derivative at the correct label\n softmax_scores[range(train_images),y] -= 1\n # Then, make a matrix containing all the gradient values\n gradient_values = np.dot(np.transpose(X),softmax_scores)\n gradient_values = gradient_values\n\n #FINALLY, update the gradient\n dW+= gradient_values\n #And normalize and regularize it\n dW /= train_images\n dW += reg*W\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def evaluate(self, X, y, w):\n value, prediction = self.predict(X, w)\n if self.loss == 'linear' or self.loss == 'logistic':\n Error = np.sum((value - y) ** 2)\n elif self.loss == 'perceptron':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n tmp = - value * newY\n Error = np.sum(tmp[tmp > 0])\n elif self.loss == 'svm':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n tmp = 1 - value * newY\n h = np.sum(tmp[tmp > 0])\n Error = np.sum(w ** 2) + self.C * h\n\n Error = Error / len(y)\n Acc = np.sum(prediction == y) / len(y)\n\n return Error, Acc", "def _train(self, loss):\n config = ConfigParser.ConfigParser()\n config.read(\"config/conf.cfg\")\n\n learning_rate =float(config.get(\"Common Params\", \"learning_rate\"))\n moment = float(config.get(\"Common Params\", \"moment\"))\n opt = tf.train.AdamOptimizer()\n train_step = opt.minimize(loss)\n return train_step\n\n # grads = opt.compute_gradients(self.total_loss)\n\n # apply_gradient_op = opt.apply_gradients(grads, global_step=self.global_step)\n\n #return apply_gradient_op", "def compute_loss(self, X, y):\r\n # INSERT YOUR CODE HERE\r\n #raise Exception('Function not yet implemented!')\r\n\r\n # Computing the loss using the below formula\r\n # Loss = -(1/m)*sum( (y_i)*log(σ(wTx_i)) + (1-y_i)*log(1 - σ(wTx_i)))\r\n # m = number of examples and i for ith example\r\n\r\n loss = 0\r\n X = np.append(X, np.array([[1]]*X.shape[0]), axis=1)\r\n # for idx,example in enumerate(X):\r\n # loss = loss + y[idx] * np.log(self.sigmoid(np.dot(example, self.w))) + (1 - y[idx]) * np.log(1 - self.sigmoid(np.dot(example, self.w)))\r\n # loss = -loss/ X.shape[0]\r\n\r\n loss = -np.mean(y * np.log(self.sigmoid(np.dot(X, self.w))) + (1 - y) * np.log(1 - self.sigmoid(np.dot(X, self.w))))\r\n return loss", "def 
naive_softmax_loss_and_gradient(\n center_word_vec,\n outside_word_idx,\n outside_vectors,\n dataset\n):\n\n ### YOUR CODE HERE\n \n center_word_vec = center_word_vec.reshape((center_word_vec.shape[0], 1))\n center_dot_outside = np.dot(outside_vectors, center_word_vec)\n prob = softmax(center_dot_outside.reshape(-1)).reshape(-1, 1) \n loss = -np.log(prob[outside_word_idx])\n \n prob_complement = prob.copy()\n prob_complement[outside_word_idx] -= 1.0\n \n grad_center_vec = np.dot(outside_vectors.T, prob_complement).flatten()\n \n grad_outside_vecs = np.dot(prob_complement, center_word_vec.T)\n\n ### END YOUR CODE\n\n return loss, grad_center_vec, grad_outside_vecs", "def softmax_loss(x, y):\n loss, dx = None, None\n ###########################################################################\n # TODO: Implement the loss and gradient for softmax classification. This #\n # will be similar to the softmax loss vectorized implementation in #\n # cs231n/classifiers/softmax.py. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n num_train = x.shape[0]\n\n x = np.exp(x)\n temp_sum = np.sum(x, axis = 1, keepdims = True)\n x = x / temp_sum\n softmax_result = x\n trans_y = np.zeros((x.shape[0],x.shape[1]))\n trans_y[np.arange(x.shape[0]), y] += 1\n x = - np.log(x)\n x = x * trans_y\n x_sum = np.sum(x)\n loss = x_sum / num_train\n loss = loss + \n\n dx = softmax_result - trans_y\n dx = dx / num_train\n\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return loss, dx", "def ex_3_b(x_train, y_train, x_test, y_test):\n ###########\n ## TODO:\n ## Train multi-class SVMs with a LINEAR kernel\n ## Use the sklearn.metrics.confusion_matrix to plot the confusion matrix.\n ## Find the index for which you get the highest error rate.\n ## Plot the confusion matrix with plot_confusion_matrix.\n ## Plot the first 10 images classified as the most misclassified digit using plot_mnist.\n ###########\n\n labels = range(1, 6)\n\n lin = svm.SVC(decision_function_shape='ovr', kernel='linear')\n lin.fit(x_train, y_train)\n\n y_test_predict =lin.predict(x_test)\n\n score_train = lin.score(x_train, y_train)\n score_test = lin.score(x_test, y_test)\n\n cm = confusion_matrix(y_test, y_test_predict)\n plot_confusion_matrix(cm, labels)\n #print(cm)\n\n diff_list = y_test_predict == y_test\n\n # indexes of all missclassiefied images\n misclassifieds = [i for i, val in enumerate(diff_list) if val == False]\n\n # remove diagonal elements from cm for later processing\n cm_no_diagonal = cm\n np.fill_diagonal(cm_no_diagonal, 0)\n #print(cm_no_diagonal)\n\n errors_per_class = np.sum(cm_no_diagonal, axis=0)\n #print(errors_per_class)\n\n sel_err = np.array(misclassifieds) # CHANGE ME! Numpy indices to select all images that are misclassified.\n i = np.argmax(errors_per_class) # CHANGE ME! 
Should be the label number corresponding the largest classification error.\n #print(i)\n\n # Plot with mnist plot\n plot_mnist(x_test[sel_err], y_test_predict[sel_err], labels=labels[i], k_plots=10, prefix='Predicted class')", "def binary_classifier(train_data, dim, wi):\n n = np.zeros(dim)\n p = np.zeros(dim)\n p_w = 0\n n_w = 0\n for i in range(len(train_data)):\n if train_data[i][dim] == 1:\n # Positive\n p_w += float(wi[i])\n p += (float(wi[i]) * train_data[i][0:dim])\n elif train_data[i][dim] == -1:\n # Negative\n n_w += float(wi[i])\n n += (float(wi[i]) * train_data[i][0:dim])\n\n p *= float(1) / float(p_w)\n n *= float(1) / float(n_w)\n w_vec = p - n\n t_vec = 0.5 * np.dot(np.transpose(p + n), (p - n))\n\n error = 0\n\n for i in range(len(train_data)):\n point = train_data[i]\n # Predicted positive\n if np.dot(point[0:dim], w_vec) > t_vec:\n if point[dim] == -1:\n # It is a false positive\n error += wi[i]\n # Predicted negative\n else:\n if point[dim] == 1:\n # It is a false positive\n error += wi[i]\n\n return t_vec, w_vec, error", "def cross_entropoy_loss_naive(W, X, y, reg):\n # pylint: disable=too-many-locals\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n \n\n C = W.shape[1]\n# print(\"no. of classes {}\".format(C))\n N,D = X.shape\n# print(\"no. of data {} and dimension {}\".format(N,D))\n for i in range(N):\n xi = X[i,:]\n# print(\"one record shape: {}\".format(xi.shape))\n scores = np.zeros(C)\n for c in range(C):\n w = W[:,c]\n# print(\"weight for one record {}\".format(w.shape))\n scores[c] = xi.dot(w)\n scores -= np.max(scores)\n actual_y = y[i]\n total_score = np.sum(np.exp(scores)) \n loss_i = -scores[actual_y] + np.log(total_score)\n# print('naive score : {}'.format(scores[actual_y]))\n loss += loss_i\n \n #gradient\n probability = np.exp(scores)/total_score\n for j in range(C):\n dW[:,j] += probability[j]*xi\n \n dW[:,actual_y] -= xi\n loss = loss/N\n reg_loss = 0.5*reg*np.sum(W*W)\n loss = loss + reg_loss\n print(\"loss : {}\".format(loss))\n dW = dW/N\n dW += reg*W\n \n \n \n \n\n ############################################################################\n # TODO: Compute the cross-entropy loss and its gradient using explicit #\n # loops. Store the loss in loss and the gradient in dW. If you are not #\n # careful here, it is easy to run into numeric instability. Don't forget #\n # the regularization! 
#\n ############################################################################\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, dW", "def train(input, label, conv, maxpool, softmax, lr=0.005):\n # Forward\n output, loss, accuracy = forward(input, label, conv, maxpool, softmax)\n\n gradient = np.zeros(10)\n gradient[label] = -1 / output[label]\n\n # Backprop\n gradient = softmax.backprop(gradient, lr)\n gradient = maxpool.backprop(gradient)\n gradient = conv.backprop(gradient, lr)\n\n return loss, accuracy", "def loss(self, X_batch, y_batch, learning_rate=1e-3, one_vs_all_index=-1, reg=True):\n #########################################################################\n # TODO: #\n # calculate the loss and the derivative #\n #########################################################################\n loss = 0\n for i in range(X_batch.shape[0]):\n if one_vs_all_index == -1:\n loss += -(y_batch[i] * (np.dot(self.w.T, X_batch[i]))) + np.log(\n 1 + np.exp(np.dot(self.w.T, X_batch[i])))\n else:\n if reg:\n reg = (learning_rate / 2 * X_batch.shape[0]) * np.sum(np.power(self.ws[one_vs_all_index], 2))\n loss += -(y_batch[i] * (np.dot(self.ws[one_vs_all_index].T, X_batch[i]))) + np.log(\n 1 + np.exp(np.dot(self.ws[one_vs_all_index].T, X_batch[i]))) + reg\n else:\n loss += -(y_batch[i] * (np.dot(self.ws[one_vs_all_index].T, X_batch[i]))) + np.log(\n 1 + np.exp(np.dot(self.ws[one_vs_all_index].T, X_batch[i])))\n gradients = np.zeros(X_batch.shape[1])\n if one_vs_all_index == -1:\n dot = np.dot(X_batch, self.w)\n else:\n dot = np.dot(X_batch, self.ws[one_vs_all_index])\n logists = sigmod(dot)\n diff = y_batch - logists\n for index in range(X_batch.shape[0]):\n if one_vs_all_index != -1:\n if reg:\n dot = np.dot(X_batch[index], diff[index])\n gradients[1:] += dot[1:] + (learning_rate / X_batch.shape[0]) * self.ws[one_vs_all_index][1:]\n gradients[0] += dot[0]\n else:\n gradients += np.dot(X_batch[index], diff[index])\n else:\n gradients += np.dot(X_batch[index], diff[index])\n\n return loss, gradients / X_batch.shape[0] # 取均值免得步长过大直接nan\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################" ]
[ "0.7909253", "0.7767066", "0.7405515", "0.7281215", "0.7261097", "0.7241102", "0.7210721", "0.7159015", "0.7136917", "0.7069623", "0.70671463", "0.7060711", "0.7044991", "0.69557554", "0.6928465", "0.6869657", "0.68506974", "0.6844779", "0.6830456", "0.6820079", "0.67838556", "0.6687998", "0.6682661", "0.6679417", "0.6585854", "0.65084213", "0.64975196", "0.64533967", "0.6443581", "0.64322233", "0.64118993", "0.62694615", "0.6190889", "0.6182364", "0.6179974", "0.61765724", "0.6173323", "0.6148572", "0.6143582", "0.61318755", "0.6095418", "0.6081608", "0.606012", "0.6056921", "0.6049978", "0.60452664", "0.60241556", "0.60232204", "0.6013251", "0.6012473", "0.6005783", "0.6005186", "0.60051143", "0.60040224", "0.60017246", "0.5988041", "0.59512335", "0.59500057", "0.59478587", "0.594783", "0.5945739", "0.5942752", "0.5939269", "0.5936991", "0.5929236", "0.5924025", "0.59139824", "0.59094125", "0.5887182", "0.58833855", "0.5868018", "0.5866427", "0.5861045", "0.58593136", "0.5858788", "0.58481336", "0.5834095", "0.5833367", "0.58222735", "0.58213747", "0.5816392", "0.5815699", "0.5814529", "0.5812119", "0.5809893", "0.5806371", "0.5802567", "0.5799207", "0.5797353", "0.57875115", "0.5786879", "0.5784815", "0.5783575", "0.5776365", "0.57737076", "0.5773516", "0.5771607", "0.57696617", "0.57639515", "0.57620037" ]
0.6714259
21
Computes the loss and gradient for binary classification with logistic regression.
def logistic_loss(x, y): N = x.shape[0] x = np.squeeze(x) y_prime = (y + 1)/2 h = 1 /(1 + np.exp(-x)) loss = np.sum(-np.log( (h**y_prime) * ((1-h)**(1-y_prime)) ))/N dx = np.exp(-y*x)*(-y)/(1+np.exp(-y*x))/N return loss, dx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logistic(weights, data, targets, hyperparameters):\n y = logistic_predict(weights, data)\n\n #####################################################################\n # TODO: #\n # Given weights and data, return the averaged loss over all data #\n # points, gradient of parameters, and the probabilities given by #\n # logistic regression. #\n #####################################################################\n f = None\n df = None\n\n f = evaluate(targets, y)[0]\n\n N = len(data)\n M = len(weights) - 1 \n temp = np.ones([N, M + 1])\n temp[: N, : M] = np.array(data)\n\n\n df = np.zeros([M+1, 1])\n\n df[:, 0] = np.array([[np.mean([(y.flatten()[i] - targets.flatten()[i]) * temp[i][j] for i in range(0, N)]) for j in range(0, M + 1)],])\n\n # df = np.matrix([[np.mean([(y[i] - targets[i]) * temp[i][j] for i in range(0, N)]) for j in range(0, M + 1)],])\n\n #####################################################################\n # END OF YOUR CODE #\n #####################################################################\n return f, df, y", "def loss_and_grad(self, X, y):\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n grad = np.zeros_like(self.W)\n grad_tmp = np.zeros_like(self.W)\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and the gradient. Store the gradient\n # as the variable grad.\n # ================================================================ #\n \n exp_a = np.zeros((num_classes,num_train))\n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n \n #if i==0:\n grada = np.zeros(X.shape[1])\n \n for j in range(num_classes):\n if j != y[i]:\n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) \n else: \n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) - X[i,:].T \n\n grad += grad_tmp\n loss += Loss \n \n pass\n\n\n loss /= num_train\n grad /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def _loss_gradient(x0, x1, b, w, lam, weights=None):\n nvars = len(w)\n\n # initialize + regularization term\n loss = 0.5 * lam * np.sum(w ** 2)\n gradient = np.zeros(nvars + 1) # first position is b\n gradient[1:] = lam * w\n\n # we need prediction for x\n pred_x_0_1 = [LogisticRegression._sigmoid(x0, b, w), LogisticRegression._sigmoid(x1, b, w)]\n\n # the log likelihood\n log_like_x_0_1 = [np.log(1.0 - pred_x_0_1[0]),\n np.log(pred_x_0_1[1])]\n\n # also need the error for gradient.\n error = [pred_x_0_1[0],\n pred_x_0_1[1] - 1]\n\n if weights is None:\n loss += -np.sum(log_like_x_0_1[1]) - np.sum(log_like_x_0_1[0])\n gradient[0] += np.sum(error[0]) + np.sum(error[1]) # * 1 for bias term \n for k in range(nvars):\n gradient[k + 1] += np.sum(error[0] * x0[:, k]) + np.sum(error[1] * x1[:, k])\n else:\n loss += -np.sum(weights[1] * log_like_x_0_1[1]) - np.sum(weights[0] * log_like_x_0_1[0])\n gradient[0] += np.sum(error[0] * weights[0]) + np.sum(error[1] * weights[1])\n for k in range(nvars):\n 
gradient[k + 1] += ( np.sum(weights[0] * error[0] * x0[:, k]) +\n np.sum(weights[1] * error[1] * x1[:, k]) )\n return loss, gradient", "def loss(params: hk.Params, batch, labels, xent_weight=self.weights, l1_coeff=self.l1_coef, l2_coeff=self.l2_coef) -> jnp.ndarray:\n logits = net.apply(params, batch)\n labels = jax.nn.one_hot(label, 2)\n\n # Note that in our problem, regularization should be after the AND-mask.\n sum_in_layer = lambda p: jnp.sum(p)\n sum_p_layers = [sum_in_layer(p) for p in jax.tree_leaves(params)]\n l1_loss = sum(sum_p_layers)\n l2_loss = 0.5 * sum(jnp.sum(jnp.square(p)) for p in jax.tree_leaves(params))\n softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits) * xent_weight)\n softmax_xent /= labels.shape[0]\n\n return softmax_xent + l2_coeff * l2_loss + l1_coeff * l1_loss", "def fast_loss_and_grad(self, X, y):\n loss = 0.0\n grad = np.zeros(self.W.shape) # initialize the gradient as zero\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and gradient WITHOUT any for loops.\n # ================================================================ #\n \n num_train = X.shape[0]\n num_classes = self.W.shape[0]\n \n# # vectorized loss calculation #\n class_scores_matrix = np.dot(self.W,X.T) # calculating class scores matrix (C x m): rows are class scores transposes\n class_scores_matrix -= np.max(class_scores_matrix) # considering the possible issue for numerical instability and account for it\n exp_a = np.exp(class_scores_matrix) # calculating the exponents\n \n# y_exp = np.array(exp_a[y, np.arange(0, class_scores_matrix.shape[1])])\n# #print(exp_a[:,:3])\n# #print(y[:3])\n# #print(y_exp[:3])\n \n# tt = np.sum(exp_a,axis=0)\n# tt2 = np.divide(tt,y_exp)\n# print(num_train)\n# tt3 = np.power(tt2,1/num_train)\n# loss = np.log(np.prod(tt3))\n \n \n \n \n (C, D) = self.W.shape\n N = X.shape[0]\n\n scores = np.dot(self.W, X.T)\n scores -= np.max(scores) # shift by log C to avoid numerical instability\n\n y_mat = np.zeros(shape = (C, N))\n y_mat[y, range(N)] = 1\n\n # matrix of all zeros except for a single wx + log C value in each column that corresponds to the\n # quantity we need to subtract from each row of scores\n correct_wx = np.multiply(y_mat, scores)\n\n # create a single row of the correct wx_y + log C values for each data point\n sums_wy = np.sum(correct_wx, axis=0) # sum over each column\n\n exp_scores = np.exp(scores)\n sums_exp = np.sum(exp_scores, axis=0) # sum over each column\n result = np.log(sums_exp)\n\n result -= sums_wy\n\n loss = np.sum(result)\n loss /= num_train\n \n \n # vectorized gradient calculation #\n exp_a_sum = np.sum(exp_a,axis=0)\n\n y_mat_corres = np.zeros(shape = (num_classes, num_train))\n y_mat_corres[y, range(num_train)] = 1\n sum_exp_scores = np.sum(exp_a, axis=0) \n sum_exp_scores = 1.0 / exp_a_sum # division by sum over columns\n exp_a *= sum_exp_scores\n grad = np.dot(exp_a, X)\n grad -= np.dot(y_mat_corres, X)\n grad /= num_train\n \n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def train_logisticRegression(data: np.array, labels: np.array)->None:\n\n n_examples = np.size(data, 0)\n n_features = np.size(data, 1)\n n_categories = np.size(labels, 1)\n\n data = np.hstack((np.ones((n_examples, 1)), data))\n\n print(data[0:5, :])\n\n X_train, X_test, y_train, y_test, idx_test = split_data(data, labels, 0.7)\n\n 
convergence_goal = 1e-3\n learning_rate = 0.01\n\n theta = np.random.uniform(size=((n_features+1, n_categories)))\n\n for i in range(n_categories):\n\n cost_var = 1\n\n previous_cost = 1e6\n iterations = 0\n cost_to_plot = []\n\n while cost_var > convergence_goal:\n iterations += 1\n cost, grad = costFunction(X_train, y_train[:, i], theta[:, i])\n theta[:, i] = update_theta(theta[:, i], grad, learning_rate)\n cost_var = previous_cost - cost\n previous_cost = cost\n if iterations == 1: cost_var = 1\n cost_to_plot.append(cost)\n # print(cost)\n\n plt.plot(range(iterations), cost_to_plot, 'g-', label = 'cost')\n plt.xlabel('iterations')\n plt.ylabel('cost')\n # plt.show()\n\n predictions = lrPredict(theta, X_test)\n\n print(predictions[0:5, :])\n print(y_test[0:5, :])\n\n accuracy = np.mean([p == l for p, l in zip(predictions, y_test)])\n print(\"Accuracy = {}\".format(accuracy))\n\n pass", "def cost_grad_log_reg(w, b, X, y, Multicalss=False):\n if not len(X.shape) == 2:\n X_flattened = X.reshape(X.shape[1] * X.shape[2], -1).T\n else:\n X_flattened = X\n m = X_flattened.shape[1]\n print(m)\n if Multicalss:\n # Multi-class\n\n y_train_reshaped = y.reshape(len(y), 1)\n ohe = OneHotEncoder(categories='auto')\n y_train_reshaped = ohe.fit_transform(y_train_reshaped).toarray()\n print(y_train_reshaped.shape)\n A = softmax(np.dot(X_flattened, w) + b)\n print(A.shape)\n xentropy = -np.sum(y_train_reshaped * np.log(A))\n cost = np.mean(-1 / m * np.sum(y_train_reshaped * np.log(A) + (1 - y_train_reshaped) * np.log(1 - A), axis=1,\n keepdims=True))\n\n dw = 1 / m * np.dot(X_flattened.T, (A - y_train_reshaped))\n db = 1 / m * np.sum(A - y_train_reshaped)\n else:\n # Binary\n A = sigmoid(np.dot(w.T, X_flattened) + b)\n cost = -1 / m * np.sum(y * np.log(A) + (1 - y) * np.log(1 - A), axis=1, keepdims=True)\n\n dw = 1 / m * np.dot(X_flattened, (A - y).T)\n db = 1 / m * np.sum(A - y)\n\n # grads/derivatives\n cost = np.squeeze(cost)\n\n return dw, db, cost", "def logistic_loss(x, y):\n x = x.reshape((-1,))\n y = y.reshape((-1,))\n \n N, = x.shape\n \n y_p = np.where(y == 1,1,0)\n\n p = sigmoid(x)\n loss = -(y_p*np.log(p) + (1-y_p)*np.log(1-p))\n loss = np.sum(loss)/N\n\n dx = (1/N)*(p - y_p)\n \n return loss, dx", "def logistic_regression_vec(theta, trainX, trainY):\n # Add column of ones for bias\n trainX = np.hstack((np.ones((trainX.shape[0], 1)), trainX))\n h = sigmoid(np.inner(trainX, theta))\n # np.log(1-h) can lead to problems for h = 1.0\n h = np.where(h == 1.0, 1 - 1e-12, h)\n fval = -(trainY * np.log(h) + (1 - trainY) * np.log(1 - h)).sum()\n error = h - trainY\n # Negative gradient for a minimization, must be flattened for np.minimize\n grad = np.dot(trainX.T, error).flatten()\n return fval, grad", "def logistic_loss(x, y):\n N = x.shape[0]\n x_flat = np.squeeze(x)\n ex = np.exp(x_flat)\n loss = np.sum(-y*x_flat+np.log(1+ex))/N\n dx = (-y+ex/(1+ex))/N\n # dx = np.reshape(dx,(len(dx),1))\n return loss, dx", "def compute_logistic_gradient(y, tx, w):\n return tx.T.dot(sigmoid(tx.dot(w)) - y) / len(y)", "def logit_cost_grad(self, theta, X, y):\n\n grad = np.zeros(len(theta))\n\n ### YOUR CODE HERE\n sig = utils.sigmoid(theta)\n # sig = np.subtract(sig, y)\n sig = sig - y\n grad = np.dot(X.T, sig) + 2 * self.params['lamb'] * self.regularizer[1](self.weights)\n ### END YOUR CODE\n\n return grad", "def loss(self, X, y=None, reg=0.0):\r\n Ws = self.weights\r\n bs = self.biases\r\n N, D = X.shape # number of samples, number of features per sample\r\n\r\n # Compute the forward pass\r\n self.activations = 
[]\r\n for i in xrange(len(Ws)): # for each set of weights\r\n W,b = Ws[i], bs[i]\r\n if i == 0:\r\n H = np.dot(X,W) + b\r\n else:\r\n H = np.dot(self.activations[-1],W) + b\r\n if i < len(Ws) - 1: # if we're computing hidden activations, apply nonlinear function\r\n H = (H > 0) * (H) + (H < 0) * (H/100.0)\r\n self.activations.append(H)\r\n scores = self.activations[-1]\r\n \r\n # If there's no labels provided, stop here\r\n if y is None:\r\n return scores\r\n\r\n # Compute the loss\r\n exped_scores = np.exp(scores)\r\n sums = np.sum(exped_scores,axis=1)\r\n # softmax classifier loss\r\n data_loss = (-1.0/N) * np.sum(np.log(exped_scores[range(N),y.astype(int)] / sums))\r\n\r\n # loss due to regularization\r\n reg_loss = 0\r\n for i in xrange(len(Ws)):\r\n reg_loss += np.sum(Ws[i]**2)\r\n reg_loss *= reg*(0.5)\r\n\r\n loss = data_loss + reg_loss\r\n \r\n # Compute gradients\r\n weights_grads = []\r\n biases_grads = []\r\n activation_grads = []\r\n for i in xrange(len(Ws)):\r\n weights_grads.append(np.copy(Ws[i]))\r\n biases_grads.append(np.copy(bs[i]))\r\n activation_grads.append(np.copy(self.activations[i]))\r\n\r\n DlossDscores = np.array(exped_scores / (N * np.matrix(sums).T))\r\n DlossDscores[range(N),y.astype(int)] -= (1.0/N)\r\n \r\n for i in xrange(len(Ws)-1,-1,-1):\r\n if i == 0:\r\n weights_grads[0] = np.dot(X.T, activation_grads[0]) + reg*Ws[0]\r\n biases_grads[0] = np.dot(np.ones((1,N)), activation_grads[0])[0]\r\n elif i == len(Ws)-1:\r\n H = self.activations[i-1]\r\n weights_grads[i] = np.dot(H.T, DlossDscores) + reg*Ws[i]\r\n biases_grads[i] = np.dot(np.ones((1,N)), DlossDscores)[0]\r\n dH = np.dot(DlossDscores, Ws[i].T)\r\n activation_grads[i-1] = dH\r\n else:\r\n H = self.activations[i-1]\r\n dH_out = activation_grads[i]\r\n weights_grads[i] = np.dot(H.T, dH_out) + reg*Ws[i]\r\n biases_grads[i] = np.dot(np.ones((1,N)), dH_out)[0]\r\n dH = np.dot(dH_out, Ws[i].T)\r\n dH = dH * (H > 0) + dH/100.0 * (H < 0)\r\n activation_grads[i-1] = dH\r\n \r\n grads = {}\r\n grads['weights'] = weights_grads\r\n grads['biases'] = biases_grads\r\n\r\n return loss, grads", "def compute_gradient_and_loss1(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n for j in xrange(num_classes): # for every class\n if j != y[i]: # don't take the correct ground truth index\n term = s[j] - s_y + 1 # max term with Delta = 1, according to Hinge loss formula\n if term > 0: # trick: take only the term > 0, equal to max(0,...) 
formula\n loss += term # add the possitive term \n if opt == 0: # compute gradient only if opt == 0\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n dW += reg * deriv_abs(W)\n else:\n dW += 2 * reg * W # l2 derivative formula\n \n return loss, dW\n else:\n return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################", "def train_logistic_regression(train_exs: List[SentimentExample],\n feat_extractor: FeatureExtractor) -> LogisticRegressionClassifier:\n indexer = feat_extractor.get_indexer()\n weights = np.transpose(np.zeros(indexer.__len__(), dtype=int))\n learning_rate = 0.1\n for i in range(15):\n for ex in train_exs:\n features_of_str = feat_extractor.extract_features(ex.words, False)\n expo = math.exp(np.dot(weights, features_of_str))\n possibility = expo / (1 + expo)\n gradient_of_w = np.dot(ex.label - possibility, features_of_str)\n weights = np.add(weights, np.dot(learning_rate, gradient_of_w))\n return LogisticRegressionClassifier(weights, feat_extractor)\n\n # Methods for plotting average training loss\n\n # x = np.arange(0, 14)\n # # learning_rate = 1\n # indexer = feat_extractor.get_indexer()\n # weights = np.transpose(np.zeros(indexer.__len__(), dtype=int))\n # avrg_losses = np.zeros(14)\n # for i in range(15):\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # gradient_of_w = np.dot(ex.label - possibility, features_of_str)\n # weights = np.add(weights, gradient_of_w)\n # loss = 0\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # loss += -(ex.label * math.log(possibility) + (1 - ex.label) * math.log(1 - possibility))\n # avrg_losses[i - 1] = loss / train_exs.__len__()\n # plt.plot(x, avrg_losses)\n #\n # # learning_rate = 0.01\n # weights = np.transpose(np.zeros(indexer.__len__(), dtype=int))\n # learning_rate = 0.01\n # avrg_losses = np.zeros(14)\n # for i in range(15):\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # gradient_of_w = np.dot(ex.label - possibility, features_of_str)\n # weights = np.add(weights, 
np.dot(learning_rate, gradient_of_w))\n # loss = 0\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # loss += -(ex.label * math.log(possibility) + (1 - ex.label) * math.log(1 - possibility))\n # avrg_losses[i - 1] = loss / train_exs.__len__()\n # plt.plot(x, avrg_losses)\n #\n # # learning_rate = 0.1\n # weights = np.transpose(np.zeros(indexer.__len__(), dtype=int))\n # learning_rate = 0.1\n # avrg_losses = np.zeros(14)\n # for i in range(15):\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # gradient_of_w = np.dot(ex.label - possibility, features_of_str)\n # weights = np.add(weights, np.dot(learning_rate, gradient_of_w))\n # loss = 0\n # for ex in train_exs:\n # features_of_str = feat_extractor.extract_features(ex.words, False)\n # expo = math.exp(np.dot(weights, features_of_str))\n # possibility = expo / (1 + expo)\n # loss += -(ex.label * math.log(possibility) + (1 - ex.label) * math.log(1 - possibility))\n # avrg_losses[i - 1] = loss / train_exs.__len__()\n # plt.plot(x, avrg_losses)\n # plt.xlabel('Epochs')\n # plt.ylabel('Average Training Loss')\n # plt.legend(['step size 1', 'step size 0.01', 'step size 0.1'], loc='upper left')\n # plt.show()\n # return LogisticRegressionClassifier(weights, feat_extractor)", "def logit_cost(self, theta, X, y):\n\n cost = 0.0\n\n ### YOUR CODE HERE\n sig = utils.sigmoid(theta)\n \n for i in range(0, X.shape[0]):\n cost += (y[i]-1)*theta[i] + np.log(sig[i])\n ### END YOUR CODE\n cost = cost #+ 0.01 * self.regularizer[0](self.weights)\n return cost", "def loss(self, X, y=None, reg=0.0):\n\n self.layers = []\n layers = self.layers\n layers.append(X)\n\n # Unpack variables from the params dictionary\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n N, D = X.shape\n H, C = W2.shape\n\n # Compute the forward pass\n scores = None\n #############################################################################\n # TODO: Perform the forward pass, computing the class scores for the input. #\n # Store the result in the scores variable, which should be an array of #\n # shape (N, C). #\n #############################################################################\n mid = np.maximum(0, X.dot(W1) + b1.reshape(1, -1)) # activation\n scores = mid.dot(W2) + b2.reshape(1, -1)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n # If the targets are not given then jump out, we're done\n if y is None:\n return scores\n\n # Compute the loss\n loss = None\n #############################################################################\n # TODO: Finish the forward pass, and compute the loss. This should include #\n # both the data loss and L2 regularization for W1 and W2. Store the result #\n # in the variable loss, which should be a scalar. Use the Softmax #\n # classifier loss. 
So that your results match ours, multiply the #\n # regularization loss by 0.5 #\n #############################################################################\n exp_score = np.exp(scores)\n exp_score_sum = exp_score.sum(axis=1)\n correct_score = exp_score[np.arange(N), y]\n probability = (correct_score / exp_score_sum).reshape(-1, 1)\n loss = -np.log(probability).sum()\n\n loss /= N\n loss += 0.5 * reg * (np.sum(W1 * W1) + np.sum(W2 * W2))\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n # Backward pass: compute gradients\n grads = {}\n #############################################################################\n # TODO: Compute the backward pass, computing the derivatives of the weights #\n # and biases. Store the results in the grads dictionary. For example, #\n # grads['W1'] should store the gradient on W1, and be a matrix of same size #\n #############################################################################\n des = np.tile((-correct_score / np.square(exp_score_sum)).reshape(-1, 1), (1, C))\n des[np.arange(N), y] += 1.0 / exp_score_sum\n dsoftmax = des * (-np.ones((mid.shape[0], 1)) / probability) * np.exp(scores)\n\n # W2\n grads['W2'] = mid.T.dot(dsoftmax)\n grads['W2'] /= N\n grads['W2'] += reg * W2\n\n # b2\n grads['b2'] = np.ones_like(b2.reshape(1, -1)) * dsoftmax\n grads['b2'] = np.mean(grads['b2'], axis=0).reshape(-1)\n\n # W1\n binary = np.zeros_like(mid)\n binary[mid > 0] = 1\n grads['W1'] = X.T.dot(binary * dsoftmax.dot(W2.T)) # chain rule, compute dmid/dW1 * dscore/dmid * dsoftmax\n grads['W1'] /= N\n grads['W1'] += reg * W1\n\n # b1\n grads['b1'] = np.ones_like(b1.reshape(1, -1)) * binary * dsoftmax.dot(W2.T)\n grads['b1'] = np.mean(grads['b1'], axis=0).reshape(-1)\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, grads", "def compute_gradient_logreg(y, tx, w):\n assert len(set(y).difference({0., 1.})) == 0, \"Class labels must be encoded as {0, 1}\"\n\n s = sigmoid(tx.dot(w)) - y\n grad = tx.T.dot(s)\n\n return grad", "def _classification_loss(self, logits, labels, num_classes):\n labels = tf.to_int64(labels)\n onehot_labels = tf.one_hot(labels, num_classes)\n with tf.name_scope('finetuning_loss'):\n cross_entropy = tf.losses.softmax_cross_entropy(\n onehot_labels=onehot_labels, logits=logits)\n cross_entropy = tf.reduce_mean(cross_entropy)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy + self.weight_decay * regularization\n return loss", "def train_logistic_regression(train_exs: List[SentimentExample], feat_extractor: FeatureExtractor) -> LogisticRegressionClassifier:\n lr = LogisticRegressionClassifier(feat_extractor.corpus_length, feat_extractor)\n alpha = 1e0\n # beta = 1e-4\n for epoch in range(8):\n loss = 0.\n acc = 0\n indices = np.arange(len(train_exs))\n np.random.shuffle(indices)\n for i in indices:\n feat = feat_extractor.feats[i]\n sentimentExample = train_exs[i]\n y = sentimentExample.label\n z = 1 / (1 + np.exp(-feat.dot(np.expand_dims(lr.w, axis=1))))[0, 0]\n loss += -y * np.log(z) - (1 - y) * np.log(1 - z) \\\n # + beta * np.expand_dims(lr.w, axis=0).dot(np.expand_dims(lr.w, axis=1))[0, 0]\n predict = int(feat.dot(np.expand_dims(lr.w, axis=1))[0, 0] > 0)\n acc += (predict == y)\n grad = (z - 
y) * feat.toarray()[0] # + 2 * beta * lr.w\n lr.w = lr.w - alpha * grad\n print(\"epoch {:d}, loss: {:f}, accuracy: {:f}\".format(epoch, loss / len(train_exs), acc / len(train_exs)))\n\n for i in indices:\n feat = feat_extractor.feats[i]\n sentimentExample = train_exs[i]\n y = sentimentExample.label\n z = 1 / (1 + np.exp(-feat.dot(np.expand_dims(lr.w, axis=1))))[0, 0]\n loss += -y * np.log(z) - (1 - y) * np.log(1 - z)\n print(\"training loss: {:f}\".format(loss / len(train_exs)))\n\n return lr", "def train(network,X,y):\r\n \r\n # Get the layer activations\r\n layer_activations = forward(network,X)\r\n logits = layer_activations[-1]\r\n \r\n # Compute the loss and the initial gradient\r\n loss = softmax_crossentropy_with_logits(logits,y)\r\n loss_grad = grad_softmax_crossentropy_with_logits(logits,y)\r\n \r\n for i in range(1, len(network)):\r\n loss_grad = network[len(network) - i].backward(layer_activations[len(network) - i - 1], loss_grad)\r\n #loss_grad = network[0].backward(X, loss_grad)\r\n return np.mean(loss)", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n\tif len(initial_w.shape)==2:\n\t\tinitial_w = initial_w.reshape((max(initial_w.shape)))\n\tif len(y.shape)==2:\n\t\ty = y.reshape((max(y.shape)))\n\n\tw = logistic_regression_gradient_descent(y, tx, initial_w, max_iters, gamma)\n\t\n\tloss = calculate_nll(y, tx, w)\n\n\treturn w, loss", "def compute_loss_logreg(y, tx, w):\n assert len(set(y).difference({0., 1.})) == 0, \"Class labels must be encoded as {0, 1}\"\n\n z = tx.dot(w)\n\n return np.sum(np.log(1 + np.exp(z)) - y * z)", "def reg_logistic_regression(y, tx, l, initial_w, max_iters, gamma):\r\n y_resize = (1+y)/2 #rescales target so that -1 values are changed to 0 \r\n w_list = [initial_w]\r\n loss_list = []\r\n w = initial_w\r\n\r\n for n_iter in range(max_iters):\r\n grad = calculate_gradient_LR(y_resize, tx, w) + 2*l*w\r\n w = w - gamma*grad\r\n loss = compute_loss_LG(y_resize, tx, w)+ l*np.linalg.norm(w)\r\n w_list.append(w)\r\n loss_list.append(loss)\r\n if (n_iter > 1) and (np.abs(loss_list[-1] - loss_list[-2]) <= 1e-8):\r\n break\r\n return w_list[-1],loss_list[-1]", "def compute_gradient_and_loss(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n max_sj = -999\n argmax_sj = -1\n local_loss = 0.0\n for j in xrange(num_classes): # for every class \n if j != y[i]: # don't take the correct ground truth index\n if s[j] > max_sj:\n max_sj = s[j]\n argmax_sj = j\n\n term = 1 + max_sj - s_y # max term with Delta = 1, according to Hinge loss formula \n \n if term > 0:\n local_loss = term\n \n loss += local_loss\n \n for j in xrange(num_classes): # for every class \n if j != y[i]: # don't take the correct ground truth index\n if opt == 0: # compute gradient only if opt == 0\n if j == argmax_sj:\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n \n \n\n# loss /= num_train # 
num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n# dW += reg * deriv_abs(W) #dW[:,-1]\n# else:\n# dW += 2 * reg * W # l2 derivative formula \n dW[:-1,:] += reg * np.sign((W[:-1,:])) #dW[:,-1]\n else:\n dW[:-1,:] += 2 * reg * W[:-1,:] # l2 derivative formula \n return loss, dW\n else:\n return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n ############################################################################# ", "def compute_cost(self,X, y):\r\n num_examples = np.shape(X)[0]\r\n z = np.dot(X,self.theta) + self.bias\r\n exp_z = np.exp(z)\r\n softmax_scores = exp_z / np.sum(exp_z, axis=1, keepdims=True)\r\n \r\n one_hot_y = np.zeros((num_examples,np.max(y)+1))\r\n logloss = np.zeros((num_examples,)) \r\n for i in range(np.shape(X)[0]):\r\n one_hot_y[i,y[i]] = 1\r\n logloss[i] = -np.sum(np.log(softmax_scores[i,:]) * one_hot_y[i,:])\r\n data_loss = np.sum(logloss)\r\n return 1./num_examples * data_loss", "def fit(self, x, y):\n # Note Logistic Regression Runtime\n start_time = time.time()\n\n # Converting Pandas DataFrame to Numpy arrays\n if not type(x).__module__ == np.__name__:\n x = x.to_numpy()\n if not type(y).__module__ == np.__name__:\n y = y.to_numpy()\n\n # Insert a column of 1 in the feature vector X for the bias term in the weights\n x = np.insert(x,0,1,axis=1)\n \n # Verify dimension of input\n if len(x) != len(y):\n print(\"The number of input features vector must be to be the same as the number of target variables\")\n else:\n losses = self.gradient_descent(x,y)\n\n # Note end time\n end_time = time.time()\n\n # Log runtime\n print(\"Logistic Regression training time: {0:.2f}s\".format(end_time - start_time))\n \n return losses", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n \n # Define parameters to store w and loss\n ws = [initial_w]\n losses = []\n w = initial_w\n y = (y + 1) / 2 # [-1, 1] -> [0, 1]\n for n_iter in range(max_iters):\n # computes gradient and loss\n\n grad = compute_gradient_log(y, tx, w)\n loss = compute_loss_log(y, tx, w)\n\n #updates w\n\n w = w - gamma * grad\n # store w and loss\n\n ws.append(w)\n losses.append(loss)\n #print(\"logistic regression: Gradient Descent({bi}/{ti}): loss={l}\".format(\n # bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]), end=\"\\r\")\n \n return w, loss", "def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n 
loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def logistic(weights, data, targets, hyperparameters):\n\n # TODO: Finish this function\n\n return f, df, y", "def log_reg(x_train, y_train):\n\n log_reg_classifier = LogisticRegression(max_iter=1000, solver='lbfgs')\n log_reg_classifier.fit(x_train, y_train)\n return log_reg_classifier\n\n # log_reg_classifier.fit(x_train, y_train)", "def loss(self, logits, labels):\r\n return tf.reduce_mean(tf.keras.losses.binary_crossentropy(labels,logits))", "def logistic_regression(y, tx, initial_w, max_iters, gamma, SGD=False, batch_size=-1) :\n w_start = initial_w\n w = w_start\n loss_old = 0.0\n\n for n_iter in range(max_iters):\n loss = compute_logistic_loss(y, tx, w)\n gradient = compute_logistic_gradient(y, tx, w)\n w = w - gamma * gradient\n\n if check_stop(loss, loss_old):\n #print('break!')\n break;\n loss_old = loss\n\n return w, loss", "def setup_loss(logits, labels):\n predictions = tf.nn.softmax(logits)\n cost = tf.losses.softmax_cross_entropy(onehot_labels=labels,\n logits=logits,\n )\n return predictions, cost", "def logistic_regression(X, Y):\n m, n = X.shape\n theta = np.zeros(n)\n learning_rate = 10\n\n i = 0\n while True:\n i += 1\n prev_theta = theta\n grad = calc_grad(X, Y, theta)\n theta = theta - learning_rate * grad\n if i % 10000 == 0:\n print('Finished %d iterations' % i)\n # plot decision boundary for the ith iteration listed in i_lst\n i_lst = [1, 2, 3, 10, 100, 200, 500, 1000, 10000, 30370, 40000, 50000]\n if i in i_lst:\n save_path = \"output/p01_b_a\" + str(i) + \".png\"\n plot(X, Y, theta, save_path)\n if np.linalg.norm(prev_theta - theta) < 1e-15:\n print('Converged in %d iterations' % i)\n break\n return", "def compute_gradient_and_loss2(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n max_sj = -999\n argmax_sj = -1\n local_loss = 0.0\n for j in xrange(num_classes): # for every class \n if j == y[i]: # don't take the correct ground truth index\n continue\n if s[j] > max_sj:\n max_sj = s[j]\n argmax_sj = j\n\n term = 1 + max_sj - s_y # max term with Delta = 1, according to Hinge loss formula \n\n for j in xrange(num_classes): # for every class \n if j == y[i]: # don't take the correct ground truth index\n continue\n if term > 0: # trick: take only the term > 0, equal to max(0,...) 
formula\n local_loss = term # add the possitive term \n if opt == 0: # compute gradient only if opt == 0\n if j == argmax_sj:\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n \n loss += local_loss \n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n dW[:,-1] += reg * deriv_abs(W[:,-1]) #dW[:,-1]\n else:\n dW[:,-1] += 2 * reg * W[:,-1] # l2 derivative formula\n \n return loss, dW\n else:\n return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n ############################################################################# ", "def reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma, verbose=False): \n reg_loss, reg_grad = add_l2_reg(compute_logistic_loss, \n compute_logistic_gradient,\n lambda_)\n \n return gradient_descent(y, tx, initial_w, max_iters, gamma, reg_loss, reg_grad)", "def logistic_regression(y, tx, initial_w, max_iters, gamma, verbose=False):\n return gradient_descent(y, tx, initial_w, max_iters, gamma, \n compute_logistic_loss, compute_logistic_gradient, verbose=verbose)", "def train_logistic_regression(train_x, train_y):\n\n logistic_regression_model = LogisticRegression(penalty='l2', C=1.0)\n logistic_regression_model.fit(train_x, train_y)\n return logistic_regression_model", "def loss_func(self, logits, targets):\r\n return -np.sum(targets * np.log(logits)) / logits.shape[0]", "def compute_logistic_loss(y, tx, w):\n pred = tx.dot(w)\n y = np.asarray(y)\n logsig = -np.logaddexp(0, -pred) # == np.log(sigmoid(pred))\n\n return np.mean((1 - y) * pred - logsig)", "def add_loss_op(self, logits):\n one_hot_labels = tf.one_hot(\n indices=self.label_placeholder,\n depth=3,\n on_value=1,\n off_value=0,\n dtype=tf.int32\n )\n\n loss = tf.nn.softmax_cross_entropy_with_logits(\n labels=one_hot_labels, logits=logits)\n\n # regularization\n loss = loss + self.config.l2 * tf.reduce_sum([\n tf.nn.l2_loss(w) for w in tf.trainable_variables()])\n\n self.logger.debug(' loss op ready')\n return loss", "def gradient_descent(self, x, y):\n # Initialize weights vector\n self.weights = np.zeros(len(x[0]))\n\n # Storing number of training example in a variable \n n = len(x)\n\n # Initiate variables to keep track of the current and smallest loss recorded\n lowest_loss = sys.float_info.max\n current_loss = sys.float_info.max\n\n # Initiate variables to keep track of step sizes\n norm = sys.float_info.max\n smallest_norm = sys.float_info.max\n\n # Initiate list variable that stores all previous weights\n prev_weights = []\n\n # Initiate list that stores all the errors. 
\n errors = []\n \n # Variable to keep track of the number of iterations that returns a bigger loss than current loss\n k_loss_iteration = 1\n\n # Learning loop\n for i in range(self.max_iter):\n\n # Append current weights\n prev_weights.append(np.array(self.weights))\n \n # Minimizing Loss Function Error by adjusting weights using Gradient Descent\n self.weights += self.learning_rate * (sum([x[i] * (y[i] - self.logistic_function(self.weights.dot(x[i]))) for i in range(n)]) - 2 * self.l2 * self.weights)\n\n # Compute the error of the Cost Function and store it in a list\n current_loss = self.cost(x,y)\n\n if len(errors) > 1 and current_loss > errors[-1]:\n k_loss_iteration += 1\n else: \n k_loss_iteration = 1\n\n errors.append(current_loss)\n \n # Track smallest loss\n if current_loss < lowest_loss:\n lowest_loss = current_loss\n\n # Compute the L2 Norm of the difference between current weights and previous weights\n norm = np.linalg.norm(self.weights - prev_weights[-1])\n\n # Track smallest step size and set it as error threshold\n if norm < smallest_norm:\n smallest_norm = norm\n\n # If this L2 norm is smaller than the error_threshold it means that it converged, hence we can break. In other words, repeat until the step size is too small\n if self.error_threshold != None and norm < self.error_threshold:\n print(\"Converged after {} iterations!\".format(i))\n break\n\n # stop if error hasn't gone down in k iterations\n if k_loss_iteration >= 10:\n print(k_loss_iteration + \" iterations of loss not decreasing on {}th itertion.\".format(i))\n break\n\n # Log final weights\n print(\"Final norm: \" + str(norm) + \"\\nSmallest step size recorded: \" + str(smallest_norm) + \"\\nFinal error: \" + str(current_loss) + \"\\nLowest error recorded: \" + str(lowest_loss) + \"\\nNumber of epochs: \" + str(len(errors)) + \"\\nFinal weights: \" + str(self.weights))", "def compute_loss(self, X, y):\r\n # INSERT YOUR CODE HERE\r\n #raise Exception('Function not yet implemented!')\r\n\r\n # Computing the loss using the below formula\r\n # Loss = -(1/m)*sum( (y_i)*log(σ(wTx_i)) + (1-y_i)*log(1 - σ(wTx_i)))\r\n # m = number of examples and i for ith example\r\n\r\n loss = 0\r\n X = np.append(X, np.array([[1]]*X.shape[0]), axis=1)\r\n # for idx,example in enumerate(X):\r\n # loss = loss + y[idx] * np.log(self.sigmoid(np.dot(example, self.w))) + (1 - y[idx]) * np.log(1 - self.sigmoid(np.dot(example, self.w)))\r\n # loss = -loss/ X.shape[0]\r\n\r\n loss = -np.mean(y * np.log(self.sigmoid(np.dot(X, self.w))) + (1 - y) * np.log(1 - self.sigmoid(np.dot(X, self.w))))\r\n return loss", "def cross_entropoy_loss_naive(W, X, y, reg):\n # pylint: disable=too-many-locals\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n \n\n C = W.shape[1]\n# print(\"no. of classes {}\".format(C))\n N,D = X.shape\n# print(\"no. 
of data {} and dimension {}\".format(N,D))\n for i in range(N):\n xi = X[i,:]\n# print(\"one record shape: {}\".format(xi.shape))\n scores = np.zeros(C)\n for c in range(C):\n w = W[:,c]\n# print(\"weight for one record {}\".format(w.shape))\n scores[c] = xi.dot(w)\n scores -= np.max(scores)\n actual_y = y[i]\n total_score = np.sum(np.exp(scores)) \n loss_i = -scores[actual_y] + np.log(total_score)\n# print('naive score : {}'.format(scores[actual_y]))\n loss += loss_i\n \n #gradient\n probability = np.exp(scores)/total_score\n for j in range(C):\n dW[:,j] += probability[j]*xi\n \n dW[:,actual_y] -= xi\n loss = loss/N\n reg_loss = 0.5*reg*np.sum(W*W)\n loss = loss + reg_loss\n print(\"loss : {}\".format(loss))\n dW = dW/N\n dW += reg*W\n \n \n \n \n\n ############################################################################\n # TODO: Compute the cross-entropy loss and its gradient using explicit #\n # loops. Store the loss in loss and the gradient in dW. If you are not #\n # careful here, it is easy to run into numeric instability. Don't forget #\n # the regularization! #\n ############################################################################\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, dW", "def svm_loss_naive(theta, X, y, reg):\n\n delta = 1.0\n dtheta = np.zeros(theta.shape) # initialize the gradient as zero\n\n # compute the loss function\n\n K = theta.shape[1]\n m = X.shape[0]\n J = 0.0\n for i in xrange(m):\n\tscores = X[i,:].dot(theta)\n\tcorrect_class_score = scores[y[i]]\n\tfor j in xrange(K):\n\t\tif j == y[i]:\n\t\t\tcontinue\n\t\tmargin = max(0,scores[j] - correct_class_score + delta)\n\t\tJ += margin\n\t\tif margin > 0 and j!=y[i]:\t\t\n\t\t\tdtheta[:,j] = dtheta[:,j]+X[i,:]\n\t\t\tdtheta[:,y[i]] = dtheta[:,y[i]]-X[i,:]\n\n\n # Right now the loss is a sum over all training examples, but we want it\n # To be an average instead so we divide by num_train.\n J /= m\n dtheta = dtheta/m\n # Add regularization to the loss.\n J += 0.5 * reg * np.sum(theta * theta)\n dtheta =dtheta + reg*theta\n\n #############################################################################\n # TODO: #\n # Compute the gradient of the loss function and store it dtheta. #\n # Rather that first computing the loss and then computing the derivative, #\n # it may be simpler to compute the derivative at the same time that the #\n # loss is being computed. As a result you may need to modify some of the #\n # code above to compute the gradient. 
#\n #############################################################################\n\n\n return J, dtheta", "def train_logistic_regression(x_train, y_train, learning_rate, fit_intercept=False, max_iter=500):\r\n if fit_intercept:\r\n intercept = np.ones(x_train.shape[0], 1)\r\n x_train = np.hstack((intercept, x_train)) # hstacks merges 2 arrays column wise\r\n weights = np.zeros(x_train.shape[1])\r\n for iteration in range(max_iter):\r\n weights = update_weights(x_train, y_train, weights, learning_rate)\r\n # printing cost for every 100 iterations\r\n if iteration % 100 == 0:\r\n print(calculate_cost(x_train, y_train, weights))\r\n return weights", "def reg_logistic_regression(y, tx, lambdas, initial_w, max_iters, gamma):\n w = initial_w\n for iter in range(max_iters):\n # compute gradient\n grad = reg_logistic_grad(y, tx, w, lambdas)\n # update w\n w = w - gamma * grad\n loss = reg_logistic_loss(y, tx, w, lambdas)\n return w, loss", "def main():\r\n x = [\r\n [ 1,1 ], [ 0,0 ], [ 1,0 ], [ 0,0 ], [ 0,0 ], [ 0,0 ],\r\n [ 0,0 ], [ 0,0 ], [ 1,1 ], [ 0,0 ], [ 0,0 ], [ 1,0 ],\r\n [ 1,0 ], [ 0,0 ], [ 1,1 ], [ 0,0 ], [ 1,0 ], [ 0,0 ]\r\n ]\r\n\r\n # Encoding of the correct classes for the training material\r\n y = [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0]\r\n b = BinaryLogisticRegression(x, y)\r\n b.fit()\r\n b.print_result()", "def binlogreg_train(X, Y_):\n N = X.shape[0]\n\n w = np.random.randn(X.shape[1], 1) # D x 1\n b = np.random.randn(N, 1) # N x 1\n\n for i in range(PARAM_NITER+1):\n # klasifikacijski rezultati\n scores = np.dot(X, w) + b # N x 1\n\n # vjerojatnosti razreda c_1\n probs = sigmoid(scores, y=1) # N x 1\n\n # gubitak\n loss = -1 * float(np.dot(Y_.T, np.log(probs))) # scalar\n\n # dijagnostički ispis\n if i % 10 == 0:\n print(\"iteration {}: loss {}\".format(i, loss))\n\n # if i % 1000 == 0:\n # Y = np.around(probs, decimals=0)\n # decfun = binlogreg_decfun(w, b)\n # bbox = (np.min(X, axis=0), np.max(X, axis=0))\n # data.graph_surface(decfun, bbox, offset=0.5)\n # data.graph_data(X, Y_, Y)\n\n # derivacije gubitka po klasifikacijskom rezultatu\n dL_dscores = np.subtract(probs, Y_) # N x 1\n\n # gradijenti parametara\n grad_w = np.divide(np.dot(X.T, dL_dscores), N) # D x 1\n grad_b = np.divide(np.sum(dL_dscores), N) # 1 x 1\n\n # poboljšani parametri\n w += -PARAM_DELTA * grad_w\n b += -PARAM_DELTA * grad_b\n\n return w, b", "def loss(self, logits, labels):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n loss = tf.nn.softmax_cross_entropy_with_logits(\n labels=labels,\n logits=logits,\n name='softmax_cross_entropy_loss'\n )\n loss = tf.reduce_mean(loss, name='mean_softmax_cross_entropy_loss')\n\n tf.summary.scalar('mean cross entropy loss', loss)\n\n complexity_cost = self._complexity_cost()\n if complexity_cost is not None:\n loss = tf.add(loss, complexity_cost, name='total_loss')\n tf.summary.scalar('total loss', loss)\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n return loss", "def model_loss(inp, fake, real_label, fake_label):\n \n \n Dreal,realcls,R1 = gradpen(inp)\n [Dfake,fakecls] = D(fake)\n # 1. 
Adversarial loss\n \n glabel = tf.ones_like(Dfake)#tf.random.uniform((Dfake.shape), 1-LN, 1)\n dlabelr = tf.ones_like(Dreal)#tf.random.uniform((Dreal.shape), 1-LN, 1)\n dlabelf = tf.zeros_like(Dfake)#tf.random.uniform((Dfake.shape), 0, LN)\n \n \n \n # D has no sigmoid activation: \"from_logits=True\"\n real_loss = tf.keras.losses.binary_crossentropy(\n dlabelr, Dreal, from_logits=True)\n real_loss = tf.reduce_mean(real_loss)\n \n fake_loss = tf.keras.losses.binary_crossentropy(\n dlabelf, Dfake, from_logits=True)\n fake_loss = tf.reduce_mean(fake_loss)\n \n Dadv = 0.5*(real_loss+fake_loss)\n \n Gadv = tf.keras.losses.binary_crossentropy(\n glabel, Dfake, from_logits=True)\n Gadv = tf.reduce_mean(Gadv)\n \n # 2. Classification loss\n \n Dcls = tf.keras.losses.binary_crossentropy(real_label, realcls, from_logits=True)\n Dcls = tf.reduce_mean(Dcls)\n \n Gcls = tf.keras.losses.binary_crossentropy(fake_label, fakecls, from_logits=True)\n Gcls = tf.reduce_mean(Gcls)\n \n # 3. Total loss\n \n Dloss = Dadv + (GAMMA/2)*R1 + LAMBDA_CLS*Dcls\n \n Gloss = Gadv + LAMBDA_CLS*Gcls\n \n return (Dloss, Dadv, Dcls, R1), (Gloss, Gadv, Gcls)", "def reg_logistic_regression(y, tx, lambda_ , initial_w, max_iters, gamma):\n \n # Define parameters to store w and loss\n ws = [initial_w]\n losses = []\n w = initial_w\n y = (y + 1) / 2 # [-1, 1] -> [0, 1]\n \n for n_iter in range(max_iters):\n # computes gradient and loss\n\n grad = compute_gradient_log(y, tx, w)+2*lambda_*np.linalg.norm(w)\n loss = compute_loss_log(y, tx, w)+ lambda_*(np.linalg.norm(w)**2)\n\n #updates w\n\n w = w - gamma * grad\n # store w and loss\n\n ws.append(w)\n losses.append(loss)\n #print(\"regularised logistic regression: Gradient Descent({bi}/{ti}): loss={l}\".format(\n # bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]), end=\"\\r\")\n return w, loss", "def forward(self, logits, labels, loss_type='softmax'):\n # self.epsilon = 0.1 #labelsmooth\n beta = self.beta\n gamma = self.gamma\n\n no_of_classes = logits.shape[1]\n samples_per_cls = torch.Tensor(\n [sum(labels == i) for i in range(logits.shape[1])])\n if torch.cuda.is_available():\n samples_per_cls = samples_per_cls.cuda()\n\n effective_num = 1.0 - torch.pow(beta, samples_per_cls)\n weights = (1.0 - beta) / ((effective_num) + 1e-8)\n\n weights = weights / torch.sum(weights) * no_of_classes\n labels = labels.reshape(-1, 1)\n\n weights = torch.tensor(weights.clone().detach()).float()\n\n if torch.cuda.is_available():\n weights = weights.cuda()\n labels_one_hot = torch.zeros(\n len(labels), no_of_classes).cuda().scatter_(1, labels, 1).cuda()\n\n labels_one_hot = (1 - self.epsilon) * labels_one_hot + \\\n self.epsilon / no_of_classes\n weights = weights.unsqueeze(0)\n weights = weights.repeat(labels_one_hot.shape[0], 1) * labels_one_hot\n weights = weights.sum(1)\n weights = weights.unsqueeze(1)\n weights = weights.repeat(1, no_of_classes)\n\n if loss_type == \"focal\":\n cb_loss = focal_loss(labels_one_hot, logits, weights, gamma)\n elif loss_type == \"sigmoid\":\n cb_loss = F.binary_cross_entropy_with_logits(\n input=logits, target=labels_one_hot, pos_weight=weights)\n elif loss_type == \"softmax\":\n pred = logits.softmax(dim=1)\n cb_loss = F.binary_cross_entropy(\n input=pred, target=labels_one_hot, weight=weights)\n return cb_loss", "def loss(self, X, y):\n\n # Initialize the loss to zero.\n loss = 0.0\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n exp_a = np.zeros((num_classes,num_train))\n # 
================================================================ #\n # YOUR CODE HERE:\n # Calculate the normalized softmax loss. Store it as the variable loss.\n # (That is, calculate the sum of the losses of all the training \n # set margins, and then normalize the loss by the number of \n # training examples.)\n # ================================================================ #\n \n \n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n\n #p[:,i] = exp_a[:,i]/np.sum(exp_a[:,i]) # p now is a valid probability matrix\n #print(p[:,i])\n\n loss += Loss \n #print(Loss,i) \n \n pass\n loss /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss", "def __init__(self, train, validation=None, initial_weight=None,\n loss_function_name='logistic',\n calculate_weight='gradient',\n regularizer=None, regularizer_p=None):\n # Initialize the super class with given data.\n # Transform the y into {0,1}\n y, tx = train\n y[np.where(y < 0)] = 0\n train = (y, tx)\n if validation:\n val_y, val_tx = validation\n val_y[np.where(val_y < 0)] = 0\n validation = (val_y, val_tx)\n super(LogisticRegression, self).__init__(train, validation,\n initial_weight=initial_weight,\n loss_function_name=loss_function_name,\n cal_weight=calculate_weight,\n regularizer=regularizer,\n regularizer_p=regularizer_p)\n # Set predicted label\n self.pred_label = [-1, 1]", "def loss(self, logits, labels):\n loss = tf.nn.sigmoid_cross_entropy_with_logits(labels, logits)\n return tf.reduce_mean(loss)", "def loss(self, X, y=None, lambda_reg=0.0):\n \n # Unpack variables from the params dictionary\n N, D = X.shape\n\n # Compute the forward pass\n #############################################################################\n # TODO: Perform the forward pass, computing the class scores for the input. #\n # Store the result in the scores variable, which should be an array of #\n # shape (N, C). #\n #############################################################################\n scores, cache_list = self.network_forward(X)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n \n #############################################################################\n # TODO: Compute for the loss. This should include L2 regularization for #\n # the weights of each layer. #\n #############################################################################\n loss_softmax, dloss_softmax = self.softmax_cross_entropy_loss(scores, y)\n loss = loss_softmax\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n \n #############################################################################\n # TODO: Compute the derivatives of the weights and biases. Store the #\n # results in the grads dictionary. 
For example, grads['W1'] should store #\n # the gradient on the weights W of the first layer, and be a matrix of #\n # same size. #\n #############################################################################\n grads = self.network_backward(dloss_softmax, cache_list)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, grads", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\r\n y_resize = (1+y)/2 #rescales target so that -1 values are changed to 0 \r\n w_list = [initial_w]\r\n loss_list = []\r\n w = initial_w\r\n \r\n for n_iter in range(max_iters):\r\n grad = calculate_gradient_LR(y_resize, tx, w)\r\n w = w - gamma * grad\r\n loss = compute_loss_LG(y_resize, tx, w)\r\n w_list.append(w)\r\n loss_list.append(loss)\r\n return w_list[-1],loss_list[-1]", "def costFunction(theta,X,y):\n m = X.shape[0]\n J = 0\n h = sigmoid (np.dot(X,theta))\n \n J = (1/m)* ((-np.dot(y.T,(np.log(h)))) - np.dot((1 - y).T,(np.log(1-h))))\n \n #grad = (1/m) * np.dot(X.T,(h-y))\n grad = (1/m) * np.dot((h.T - y), X).T\n \n return J, grad", "def logistic(weights, data, targets, hyperparameters):\n \n t = np.transpose(np.repeat(np.reshape(weights[:-1], (len(weights)-1, 1)), len(data), axis = 1))\n f_e = data * t\n z_sums = np.sum(f_e, axis=1)\n y = sigmoid(z_sums +weights[-1])\n f = np.sum(np.log(1 + np.exp(-z_sums - weights[-1])) + (1 - np.transpose(targets)) * (z_sums + weights[-1]))\n df = np.sum(data * np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0)\n df = np.append(df, np.sum(np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0))\n df = np.reshape(df, ((len(df), 1)))\n\n return f, df, np.reshape(y, (len(y), 1))", "def do_loss(logits, labels):\n return tf.reduce_sum(tf.square(logits - labels))", "def logistic(weights, data, targets, hyperparameters):\n # TODO: Finish this function\n n_data = len(data)\n dim_data = len(data[0])\n\n f = 0\n y = logistic_predict(weights, data)\n\n data = mod_data(data)\n\n # dl/dw_j = SUM(x_ij * (t_i - (1 - sigmoid(z))))\n df = np.dot(data.T, (1.0 * targets) - (1 - y))\n\n # to calculate f, we need to sum the negative log of all y iff target is 0 and (1-y) iff target is 1\n f = -1.0 * np.dot(targets.T, np.log(1 - y)) - 1.0 * np.dot(1 - targets.T, np.log(y))\n\n # calculate P(C=0|x_i) for all x_i \n return f[0,0], df, y", "def loss(self, X, Y, lmd):\n P, _ = self.forward(X)\n loss = np.mean(-np.log(np.einsum('ij,ji->i', Y.T, P)))\n\n reg = 0 # Regularization term\n for w in self.W:\n reg += np.sum(np.square(w))\n\n reg *= lmd\n\n cost = loss + reg\n\n return cost", "def _bce_loss_with_logits(output, labels, **kwargs):\n return F.binary_cross_entropy_with_logits(output, labels, reduction='none', **kwargs)", "def logistic_regression(y, tx, initial_w, max_iters, gamma, debug = False):\n losses, ws = gradient_descent(y, tx, initial_w, max_iters, gamma, loss_f = model_logistic.loss, grad_f = model_logistic.grad, debug = debug)\n return get_last_ans(ws, losses)", "def loss_fn(model):\n with flax.nn.stateful(state) as new_state:\n with flax.nn.stochastic(prng_key):\n logits = model(batch['image'])\n loss = cross_entropy_loss(logits, batch['label'])\n # TODO(britefury): check if applying L2 regularization to weights but\n # *not* biases improves results\n weight_penalty_params = 
jax.tree_leaves(model.params)\n weight_l2 = sum([jnp.sum(x ** 2)\n for x in weight_penalty_params\n if x.ndim > 1])\n weight_penalty = l2_reg * 0.5 * weight_l2\n loss = loss + weight_penalty\n return loss, (new_state, logits)", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n num_train = X.shape[0]\n # print(\"num_train:\", num_train)\n num_classes = W.shape[1]\n # print(\"num_classes:\", num_classes)\n \n for i in range(num_train):\n scores = X[i].dot(W) # scores is 1 * C\n correct_class = y[i]\n \n # LOSS DUE TO TRAINING SAMPLE = -log(exp^correct_score / sum(exp^all_other_scores))\n log_c = np.max(scores)\n scores -= log_c\n correct_class_score = scores[correct_class]\n exp_scores = np.exp(scores)\n sum_exp_scores = np.sum(np.exp(scores))\n proportion = np.exp(correct_class_score) / sum_exp_scores\n loss -= np.log(proportion)\n # print(proportion)\n \n # ALTERNATIVELY: (we split the log)\n# loss -= scores[y[i]]\n# loss += np.log(np.sum(np.exp(X[i].dot(W))))\n \n # UPDATE GRADIENT\n for j in range(num_classes):\n p = np.exp(scores[j]) / sum_exp_scores # \"probability\" of class j\n dW[:,j] += (p - (j == y[i])) * X[i,:]\n # dW is D by C\n\n loss /= num_train\n loss += reg * np.sum(W * W) \n dW /= num_train\n dW += reg * 2 * W\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n \n return loss, dW", "def reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma, SGD=False, batch_size=-1):\n w_start = initial_w\n w = w_start\n loss_old = 0.0\n\n if SGD:\n if(batch_size==-1): # compute automatically the maximum batch size\n batch_size = int(y.shape[0]/max_iters)\n for minibatch_y, minibatch_tx in batch_iter(y, tx, batch_size, max_iters):\n loss = compute_loss(minibatch_y, minibatch_tx, w)\n gradient = compute_gradient(minibatch_y, minibatch_tx, w)\n loss_reg, gradient_reg = regularizer(lambda_, w)\n loss = loss + loss_reg\n gradient = gradient + gradient_reg\n w = w - gamma * gradient\n\n if check_stop(loss, loss_old):\n #print('break!')\n break;\n loss_old = loss\n return w, loss\n\n else:\n for n_iter in range(max_iters):\n loss = compute_logistic_loss(y, tx, w)\n gradient = compute_logistic_gradient(y, tx, w)\n loss_reg, gradient_reg = regularizer(lambda_, w)\n loss_new = loss + loss_reg\n gradient = gradient + gradient_reg\n w = w - gamma * gradient\n\n if check_stop(loss, loss_old):\n #print('break!')\n break;\n loss_old = loss\n return w, loss", "def log_loss_objective(y_true: npt.NDArray, y_pred: npt.NDArray) -> Tuple[npt.NDArray, npt.NDArray]:\n y_pred = sigmoid(y_pred)\n grad = y_pred - y_true\n hess = y_pred * (1.0 - y_pred)\n return grad, hess", "def loss(self, X_batch, y_batch, learning_rate=1e-3, one_vs_all_index=-1, reg=True):\n #########################################################################\n # TODO: #\n # calculate the loss and the derivative #\n #########################################################################\n loss = 0\n for i in range(X_batch.shape[0]):\n if one_vs_all_index == -1:\n loss 
+= -(y_batch[i] * (np.dot(self.w.T, X_batch[i]))) + np.log(\n 1 + np.exp(np.dot(self.w.T, X_batch[i])))\n else:\n if reg:\n reg = (learning_rate / 2 * X_batch.shape[0]) * np.sum(np.power(self.ws[one_vs_all_index], 2))\n loss += -(y_batch[i] * (np.dot(self.ws[one_vs_all_index].T, X_batch[i]))) + np.log(\n 1 + np.exp(np.dot(self.ws[one_vs_all_index].T, X_batch[i]))) + reg\n else:\n loss += -(y_batch[i] * (np.dot(self.ws[one_vs_all_index].T, X_batch[i]))) + np.log(\n 1 + np.exp(np.dot(self.ws[one_vs_all_index].T, X_batch[i])))\n gradients = np.zeros(X_batch.shape[1])\n if one_vs_all_index == -1:\n dot = np.dot(X_batch, self.w)\n else:\n dot = np.dot(X_batch, self.ws[one_vs_all_index])\n logists = sigmod(dot)\n diff = y_batch - logists\n for index in range(X_batch.shape[0]):\n if one_vs_all_index != -1:\n if reg:\n dot = np.dot(X_batch[index], diff[index])\n gradients[1:] += dot[1:] + (learning_rate / X_batch.shape[0]) * self.ws[one_vs_all_index][1:]\n gradients[0] += dot[0]\n else:\n gradients += np.dot(X_batch[index], diff[index])\n else:\n gradients += np.dot(X_batch[index], diff[index])\n\n return loss, gradients / X_batch.shape[0] # 取均值免得步长过大直接nan\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################", "def logistic_regression(y, tx, initial_w=None, max_iters=100, gamma=0.009, batch_size=1):\n # init parameters\n if np.all(initial_w == None): initial_w = np.zeros(tx.shape[1])\n threshold = 1e-8\n losses = []\n y = (1 + y) / 2\n # build tx\n w = initial_w\n\n # start the logistic regression\n for i in range(max_iters):\n # get loss and update w.\n for y_batch, tx_batch in batch_iter(y, tx, batch_size=batch_size, num_batches=1):\n w, _ = learning_by_gradient_descent(y_batch, tx_batch, w, gamma)\n # converge criterion\n losses.append(calculate_loss(y,tx,w))\n if len(losses) > 1 and np.abs(losses[-1] - losses[-2]) < threshold:\n break\n #if i % int(max_iters/5) == 0:\n #print(losses[-1],i,'/{tot}'.format(tot=max_iters))\n\n return w,losses[-1]", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n scores = None\n ############################################################################\n # Implementing the forward pass for the fully-connected net, computing #\n # the class scores for X and storing them in the scores variable. 
#\n ############################################################################\n\n l_input = X.copy()\n out = []\n cache = []\n for i in range(self.num_layers - 1):\n # layerwise compute the forward pass and store outputs in out list\n key = ['W' + str(i+1), 'b' + str(i+1)]\n lout, lcache = affine_sigmoid_forward(l_input, self.params[key[0]], self.params[key[1]])\n out.append(lout)\n cache.append(lcache)\n l_input = lout\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n scores, lcache = affine_forward(out[self.num_layers - 2], self.params[key[0]], self.params[key[1]])\n cache.append(lcache)\n \n # regularization parameter compute by summing square of all weight vectors\n R = 0\n for i in range(1, self.num_layers + 1):\n key = 'W' + str(i)\n R += np.sum(np.power(self.params[key], 2))\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n loss, grads = 0.0, {}\n\n ########################\n # Backward pass to compute the loss and gradients\n ########################\n\n loss, dscore = softmax_loss(scores, y)\n # Apply regularization of the loss \n loss = loss + 0.5 * self.reg * R\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n dx, grads[key[0]], grads[key[1]] = affine_backward(dscore, cache[self.num_layers - 1])\n grads[key[0]] += self.reg * self.params[key[0]] \n\n for i in range(self.num_layers - 1, 0, -1):\n key = ['W' + str(i), 'b' + str(i)]\n dx, grads[key[0]], grads[key[1]] = affine_sigmoid_backward(dx, cache[i-1])\n # Apply regularization to the gradients\n grads[key[0]] += self.reg * self.params[key[0]]\n\n return loss, grads", "def build_loss(self):\n import tensorflow as tf\n\n y_1d = [tf.reduce_sum(tf.multiply(self.variables[\"y\"][i], self.variables[\"y_action\"][i]), axis=1) for i in range(len(self.variables[\"y\"]))]\n loss = np.sum([tf.nn.l2_loss(y_1d[i] - self.variables[\"y_true\"]) for i in range(len(y_1d))])\n\n l1_reg = 0\n l2_reg = 0\n\n keys = sorted(self.variables.keys())\n keys = [key for key in keys if critere_keys(key) and \"W\" in key]\n for key in keys:\n l1_reg += tf.reduce_sum(tf.abs(self.variables[key]))\n l2_reg += tf.nn.l2_loss(self.variables[key])\n\n self.loss = loss + self.alpha_reg * l1_reg + self.beta_reg * l2_reg\n\n self.train_step = tf.train.RMSPropOptimizer(self.decay_learning_rate,\n decay=0.99, momentum=0., centered=True).minimize(self.loss, global_step=self.global_step)", "def gradientFunction(theta, X, y):\n y = y[:, 0]\n m = y.shape # number of training samples\n grad = X.T.dot(sigmoid(theta.dot(X.T))-1*y)\n grad /= m\n return grad", "def logistic_regression(X, y, fold_number=10, iteration=1000):\n \n # add additional dimension and set y=-1 if y==0\n X['x0'] = 1\n y[y==0] = -1\n \n # data preparation\n D = X.shape[1]\n fold = KFold(n_splits=fold_number)\n eta = 0.01 / 4600\n \n # record 10 output\n loss_function_list = []\n w_list = []\n \n for train_index, test_index in fold.split(X, y):\n X_train, X_test = X.iloc[train_index], X.iloc[test_index]\n y_train, y_test = y.iloc[train_index], y.iloc[test_index]\n length = X_train.shape[0]\n w = np.zeros(D) # initialize w\n loss_function = []\n for ite in range(iteration+1): \n gradient = sum((1-expit(y_train.values[i]*X_train.values[i].dot(w)))*y_train.values[i]*X_train.values[i] for i in range(length))\n loss_function.append(sum(np.log(expit(y_train.values[i]*X_train.values[i].dot(w))) for i in range(length)))\n w += eta * gradient\n w_list.append(w)\n loss_function_list.append(loss_function)\n \n return w_list, loss_function_list", "def 
reg_logistic_regression(y, tx, lambda_, initial_w=None, max_iters=100, gamma=0.009, batch_size=1):\n # init parameters\n if np.all(initial_w == None): initial_w = np.zeros(tx.shape[1])\n threshold = 1e-8\n losses = []\n y = (1 + y) / 2\n # build tx\n w = initial_w\n\n # start the logistic regression\n for iter in range(max_iters):\n # get loss and update w.\n for y_batch, tx_batch in batch_iter(y, tx, batch_size=batch_size, num_batches=1):\n w, loss = learning_by_penalized_gradient_descent(y_batch, tx_batch, w, gamma, lambda_)\n # converge criterion\n loss = calculate_loss(y, tx, w) + lambda_ * np.squeeze(w.T.dot(w))\n losses.append(loss)\n if len(losses) > 1 and np.abs(losses[-1] - losses[-2]) < threshold:\n break\n #if iter % int(max_iters/5) == 0:\n #print(losses[-1],iter,'/{tot}'.format(tot=max_iters))\n\n return w,losses[-1]", "def linear_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the linear loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n \n #loss number\n for i, y_i in enumerate(y):\n _y = 0.0\n for j, w in enumerate(W):\n _y += (w*X[i][j])\n loss += 1/(2*len(y))*(y_i - _y)**2\n \n for w in W:\n loss = loss + reg * w * w\n #gradient\n for j in range(len(W)):\n dW[j] = reg * 2 * W[j]\n for i, yi in enumerate(y):\n dW[j] -= 1 / (len(y)) * X[i][j] * y[i]\n for k, wk in enumerate(W):\n dW[j] += 1 / len(y) * X[i][j] * X[i][k] * wk\n \n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def l2_loss_vectorized(self, W, X, y, reg):\n loss = 0.0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n num_train = X.shape[0]\n num_of_classes = W.shape[1]\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the perceptron loss, storing the #\n # result in loss and the gradient in dW #\n #############################################################################\n\n\n scores = X.dot(W) - y\n\n loss = np.mean(0.5 * (scores**2))\n\n grad = np.empty_like(W)\n grad = X.T.dot(scores)\n dW = grad\n dW /= num_train\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def costFunction(self,theta, X, y): \n m = len(y)\n h = self.sigmoid(X@theta)\n J = 1 / m * (- y.T @ self.log(h) - (1-y).T @ self.log(1-h)) \n # grad = 1/ m * X.T @ (h - y)\n return J", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n return least_squares_SGD(y, tx, initial_w, max_iters, gamma, loss_function=logistic_loss, gradient=logistic_grad)", "def svm_loss_naive(theta, X, y, reg):\n\n K = theta.shape[1] # number of classes\n m = X.shape[0] # number of examples\n\n J = 0.0\n dtheta = np.zeros(theta.shape) # initialize the gradient as zero\n delta = 1.0\n\n #############################################################################\n # TODO: #\n # Compute the loss function and store it in J. 
#\n # Do not forget the regularization term! #\n # code above to compute the gradient. #\n # 8-10 lines of code expected #\n #############################################################################\n\n for i in xrange(m):\n h = np.dot(X[i,:], theta)\n hy = h[y[i]]\n for j in xrange(K):\n if j == y[i]:\n continue\n l = h[j] - hy + delta\n if l > 0:\n J += l\n dtheta[:, j] += X[i, :]\n dtheta[:, y[i]] -= X[i, :]\n\n J /= m\n dtheta /= m\n J += 0.5 * reg * np.sum(theta * theta)\n dtheta += reg * theta\n\n\n #############################################################################\n # TODO: #\n # Compute the gradient of the loss function and store it dtheta. #\n # Rather that first computing the loss and then computing the derivative, #\n # it may be simpler to compute the derivative at the same time that the #\n # loss is being computed. As a result you may need to modify some of the #\n # code above to compute the gradient. #\n #############################################################################\n\n return J, dtheta", "def svm_loss_naive(W, X, y, reg):\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n for i in xrange(num_train):\n scores = X[i].dot(W)\n correct_class_score = scores[y[i]]\n for j in xrange(num_classes):\n if j == y[i]:\n continue\n margin = scores[j] - correct_class_score + 1 # note delta = 1\n if margin > 0:\n loss += margin\n dW[:,j] += X[i,:].T\n dW[:,y[i]] -= X[i,:].T\n \n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n dW/= num_train\n\n # Add regularization to the loss.\n loss += 0.5 * reg * np.sum(W * W)\n dW += reg*W\n\n #############################################################################\n # TODO: #\n # Compute the gradient of the loss function and store it dW. #\n # Rather that first computing the loss and then computing the derivative, #\n # it may be simpler to compute the derivative at the same time that the #\n # loss is being computed. As a result you may need to modify some of the #\n # code above to compute the gradient. #\n #############################################################################\n\n\n return loss, dW", "def compute_loss(self, x, label):\n # Forward propagation\n y_hat = self.forward_pass(x)\n return -np.log(y_hat[label])", "def svm_loss_naive(W, X, y, reg):\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n # compute the loss and the gradient\n num_classes = W.shape[0]\n num_train = X.shape[1]\n loss = 0.0\n for i in xrange(num_train):\n scores = W.dot(X[:, i])\n correct_class_score = scores[y[i]]\n for j in xrange(num_classes):\n if j == y[i]:\n continue\n margin = scores[j] - correct_class_score + 1 # note delta = 1\n if margin > 0:\n loss += margin\n dW[j] += X[:, i]\n dW[y[i]] -= X[:, i]\n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n\n # Add regularization to the loss.\n loss += 0.5 * reg * np.sum(W * W)\n\n #############################################################################\n # TODO: #\n # Compute the gradient of the loss function and store it dW. #\n # Rather that first computing the loss and then computing the derivative, #\n # it may be simpler to compute the derivative at the same time that the #\n # loss is being computed. 
As a result you may need to modify some of the #\n # code above to compute the gradient. #\n #############################################################################\n\n\n return loss, (dW / num_train)", "def learning_by_penalized_gradient(y, tx, w, gamma, lambda_):\n\n #on test avec Newton\n\n loss,gradient,_ = penalized_logistic_regression(y,tx,w,lambda_)\n\n w = w - gamma*gradient\n return loss, w,gradient", "def svm_loss_naive(W, X, y, reg):\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n c = np.array([0, 1])\n pred_class = []\n for i in range(num_train):\n scores = X[i].dot(W)\n pred_class.append(c[np.argmax(scores)])\n #print('scores size:',scores.shape)\n correct_class_score = scores[y[i]]\n for j in range(num_classes):\n if j == y[i]:\n continue\n margin = scores[j] - correct_class_score + 1 # note delta = 1\n if margin > 0:\n loss += margin\n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n\n # Add regularization to the loss.\n loss += reg * np.sum(W * W)\n print(pred_class)\n\n return loss, dW, pred_class", "def logistic_pen(weights, data, targets, hyperparameters):\n\n wr = hyperparameters['weight_regularization']\n \n t = np.transpose(np.repeat(np.reshape(weights[:-1], (len(weights)-1, 1)), len(data), axis = 1))\n f_e = data * t\n z_sums = np.sum(f_e, axis=1)\n y = sigmoid(z_sums +weights[-1])\n f = np.sum(np.log(1 + np.exp(-z_sums - weights[-1])) + (1 - np.transpose(targets)) * (z_sums + weights[-1]))\n df = np.sum(data * np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0)\n df = np.append(df, np.sum(np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0))\n\n f += np.dot(weights[:-1].transpose()[0], weights[:-1].transpose()[0]) * wr / 2\n df = np.reshape(df, ((len(df), 1)))\n df += np.reshape(np.append(weights[:-1] * wr, 0), (len(weights), 1))\n\n f += (weights[-1, 0] ** 2) * wr / 2\n df[-1] += weights[-1,0] * wr \n\n return f, df, np.reshape(y, (len(y), 1))", "def cost_function(self, X, y, theta_list, bias):\n total_samples = len(y)\n loss = 0\n\n for i in range(total_samples):\n hypothesis = bias\n hypothesis += np.matmul(X[i], np.array(theta_list).T)\n \n de = 1.0 + np.exp(-hypothesis)\n sigmoidhypothesis = 1.0/de\n\n loss += (y[i]*np.log(sigmoidhypothesis)) + ((1-y[i])*(np.log(1 - sigmoidhypothesis)))\n\n return -1 * (loss/total_samples) #loss calculation", "def compute_loss_and_gradients(self, X, y):\n # Before running forward and backward pass through the model,\n # clear parameter gradients aggregated from the previous pass\n # TODO Set parameter gradient to zeros\n # Hint: using self.params() might be useful!\n self.fulllayer1.W.grad = np.zeros_like(self.fulllayer1.W.grad)\n self.fulllayer1.B.grad = np.zeros_like(self.fulllayer1.B.grad)\n self.fulllayer2.W.grad = np.zeros_like(self.fulllayer2.W.grad)\n self.fulllayer2.B.grad = np.zeros_like(self.fulllayer2.B.grad)\n\n\n # TODO Compute loss and fill param gradients\n # by running forward and backward passes through the model\n res = self.fulllayer1.forward(X)\n res2 = self.reglayer1.forward(res)\n res3 = self.fulllayer2.forward(res2)\n\n loss, grad = softmax_with_cross_entropy(res3, y)\n\n back3 = self.fulllayer2.backward(grad)\n back2 = 
self.reglayer1.backward(back3)\n back = self.fulllayer1.backward(back2)\n \n # After that, implement l2 regularization on all params\n # Hint: self.params() is useful again!\n\n for params in self.params().keys():\n # print(params)\n # print(self.params()[params].value)\n loc_loss, loc_grad = l2_regularization(self.params()[params].value, self.reg)\n loss += loc_loss\n self.params()[params].grad += loc_grad\n\n return loss", "def run_logistic(X_train, X_test, y_train, y_test, C=1, penalty = 'l2', solver = 'lbfgs'):\n \n logreg = LogisticRegression(fit_intercept=True, C=C, penalty = penalty, solver = solver)\n logreg.fit(X_train, y_train)\n get_scores(logreg, X_train, X_test, y_train, y_test)", "def gradientDescent(self,X, y, theta): \n # number of instances\n m = len(y)\n J_history = np.zeros((self.NUM_ITERS,1))\n for i in range(self.NUM_ITERS):\n h = self.sigmoid(X@theta)\n grad = 1 / m * X.T @ (h - y)\n theta = theta - self.ALPHA * grad \n J_history[i] = self.costFunction(theta, X, y)\n \n \n return theta, J_history", "def computeGradient(self, X, y, w):\n n = len(X)\n if self.loss == 'linear':\n gradient = -2 * np.dot(X.T, (y - X.dot(w)))\n elif self.loss == 'logistic':\n g = self.logistic(X, w)\n gradient = -2 * np.dot(X.T, (y - g) * g * (1 - g))\n elif self.loss == 'perceptron':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n index = ((np.dot(X, w) >= 0).astype(int) != y)\n usedX = X[index[:, 0]]\n usedY = newY[index[:, 0]]\n gradient = -np.dot(usedX.T, usedY)\n elif self.loss == 'svm':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n index = (np.dot(X, w) * newY < 1)\n usedX = X[index[:, 0]]\n usedY = newY[index[:, 0]]\n gradient = 2 * w - self.C * np.dot(usedX.T, usedY)\n gradient[0] = gradient[0] + 2 * w[0]\n\n return gradient", "def svm_loss_naive(W, X, y, reg):\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n for i in range(num_train):\n scores = X[i].dot(W)\n correct_class_score = scores[y[i]]\n for j in range(num_classes):\n if j == y[i]:\n continue\n margin = scores[j] - correct_class_score + 1 # note delta = 1\n if margin > 0:\n loss += margin\n dW[:, y[i]] -= X[i].transpose()\n dW[:, j] += X[i].transpose() # chain rule\n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n dW /= num_train\n\n # Add regularization to the loss.\n loss += 0.5 * reg * np.sum(W * W)\n dW += reg * W\n\n #############################################################################\n # TODO: #\n # Compute the gradient of the loss function and store it dW. #\n # Rather that first computing the loss and then computing the derivative, #\n # it may be simpler to compute the derivative at the same time that the #\n # loss is being computed. As a result you may need to modify some of the #\n # code above to compute the gradient. 
#\n #############################################################################\n\n return loss, dW", "def loss(logits, labels):\n labels = tf.to_int64(labels)\n# labels = tf.to_float(labels)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='xentropy')\n# y_conv = tf.nn.softmax(logits)\n# cross_entropy = -tf.reduce_sum(labels*tf.log(y_conv))\n return tf.reduce_mean(cross_entropy, name='xentropy_mean')", "def loss_supervised(logits, labels):\n # Convert from sparse integer labels in the range [0, NUM_CLASSSES)\n # to 1-hot dense float vectors (that is we will have batch_size vectors,\n # each with NUM_CLASSES values, all of which are 0.0 except there will\n # be a 1.0 in the entry corresponding to the label).\n batch_size = tf.size(labels)\n labels = tf.expand_dims(labels, 1)\n indices = tf.expand_dims(tf.range(0, batch_size), 1)\n print(indices)\n concated = tf.concat(axis=1, values=[indices, labels])\n onehot_labels = tf.sparse_to_dense(\n concated, tf.stack([batch_size, NUM_CLASSES]), 1.0, 0.0)\n print(batch_size)\n print(NUM_CLASSES)\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits,\n labels=onehot_labels,\n name='xentropy')\n loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')\n return loss", "def multiclass_log_loss(y_true, y_pred):\n eps=1e-15\n predictions = np.clip(y_pred, eps, 1 - eps)\n\n # normalize row sums to 1\n predictions /= predictions.sum(axis=1)[:, np.newaxis]\n\n actual = np.zeros(y_pred.shape)\n n_samples = actual.shape[0]\n actual[np.arange(n_samples), y_true.astype(int)] = 1\n vectsum = np.sum(actual * np.log(predictions))\n loss = -1.0 / n_samples * vectsum\n return loss", "def grad_softmax_cross_entropy_loss(logit, labels):\n return softmax(logit) - labels", "def logistic_pen(weights, data, targets, hyperparameters):\n\n # TODO: Finish this function\n\n return f, df, y", "def train(self, X, y):\n h1_input, h1_output, h2_input, h2_output, final_output = self.forwardpass_train(\n X\n )\n # calculate average loss per one data\n train_loss = self.cross_entropy_loss(y, final_output)\n dW1, db1, dW2, db2, dW3, db3 = self.backpropagation(\n X, y, h1_input, h1_output, h2_input, h2_output, final_output\n )\n self.update_weights(dW1, db1, dW2, db2, dW3, db3)\n return train_loss" ]
[ "0.70904523", "0.7072484", "0.6969767", "0.6957916", "0.6951513", "0.6896685", "0.68876517", "0.683848", "0.6820088", "0.6813277", "0.68107325", "0.6786551", "0.6774134", "0.6744257", "0.6732447", "0.673168", "0.6722383", "0.67222416", "0.6706524", "0.66843295", "0.66585207", "0.6642568", "0.66419953", "0.6640991", "0.6637391", "0.661756", "0.6609336", "0.66052675", "0.65835667", "0.6577445", "0.65757465", "0.65740454", "0.6571075", "0.65477043", "0.6537141", "0.6535521", "0.6520551", "0.6507616", "0.6493694", "0.6482632", "0.64780146", "0.646388", "0.6462339", "0.6460735", "0.6458589", "0.64369434", "0.6435884", "0.6435875", "0.64297026", "0.64242256", "0.64218694", "0.64200544", "0.6417215", "0.6414982", "0.64112663", "0.6404764", "0.6402876", "0.63979983", "0.6394697", "0.6393266", "0.6391512", "0.6391128", "0.6384885", "0.6383775", "0.63782376", "0.6369904", "0.63570154", "0.6351363", "0.63468456", "0.63462806", "0.6337424", "0.6328112", "0.63203335", "0.6318724", "0.63073695", "0.63067955", "0.63052475", "0.6300551", "0.63000095", "0.6290579", "0.6285846", "0.6284428", "0.6266821", "0.62660855", "0.626049", "0.62512416", "0.62501335", "0.6246813", "0.6245686", "0.6243362", "0.62431073", "0.62416905", "0.6236703", "0.62347656", "0.6231162", "0.6230135", "0.622951", "0.62286055", "0.62282383", "0.6222841" ]
0.6678023
20
Computes the loss and gradient for softmax classification.
def softmax_loss(x, y):
    N, C = x.shape
    loss, dx = 0, np.zeros(x.shape)
    for i in range(N):
        loss += -np.log(np.exp(x[i,y[i]])/np.sum(np.exp(x[i,:])))
        dx[i,:] = np.exp(x[i,:])/np.sum(np.exp(x[i,:]))
        dx[i,y[i]] += (-1)
    loss /= N
    dx /= N
    return loss, dx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loss_and_grad(self, X, y):\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n grad = np.zeros_like(self.W)\n grad_tmp = np.zeros_like(self.W)\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and the gradient. Store the gradient\n # as the variable grad.\n # ================================================================ #\n \n exp_a = np.zeros((num_classes,num_train))\n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n \n #if i==0:\n grada = np.zeros(X.shape[1])\n \n for j in range(num_classes):\n if j != y[i]:\n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) \n else: \n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) - X[i,:].T \n\n grad += grad_tmp\n loss += Loss \n \n pass\n\n\n loss /= num_train\n grad /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def fast_loss_and_grad(self, X, y):\n loss = 0.0\n grad = np.zeros(self.W.shape) # initialize the gradient as zero\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and gradient WITHOUT any for loops.\n # ================================================================ #\n \n num_train = X.shape[0]\n num_classes = self.W.shape[0]\n \n# # vectorized loss calculation #\n class_scores_matrix = np.dot(self.W,X.T) # calculating class scores matrix (C x m): rows are class scores transposes\n class_scores_matrix -= np.max(class_scores_matrix) # considering the possible issue for numerical instability and account for it\n exp_a = np.exp(class_scores_matrix) # calculating the exponents\n \n# y_exp = np.array(exp_a[y, np.arange(0, class_scores_matrix.shape[1])])\n# #print(exp_a[:,:3])\n# #print(y[:3])\n# #print(y_exp[:3])\n \n# tt = np.sum(exp_a,axis=0)\n# tt2 = np.divide(tt,y_exp)\n# print(num_train)\n# tt3 = np.power(tt2,1/num_train)\n# loss = np.log(np.prod(tt3))\n \n \n \n \n (C, D) = self.W.shape\n N = X.shape[0]\n\n scores = np.dot(self.W, X.T)\n scores -= np.max(scores) # shift by log C to avoid numerical instability\n\n y_mat = np.zeros(shape = (C, N))\n y_mat[y, range(N)] = 1\n\n # matrix of all zeros except for a single wx + log C value in each column that corresponds to the\n # quantity we need to subtract from each row of scores\n correct_wx = np.multiply(y_mat, scores)\n\n # create a single row of the correct wx_y + log C values for each data point\n sums_wy = np.sum(correct_wx, axis=0) # sum over each column\n\n exp_scores = np.exp(scores)\n sums_exp = np.sum(exp_scores, axis=0) # sum over each column\n result = np.log(sums_exp)\n\n result -= sums_wy\n\n loss = np.sum(result)\n loss /= num_train\n \n \n # vectorized gradient calculation #\n exp_a_sum = np.sum(exp_a,axis=0)\n\n y_mat_corres = np.zeros(shape = (num_classes, num_train))\n y_mat_corres[y, range(num_train)] = 1\n sum_exp_scores = np.sum(exp_a, axis=0) \n sum_exp_scores = 1.0 / exp_a_sum # 
division by sum over columns\n exp_a *= sum_exp_scores\n grad = np.dot(exp_a, X)\n grad -= np.dot(y_mat_corres, X)\n grad /= num_train\n \n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def softmax_loss(x, y):\n loss, dx = None, None\n ###########################################################################\n # TODO: Implement the loss and gradient for softmax classification. This #\n # will be similar to the softmax loss vectorized implementation in #\n # cs231n/classifiers/softmax.py. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n num_train = x.shape[0]\n\n x = np.exp(x)\n temp_sum = np.sum(x, axis = 1, keepdims = True)\n x = x / temp_sum\n softmax_result = x\n trans_y = np.zeros((x.shape[0],x.shape[1]))\n trans_y[np.arange(x.shape[0]), y] += 1\n x = - np.log(x)\n x = x * trans_y\n x_sum = np.sum(x)\n loss = x_sum / num_train\n loss = loss + \n\n dx = softmax_result - trans_y\n dx = dx / num_train\n\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return loss, dx", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n # print \"dW's shape\", dW.shape\n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax.ipynb loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n # For every training image\n for train_image in xrange(num_train):\n # Multiply the weights by the image to get the scores\n scores = X[train_image].dot(W)\n # print(scores)\n # And then get the correct score\n correct_label = y[train_image]\n correct_score = scores[correct_label]\n # TODO: Right up to here\n # And then get the score of every other classifier\n all_scores = np.sum(scores)\n # Add a normalizing factor for numeric stability\n normalizing_constant = np.max(scores)\n scores -= normalizing_constant\n correct_score -= normalizing_constant\n #Calculating the softmax values\n softmax = np.exp(correct_score)/np.sum(np.exp(scores))\n\n # print(\"Correct score softmax\",softmax)\n\n # And calculating the loss\n loss += -1*np.log(softmax)\n # print loss\n #TODO: Loss computation is also correct\n\n # And calculating the gradient\n\n # First, update the Weight matrix with the correct example's derivative\n dW[:,correct_label] += (softmax-1)*np.transpose(X[train_image])\n\n # Then do the same for the wrong cases\n incorrect_labels = [x for x in xrange(num_classes) if x != correct_label]\n # Now, update the weights\n for label_index in incorrect_labels:\n #Calculating the softmax for a wrong label\n incorrect_label_softmax = np.exp(scores[label_index])/(np.sum(np.exp(scores)))\n # Calculating the derivative\n necessary_weight = incorrect_label_softmax*np.transpose(X[train_image])\n # Updating the weights\n dW[:,label_index] += necessary_weight\n\n\n # Divide the loss\n loss /= num_train\n dW /= num_train\n\n # Now, do regularization\n loss += 0.5*reg*np.sum(W*W)# Penalize big weights\n dW += reg*W\n\n\n\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_classifier(W, input, label, lamda):\n\n ############################################################################\n # TODO: Put your code here\n\n loss = 0.0\n num_train = input.shape[0]\n num_classes = W.shape[1]\n\n score = np.dot(input, W) # (N,C)\n prediction = np.argmax(score, axis=1)\n score -= np.max(score, axis=1, keepdims=True)\n\n # # cross entropy loss\n # # take exponent of the score and normalized with sum of all exponents.\n probs = np.exp(score) # (N,C)\n e_y = np.sum(np.multiply(probs,label), axis=1) # (N,) probability for correct class\n e_sum = np.sum(probs, axis=1) # (N,) sum of probability over all classes\n\n # implementation of loss equivalent l_i = -f_y_i + log sum_j(e^(f_j))\n # loss = np.sum(-np.log(e_y/e_sum)) # sum of -log across all samples.\n # loss /= num_train # average loss\n loss = np.sum(-1 * e_y) + np.sum(np.log(e_sum))\n loss /= num_train\n\n loss += lamda * np.sum(W * W) # regularization \n\n # Gradient\n delta_score = probs / e_sum.reshape(num_train,1) # (N,C)\n delta_score -= label # (NxC)\n gradient = np.dot(input.T, delta_score)\n gradient /= num_train\n gradient += lamda * 2 * W\n\n ############################################################################\n\n return loss, gradient, prediction", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. 
If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n num_train = X.shape[0]\n # print(\"num_train:\", num_train)\n num_classes = W.shape[1]\n # print(\"num_classes:\", num_classes)\n \n for i in range(num_train):\n scores = X[i].dot(W) # scores is 1 * C\n correct_class = y[i]\n \n # LOSS DUE TO TRAINING SAMPLE = -log(exp^correct_score / sum(exp^all_other_scores))\n log_c = np.max(scores)\n scores -= log_c\n correct_class_score = scores[correct_class]\n exp_scores = np.exp(scores)\n sum_exp_scores = np.sum(np.exp(scores))\n proportion = np.exp(correct_class_score) / sum_exp_scores\n loss -= np.log(proportion)\n # print(proportion)\n \n # ALTERNATIVELY: (we split the log)\n# loss -= scores[y[i]]\n# loss += np.log(np.sum(np.exp(X[i].dot(W))))\n \n # UPDATE GRADIENT\n for j in range(num_classes):\n p = np.exp(scores[j]) / sum_exp_scores # \"probability\" of class j\n dW[:,j] += (p - (j == y[i])) * X[i,:]\n # dW is D by C\n\n loss /= num_train\n loss += reg * np.sum(W * W) \n dW /= num_train\n dW += reg * 2 * W\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n \n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train=X.shape[0]\n num_class=W.shape[1]\n num_feature=X.shape[1]\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n for i in range(num_train):\n #W*Xi C*1\n x=np.exp(np.dot(W.T,X[i,:]))\n denominator=np.sum(x)\n numerator=x[y[i]]\n loss-=np.log(numerator/denominator)\n #numerator and denominator\n #for j in range(num_class):\n normalize_score=x/denominator\n nm=np.reshape(normalize_score, (num_class, 1))\n \n #CxD\n dscore=nm.dot(np.reshape(X[i,:],(1,num_feature)))\n #print(dscore.shape)\n\n dscore[y[i],:]-=X[i,:]\n dW+=dscore.T\n\n loss/=num_train\n dW = dW/num_train + reg*W\n #\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train = X.shape[0]\n num_class = W.shape[1]\n #scores = np.zeros(num_train,num_class)\n scores = X.dot(W)\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n for i in range(num_train):\n # compute Li\n fmax= np.max(scores[i])\n scores[i] -= fmax\n correct_class_score = scores[i,y[i]]\n M = np.exp(correct_class_score)/np.sum(np.exp(scores[i]))\n loss += -np.log(M)\n for j in range(num_class):\n N = np.exp(scores[i,j])/np.sum(np.exp(scores[i]))\n if j ==y[i]:\n dW[:,y[i]]+= (M-1)*X[i].T\n else:\n dW[:,j] += N*X[i].T \n loss /= num_train\n loss += reg*np.sum(W*W)\n dW /= num_train \n dW += 2*reg*W \n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n num_train = X.shape[0]\n num_classe = W.shape[1]\n loss = 0.0\n\n for i in range(num_train): #pour chaque image de l'ensemble d'entrainement\n scores = X[i].dot(W)\n scores -= max(scores)\n\n correct_class_score = scores[y[i]] #y[i]=c\n e_syi = np.exp(correct_class_score)\n e_sj = np.sum(np.exp(scores))\n\n loss -= np.log(e_syi/e_sj)\n\n for k in range(num_classe): #pour chaque classe\n dW[:, k] += ((np.exp(scores[k])/e_sj) - (k == y[i])) * X[i].T\n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n dW/= num_train\n\n # Add regularization to the loss.\n loss += reg * np.sum(W * W)\n dW += 2 * reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmaxCostAndGradient(predicted, target, outputVectors, dataset):\n\n ### YOUR CODE HERE\n scores = outputVectors.dot(predicted.T) # shape = (V, 1)\n y_hat = softmax(scores)\n cost = -scores[target] + np.log(np.sum(np.exp(scores)))\n one_hot_target = np.zeros_like(y_hat)\n one_hot_target[target] = 1\n grad = np.outer((y_hat - one_hot_target), predicted)\n gradPred = outputVectors.T.dot(y_hat - one_hot_target)\n \n '''\n final_predicted = predicted.dot(outputVectors.T)\n probability = softmax(final_predicted)\n cost = -np.log(probability[target])\n \n one_hot_target = np.zeros_like(probability)\n one_hot_target[target] += 1\n dlogits = probability - one_hot_target\n grad = np.outer(predicted, dlogits).T\n gradPred = outputVectors.T.dot(dlogits)\n '''\n ### END YOUR CODE\n\n return cost, gradPred, grad", "def softmaxCostAndGradient(predicted, target, outputVectors, dataset):\n\n ### YOUR CODE HERE\n y_hat = softmax(np.dot(outputVectors,predicted))\n y = np.zeros(outputVectors.shape[0])\n y[target] = 1.0\n\n cost = -np.log(y_hat[target])\n gradPred = np.dot(outputVectors.T,y_hat - y)\n grad = np.outer(y_hat - y,predicted)\n ### END YOUR CODE\n\n return cost, gradPred, grad", "def softmax_loss_vectorized(W, X, y, reg):\n\n #############################################################################\n # TODO: Compute the softmax.ipynb loss and its 
gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n train_images = X.shape[0]\n # Store all the scores in a matrix\n all_scores = np.dot(X,W)\n #First, calculate the normalizing constant for numeric stability\n constant = np.max(all_scores,axis=1)\n normalized_scores = np.transpose(np.subtract(np.transpose(all_scores),constant))\n\n #Then, calculate softmax for the correct scores\n exp_scores = np.exp(all_scores)\n # First, keep track of the sum of values per row\n exp_sum = np.sum(exp_scores,axis=1)\n\n # Finally, calculate the softmax score for every entry\n softmax_scores = np.transpose(exp_scores)/exp_sum # useful when computing gradient\n softmax_scores = np.transpose(softmax_scores)\n # And then, compute the loss\n loss_score = softmax_scores[range(train_images),y]\n loss_score = -1 * np.log(loss_score) #taking the logarithm\n loss += np.sum(loss_score)\n\n #Normalize and regularize the loss\n loss /= train_images\n loss += 0.5*reg*np.sum(W*W)\n\n #Finally, calculate a vectorized gradient\n\n # Calculate the derivative at the correct label\n softmax_scores[range(train_images),y] -= 1\n # Then, make a matrix containing all the gradient values\n gradient_values = np.dot(np.transpose(X),softmax_scores)\n gradient_values = gradient_values\n\n #FINALLY, update the gradient\n dW+= gradient_values\n #And normalize and regularize it\n dW /= train_images\n dW += reg*W\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\r\n # Initialize the loss and gradient to zero.\r\n loss = 0.0\r\n dW = np.zeros_like(W)\r\n num_train = X.shape[1] # d*n\r\n num_class = W.shape[0]\r\n\r\n #############################################################################\r\n # Compute the softmax loss and its gradient using explicit loops. #\r\n # Store the loss in loss and the gradient in dW. If you are not careful #\r\n # here, it is easy to run into numeric instability. Don't forget the #\r\n # regularization! #\r\n #############################################################################\r\n loss = 0.0\r\n for i in range(num_train):\r\n X_i = X[:,i] # D*1\r\n score_i = W.dot(X_i)\r\n score_i -= np.max(score_i) #C*1 but keepdims = false so it becomes 1*C\r\n exp_score_i = np.exp(score_i)\r\n probs_i = exp_score_i/np.sum(exp_score_i) #1*C\r\n correct_logprobs_i = -np.log(probs_i[y[i]])\r\n loss += correct_logprobs_i\r\n \r\n dscore_i = probs_i.reshape(num_class,-1)#c*1\r\n dscore_i[y[i]] -= 1 #C*1\r\n X_i = X_i.reshape(1,-1)# 1*D\r\n dW += dscore_i.dot(X_i)\r\n \r\n loss /= num_train\r\n loss += 0.5*reg*np.sum(W*W)\r\n\r\n dW /= num_train\r\n dW += reg*W\r\n \r\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. 
Don't forget the #\n # regularization! #\n #############################################################################\n num_classes = W.shape[1]\n #print('num_classes = ', num_classes)\n num_train = X.shape[0]\n #print('num_train = ', num_train)\n \n min_score = 0.0\n shifted_scores = np.zeros(W.shape[1])\n #max_score = np.zeros(W.shape[1])\n max_score = 0.0\n \n loss_array = np.zeros(y.shape[0])\n for i in range(num_train):\n scores = X[i].dot(W)\n #print('scores dimensions = ', scores.shape)\n #print('scores = ', scores)\n #print('i =', i, 'y = ', y[i])\n min_score = np.min(scores)\n max_score = np.max(scores)\n #print(min_score,max_score)\n shifted_scores = np.multiply(-1,scores + abs(min_score))\n #print(scores)\n #print(shifted_scores)\n exp_scores = np.exp(shifted_scores)\n norm = np.amax(exp_scores)\n norm_scores = np.divide(exp_scores,norm)\n loss_array[i] = np.multiply(-1,np.log(norm_scores[y[i]]/(np.sum(norm_scores)-norm_scores[y[i]])))\n #print(loss_array)\n for j in range(num_classes): \n\t\n if j == y[i]: \n dW[:,j] = np.multiply(norm_scores[y[i]],1-norm_scores[y[i]])\n else:\n dW[:,j] = np.multiply(-1,np.multiply(norm_scores[y[i]],norm_scores[y[j]]))\n\t\t\t\n\t\t\t\n loss = np.amax(loss_array)\n\n # Add regularization to the loss.\n loss = 0.5 * reg * np.sum(W * W) + loss\n \n \n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train = X.shape[1]\n num_classes = W.shape[0]\n #############################################################################\n # Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n for i in range(num_train): # for each image\n # compute the score\n scores = W.dot(X[:, i])\n\n # shift the values of f so that the highest number is 0:\n scores -= np.max(scores)\n\n # compute the loss\n loss += -np.log(np.exp(scores[y[i]]) / np.sum(np.exp(scores)))\n\n # gradient(https://github.com/seyedamo/cs231n/blob/master/assignment1/cs231n/classifiers/softmax.py)\n scores = np.exp(scores)\n scores /= np.sum(scores)\n for j in range(num_classes): # for each class\n dW[j, :] += scores[j] * X[:, i].T\n\n # dW wrt correct class scores w_yi\n dW[y[i], :] += -X[:, i].T\n\n # Average the loss \n loss /= num_train\n\n # Add regularization to the loss.\n loss += 0.5 * reg * np.sum(W * W)\n\n # average of the gradient\n dW /= num_train\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n (num_class, D), (D, num_train) = (W.shape, X.shape)\n class_scores = np.dot(W, X)\n \n # Subtract maximum unnormalized score from each set of class scores\n for i in range(num_train):\n max_class_score = np.max(class_scores[:, i])\n for j in range(num_class):\n class_scores[j, i] -= max_class_score\n \n # Compute softmax and update gradient\n for i in range(num_train):\n normalization_term = sum(np.exp(class_score) for class_score in class_scores[:, i])\n for j in range(num_class):\n class_scores[j, i] = np.exp(class_scores[j, i]) / normalization_term\n # Thanks again to MyHumbleSelf for making me examine this further and discover a bug in my derivation of the softmax gradient!\n dW[j] += (class_scores[j, i] - (j==y[i])) * X[:, i]\n \n # Compute cross-entropy errors and total loss from that\n losses = [np.log(class_scores[y[i], i]) for i in range(num_train)]\n loss = -sum(losses) / num_train\n\n # Add regularization to loss and normalize dW\n loss += 0.5 * reg * np.sum(W * W)\n dW /= num_train\n dW += reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n # softmax P(Y=k|X=x_i) = e^{s_k}/∑e^{s_j} softmax loss = -log(softmax)\n num_train = X.shape[0]\n num_class = W.shape[1]\n for i in range(num_train):\n scores = X[i].dot(W) # get scores\n max_score = np.max(scores)\n scores -= max_score # 考虑数值计算稳定性 softmax = (e^s_c - max)/∑(e^s_j - max)\n correct_score = scores[y[i]] # score_correct\n P_ic = np.exp(correct_score)/np.sum(np.exp(scores))\n loss += -np.log(P_ic)\n for j in range(num_class):\n if j == y[i]:\n dW[:, j] += (P_ic - 1) * X[i].T\n else:\n P_ij = np.exp(scores[j])/np.sum(np.exp(scores))\n dW[:, j] += P_ij * X[i].T\n \n \n loss /= num_train\n loss += reg*np.sum(W*W)\n dW /= num_train\n dW += 2 * reg * W\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, dW", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n num_train = X.shape[0]\n # print(\"num_train:\", num_train)\n num_classes = W.shape[1]\n # print(\"num_classes:\", num_classes)\n \n scores = X.dot(W) # scores is N*D x D*C -> N*C \n log_c = np.max(scores, axis=1).T\n scores -= log_c[:,None]\n correct_class_score = scores[np.arange(num_train),y]\n exp_scores = np.exp(scores)\n sum_exp_scores = np.sum(np.exp(scores), axis=1)\n proportion = np.exp(correct_class_score) / sum_exp_scores\n loss -= np.sum(np.log(proportion))\n \n # calculating dW = (p - (c = correct c ? 1 : 0)) * x\n correct_class_one_hot = np.zeros_like(scores)\n correct_class_one_hot[np.arange(num_train),y] += 1\n p = np.exp(scores) / sum_exp_scores[:,None] - correct_class_one_hot # N*C / N:1 -> N*C\n dW += X.T.dot(p) # D*N x N*C -> D*C\n\n loss /= num_train\n loss += 0.5 * reg * np.sum(W * W) \n dW /= num_train\n dW += reg * W\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n num_classes = W.shape[1]\n num_train = X.shape[0]\n\n for i in xrange(num_train):\n scores = X[i].dot(W)\n\n # Normalization trick to resolve numerical instability\n # when dealing with the large exponential terms.\n scores -= np.max(scores)\n\n # Cache some terms that are used repeatedly.\n exp_scores = np.exp(scores)\n sum_exp_scores = np.sum(exp_scores)\n correct_class_score = scores[y[i]]\n \n # Update the loss \n loss -= correct_class_score\n loss += np.log(sum_exp_scores)\n\n # Update the gradient\n dW[:,y[i]] -= X[i,:].T\n for j in xrange(num_classes):\n dW[:,j] += ((X[i,:].T * exp_scores[j]) / sum_exp_scores)\n\n \n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n\n dW /= num_train\n\n # Add regularization to the loss.\n loss += 0.5 * reg * np.sum(W * W)\n\n dW += reg*W\n \n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss(x, y):\n # softmax\n num = np.exp(x)\n den = np.sum(num, axis=1)\n softmax = num/den[:, None]\n N = x.shape[0]\n\n # compute the los per class\n loss = softmax[np.arange(N), y]\n loss = -np.log(loss)\n\n # sum all the losses and divide by number of class\n # Also add the regularization loss term\n loss = np.sum(loss)/N \n \n dscores = softmax\n dscores[np.arange(N), y] -= 1\n dscores /= N\n\n return loss, dscores", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. 
If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n num_train = X.shape[0]\n num_class = W.shape[1]\n l = np.zeros([num_train,1])\n for i in range(num_train):\n scores = np.dot(X[i], W)\n f_yi = scores[y[i]]\n exp_num = np.exp(f_yi)\n exp = np.exp(scores)\n exp_deno = np.sum(exp)\n for j in range(num_class):\n if (j == y[i]):\n dW[:,j] -= X[i,:].transpose()\n dW[:,j] += (np.exp(scores[j]) / exp_deno) * X[i,:].transpose()\n l[i] = -np.log(exp_num/exp_deno)\n\n loss = np.sum(l)/num_train\n loss += reg * np.sum(W*W)\n dW /= num_train \n dW += 2 * reg * W\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train = X.shape[0]\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n scores = X.dot(W)\n scores_exp = np.exp(scores-np.max(scores, axis=1, keepdims=True))\n\n sum = np.sum(scores_exp, axis=1, keepdims=True)\n probability = scores_exp/sum\n #list containing the correct classification\n indices = [range(num_train), y]\n correct_class_score = probability[indices]\n\n #calculate -log(prob_y) and take the sum across all training examples\n loss = np.sum(-np.log(correct_class_score))\n loss /= num_train\n loss += 0.5 * reg * np.sum(W * W)\n\n #Compute Gradient\n probability[indices] -=1\n dW = X.T.dot(probability)\n dW /= num_train\n dW += .5 * reg * W\n\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_vectorized(W, X, y, reg):\n num_train = X.shape[0]\n num_class = W.shape[1]\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n scores = X.dot(W)\n temp_matrix = np.zeros(scores.shape)\n \n max_each_row = np.max(scores,axis=1).reshape(-1,1)\n scores -= max_each_row\n summation = np.sum(np.exp(scores),axis=1).reshape(-1,1)\n scores = np.exp(scores)\n scores = np.divide(scores,summation)\n temp_matrix[range(num_train),list(y)] =-1\n scores += temp_matrix\n dW = X.T.dot(scores) / num_train + 2*reg*W \n log_summation = np.log(summation)\n vector = scores[range(num_train),list(y)].reshape(-1,1) \n L = -vector+ log_summation \n loss = np.sum(L)/num_train + reg*np.sum(W*W)\n \n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n \n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def compute_gradient_and_loss(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n max_sj = -999\n argmax_sj = -1\n local_loss = 0.0\n for j in xrange(num_classes): # for every class \n if j != y[i]: # don't take the correct ground truth index\n if s[j] > max_sj:\n max_sj = s[j]\n argmax_sj = j\n\n term = 1 + max_sj - s_y # max term with Delta = 1, according to Hinge loss formula \n \n if term > 0:\n local_loss = term\n \n loss += local_loss\n \n for j in xrange(num_classes): # for every class \n if j != y[i]: # don't take the correct ground truth index\n if opt == 0: # compute gradient only if opt == 0\n if j == argmax_sj:\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n \n \n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n# dW += reg * deriv_abs(W) #dW[:,-1]\n# else:\n# dW += 2 * reg * W # l2 derivative formula \n dW[:-1,:] += reg * np.sign((W[:-1,:])) #dW[:,-1]\n else:\n dW[:-1,:] += 2 * reg * W[:-1,:] # l2 derivative formula \n return loss, dW\n else:\n return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n ############################################################################# ", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n pass\n num_tran = X.shape[0]\n num_classes = W.shape[1]\n loss_par =np.zeros(num_tran)\n\n Score = np.dot(X,W)\n expS = np.exp(Score)\n # for i in num_tran:\n sumS = np.sum(expS,axis=1)\n sumS = sumS.reshape(sumS.shape[0],1)\n normalize = np.divide(expS,sumS)\n softmax = -np.log(normalize)\n\n for i in np.arange(num_tran):\n loss_par[i]=softmax[i, y[i]]\n for j in np.arange(num_classes) :\n if j!=y[i]:\n # dW[:,j]+=1/normalize[i,y[i]]*expS[i,y[i]]*expS[i,j]/np.power(sumS[i],2) *X[i,:]\n dW[:,j]+=expS[i,j]/sumS[i] *X[i,:]\n else:\n # dW[:,y[i]]+=-1/normalize[i,y[i]]*expS[i,y[i]]*(sumS[i]-expS[i,y[i]])/np.power(sumS[i],2) *X[i,:]\n dW[:,y[i]]+=-(sumS[i]-expS[i,y[i]])/sumS[i] *X[i,:]\n\n dW /=num_tran\n\n loss = np.sum(loss_par) / num_tran\n # print num_tran,loss\n\n dW+=reg*W\n loss+=0.5*reg*np.sum(W*W)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n num_cases = X.shape[0]\n num_class = W.shape[1]\n y_label = np.zeros((num_cases,num_class))\n for i in range(num_cases):\n h1 = np.exp(X[i].dot(W))\n h = h1/np.sum(h1)\n y_label[i] = (np.arange(h.shape[0]) == y[i]) + 0\n loss -= (np.sum(y_label[i] * np.log(h) + (1 - y_label[i]) * np.log(1 - h)))\n delta = np.zeros(W.shape)\n for j in range(num_class):\n delta[:,j] += X[i]\n delta[:,j] *= h1[j]\n delta[:,j] *= (np.sum(h1) - h1[j])/(np.sum(h1) ** 2)\n delta[:,j] = y_label[i][j] / h[j] * delta[:,j] - (1 - y_label[i][j]) / (1 - h[j]) * delta[:,j]\n dW -= delta\n loss /= num_cases\n loss += reg * np.sum(W * W)\n dW /= num_cases\n dW += 2 * reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n num_train = X.shape[0]\n num_classes = W.shape[1]\n for i in xrange(num_train):\n scores = X[i, :].dot(W)\n scores -= np.max(scores)\n correct_scores = scores[y[i]]\n score_sum = np.sum(np.exp(scores))\n h = np.exp(correct_scores) / score_sum\n loss += -np.log(h)\n for j in xrange(num_classes):\n if j == y[i]:\n dW[:, y[i]] += (np.exp(scores[j]) / score_sum - 1) * X[i, :]\n else:\n dW[:, j] += (np.exp(scores[j]) / score_sum) * X[i, :]\n \n \n loss /= num_train + ( reg * np.sum(W * W))\n dW /= num_train\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train = X.shape[0]\n dim = dW.shape[0]\n num_classe = W.shape[1]\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n S = X.dot(W)\n # ajouter le - max a la fin\n indexes=np.arange(num_train)\n #c = correct class score\n c = S[indexes, y]\n\n e_syi = np.exp(c)\n e_sj = np.sum(np.exp(S), axis=1)\n Li = - np.log(e_syi/e_sj)\n loss = np.sum(Li) / num_train + reg * np.sum(W * W)\n\n\n M = np.exp(S)/(np.repeat(e_sj, num_classe).reshape(num_train, num_classe)) #(500,10)\n M[indexes, y] -= 1 #bonnes classes\n dW = X.T.dot(M)\n\n dW = dW/num_train + 2 * reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train, num_dim = X.shape\n num_classes = W.shape[1]\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n scores = np.dot(X,W)\n # scores = scores.T - np.max(scores,1)\n # f = np.exp(scores.T) \n # correct_scores = f[range(num_train),y] #1*N\n # col_sum = np.sum(f,1)\n # loss = np.sum(-np.log(correct_scores/col_sum))\n\n # mat = f.T/col_sum #\n # mat = mat.T\n # y_pred = np.zeros(mat.shape)\n # y_pred[range(num_train),y] = 1\n # dW = np.dot(X.T,mat-y_pred)\n\n # loss/=num_train\n # loss += 0.5*reg*np.sum(W*W)\n # dW /= num_train\n # dW += reg*W\n f = scores.T - np.max(scores,1)\n f = f.T\n f_correct = scores[range(num_train),y]\n \n sum_col = np.log(np.sum(np.exp(scores),1)) # N*1\n \n loss = sum_col - f_correct # N*1\n loss = np.sum(loss)/num_train + 0.5*reg*np.sum(W*W)\n\n prob = np.exp(f).T / np.sum(np.exp(f),1)\n prob = prob.T\n y_pred = np.zeros(scores.shape)\n y_pred[range(num_train),y] = 1\n dW = X.T.dot(prob - y_pred)\n dW = dW/float(num_train) + reg*W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n # needed for calculations\n num_train = X.shape[1]\n\n for i in xrange(num_train):\n # calculate the scores for the current training example with the current weights\n scores = W.dot(X[:, i])\n # scale by the max for numerical stability\n scores -= np.max(scores)\n # calculate the loss\n loss += -scores[y[i]] + np.log(np.sum(np.exp(scores)))\n\n ## L' = -1_y + 1/(\\sum_{}^{} e^f) * e^f\n # e^f\n scores = np.exp(scores)\n # 1/(\\sum_{}^{} e^f)\n scores /= np.sum(scores)\n # -1_y\n scores[y[i]] -= 1\n\n # now scale it by the data\n # we need to use [:, np.newaxis] because when you make a X by 1 dimension slices in numpy the 1 dimension is null\n dW += scores[:, np.newaxis].dot(X[:, i][:, np.newaxis].T)\n\n\n # get the average loss\n loss /= num_train\n # get the average gradient\n dW /= num_train\n\n # regularize the loss function\n loss += 0.5 * reg * np.sum(W * W)\n\n return loss, dW", "def softmaxCostAndGradient(vc_predicted, target, uw_output, dataset):\n\n N = uw_output.shape[0] # n_words: vocab size\n y = np.zeros(N)\n y[target] = 1 # y is a 1-hot encoded vector with the actual word's index being 1 and rest of elements being 0\n\n score = np.dot(vc_predicted, uw_output.T) # vc dot uo_transpose which gives a vector of dimension (1, n_words)\n y_hat = softmax(score)\n\n # cross-entropy cost is given by formula in assignment 1.2b\n cost = np.sum(-y * np.log(y_hat))\n\n dout = y_hat - y # (1, n_words)\n\n grad_pred_dJ_vc = np.dot(dout, uw_output) # (1, dim_embed)\n\n grad_dJ_uw = np.dot(dout.T, vc_predicted) # (n_words, dim_embed)\n\n return cost, grad_pred_dJ_vc, grad_dJ_uw", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n num_classes = W.shape[1]\n num_train = X.shape[0]\n for i in range(num_train):\n score = X[i].dot(W)\n exp_score = np.exp(score)\n probability = exp_score[y[i]] / exp_score.sum()\n loss += -np.log(probability)\n dp = -1 / probability\n for j in range(num_classes):\n ds = np.exp(score[j])\n if j == y[i]:\n des = (exp_score.sum() - exp_score[y[i]]) / np.square(exp_score.sum())\n else:\n des = -(exp_score[y[i]]) / np.square(exp_score.sum())\n dW[:, j] += X[i].T * ds * des * dp # chain rule\n\n loss /= num_train\n dW /= num_train\n\n loss += 0.5 * reg * np.sum(W * W)\n dW += reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def loss(self, X, y):\n\n # Initialize the loss to zero.\n loss = 0.0\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n exp_a = np.zeros((num_classes,num_train))\n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the normalized softmax loss. Store it as the variable loss.\n # (That is, calculate the sum of the losses of all the training \n # set margins, and then normalize the loss by the number of \n # training examples.)\n # ================================================================ #\n \n \n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n\n #p[:,i] = exp_a[:,i]/np.sum(exp_a[:,i]) # p now is a valid probability matrix\n #print(p[:,i])\n\n loss += Loss \n #print(Loss,i) \n \n pass\n loss /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n for i in range(X.shape[0]):\n# c = np.matmul(X[i],W)\n# c -= np.amax(c)\n# e_c = np.exp(c)\n# denom = np.sum(e_c)\n# #Nice fact: we know that the largest element in c will also be the largest softmax value, so we only\n# # need to transform that one value. \n# sm_c = e_c/denom\n# \n# loss1 += -np.log(sm_c[y[i]])\n\n # Need to make this whole dang thing more numerically stable. 
\n c = np.matmul(X[i],W)\n c -= np.amax(c)\n e_c = np.exp(c)\n denom = np.sum(e_c)\n sm_c = e_c/denom\n\n loss += np.log(denom) - c[y[i]]\n# print(-np.log(sm_c[y[i]]) - (np.log(denom)-c[y[i]]))\n\n \"\"\"They are basically the same value\"\"\"\n\n # now computing some gradients\n dL_ds = sm_c\n dL_ds[y[i]] -= 1\n #note that sm_c is modified now!\n \"\"\" #ah, something fundamentally different is happening with numpy. When an array element\n is changed, it's really changed for good. And it changes for all pointers pointing to same object.\n yikes. Actually it's the same with python lists. Anything pointing to And underlying object can\n change that underlying object for all things that point to it. Alas.\"\"\"\n# import pdb; pdb.set_trace()\n \"\"\"Okay I just coudln't bear the for loops...\"\"\"\n dW_update = np.matmul(X[i].reshape(1,X.shape[1]).T,dL_ds[np.newaxis,:])\n dW+=dW_update\n # for n in range(W.shape[0]):\n# for m in range(W.shape[1]):\n# if m == y[i]:\n# dW[n,m] += X[i,n]*(sm_c[m]-e_c[m])\n# else:\n# dW[n,m] += X[i,n]*sm_c[m]\n\n # should be numerically unstable I think.\n\n loss /= X.shape[0]\n loss += reg*np.sum(W*W)\n\n dW /= X.shape[0]\n dW += reg*2*W\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, dW", "def compute_gradient_and_loss1(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n for j in xrange(num_classes): # for every class\n if j != y[i]: # don't take the correct ground truth index\n term = s[j] - s_y + 1 # max term with Delta = 1, according to Hinge loss formula\n if term > 0: # trick: take only the term > 0, equal to max(0,...) formula\n loss += term # add the possitive term \n if opt == 0: # compute gradient only if opt == 0\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n dW += reg * deriv_abs(W)\n else:\n dW += 2 * reg * W # l2 derivative formula\n \n return loss, dW\n else:\n return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. 
#\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train = X.shape[0]\n num_classes = W.shape[1]\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n scores = np.dot(X,W)\n scores = (scores.T - np.max(scores,1)).T\n for i in xrange(num_train):\n nominator = np.exp(scores[i,:])\n denominator = np.sum(np.exp(scores[i,:]))\n loss -= np.log(nominator[y[i]]/denominator)\n for j in xrange(num_classes):\n dW[:,j] += (nominator[j]/denominator)*X[i,:]\n dW[:,y[i]] -= X[i,:]\n\n loss /= num_train\n dW /= num_train\n loss += 0.5*reg*np.sum(W*W)\n dW += reg*W\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n for i in range(X.shape[0]):\n scores = X[i].dot(W)\n \n idx_max = np.argmax(scores)\n s_max = scores[idx_max]\n scores -= s_max # shift for numerical stability\n \n temp = np.exp(scores)\n summation = np.sum(temp)\n loss += (- scores[y[i]] + np.log(summation))\n \n # computing gradients\n # (1) an explicit version:\n# for j in range(W.shape[1]):\n# if j == y[i]:\n# dW[:, j] -= X[i]\n# dW[:, idx_max] -= (-X[i])\n \n# dW[:, j] += (1 / summation) * temp[j] * X[i]\n# dW[:, idx_max] += (1 / summation) * temp[j] * (-X[i])\n# elif j == idx_max:\n# dW[:, j] += 0 # X[i] + (-X[i]) = 0\n# else:\n# dW[:, j] += (1 / summation) * temp[j] * X[i]\n# dW[:, idx_max] += (1 / summation) * temp[j] * (-X[i])\n \n # (2) a more concise version:\n softmax_scores = temp / summation\n for j in range(W.shape[1]):\n if j == y[i]:\n dW[:, j] += (-1 + softmax_scores[j]) * X[i]\n else:\n dW[:, j] += softmax_scores[j] * X[i]\n \n loss /= X.shape[0]\n dW /= X.shape[0]\n \n loss += reg * np.sum(W * W)\n dW += 2 * reg * W\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def compute_gradient_and_loss2(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n max_sj = -999\n argmax_sj = -1\n local_loss = 0.0\n for j in xrange(num_classes): # for every class \n if j == y[i]: # don't take the correct ground truth index\n continue\n if s[j] > max_sj:\n max_sj = s[j]\n argmax_sj = j\n\n term = 1 + max_sj - s_y # max term with Delta = 1, according to Hinge loss formula \n\n for j in xrange(num_classes): # for every class \n if j == y[i]: # don't take the correct ground truth index\n continue\n if term > 0: # trick: take only the term > 0, equal to max(0,...) 
formula\n local_loss = term # add the possitive term \n if opt == 0: # compute gradient only if opt == 0\n if j == argmax_sj:\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n \n loss += local_loss \n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n dW[:,-1] += reg * deriv_abs(W[:,-1]) #dW[:,-1]\n else:\n dW[:,-1] += 2 * reg * W[:,-1] # l2 derivative formula\n \n return loss, dW\n else:\n return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n ############################################################################# ", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n \n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n num_train = X.shape[0]\n num_class = W.shape[1]\n\n scores = np.dot(X, W)\n idx0 = np.linspace(0,num_train-1,num_train, dtype = int).reshape(-1,1)\n idx1 = y.reshape(-1,1)\n exp_num = np.exp(scores[idx0,idx1])\n exp_deno = np.sum(np.exp(scores), axis = 1).reshape(-1,1)\n loss = np.sum(-np.log(exp_num/exp_deno))\n loss /= num_train\n loss += reg * np.sum(W*W)\n \n multiplier0 = np.exp(scores) / (exp_deno * np.ones(scores.shape))\n dW = np.dot(X.transpose(), multiplier0)\n multiplier1 = np.zeros(multiplier0.shape)\n multiplier1[idx0, idx1] = -1\n dW += np.dot(X.transpose(), multiplier1)\n dW /= num_train \n dW += 2 * reg * W\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, dW", "def softmaxCostAndGradient(predicted, target, outputVectors, dataset):\n\n\t#calculate the predictions\n\t#outputVectors: U: (V,d) ->!!diff from 3(a) dimension\n\tvhat = predicted\t#center word or BOW: (d,1)\n\tz = np.dot(outputVectors, vhat)\t#(V,1)\n\tpreds = softmax(z)\t#yhat: (V,1)\n\n\t#calculate the cost \n\tcost = -np.log(preds[target])\n\n\t#gradients\n\tgrad_pred = preds \n\tgrad_pred[target] -= 1\t#yhat - y\n\n\tgrad = np.outer(grad_pred, vhat)\t#(V, d)\n\tgradPred = np.dot(outputVectors.T, grad_pred)\t#dvc\n\n\treturn cost, gradPred, grad", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n #############################################################################\n # START OF YOUR CODE #\n #############################################################################\n # construct a one-hot vector for y\n onehot_y = np.zeros((y.size, W.shape[1]))\n onehot_y[np.arange(y.size), y] = 1\n dW = dW.T\n for i in range(y.shape[0]):\n f = np.dot(X[i], W)\n \n for j in range(W.shape[1]):\n e_f = np.exp(f - np.max(f))\n softmax = e_f / e_f.sum()\n loss -= onehot_y[i][j] * np.log(softmax[j])\n dW[j] -= X[i] * (onehot_y[i][j] - softmax[j])\n \n loss = loss / y.shape[0] + reg * np.linalg.norm(W)\n dW = dW.T / y.shape[0] + 2 * reg * W\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n \n\n return loss, dW", "def loss_grad_softmax_vectorized(W, X, y):\n loss = 0 \n grad = np.zeros_like(W)\n dim, num_train = X.shape\n\n scores = W.dot(X) # [K, N]\n # Shift scores so that the highest value is 0\n scores -= np.max(scores)\n scores_exp = np.exp(scores)\n correct_scores_exp = scores_exp[y, range(num_train)] # [N, ]\n scores_exp_sum = np.sum(scores_exp, axis=0) # [N, ]\n loss = -np.sum(np.log(correct_scores_exp / scores_exp_sum))\n loss /= num_train\n #loss += 0.5 * reg * np.sum(W * W)\n\n scores_exp_normalized = scores_exp / scores_exp_sum\n # deal with the correct class\n scores_exp_normalized[y, range(num_train)] -= 1 # [K, N]\n grad = scores_exp_normalized.dot(X.T)\n grad /= num_train\n grad += W\n\n return grad", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n # Compute class scores\n (num_class, D), (D, num_train) = W.shape, X.shape\n class_scores = np.dot(W, X)\n\n # Softmax them\n e_x = np.exp(class_scores - class_scores.max(axis=0))\n class_scores = e_x / e_x.sum(axis=0)\n \n # Create mask of ys\n gold_class_matrix = np.zeros((num_class, num_train))\n gold_class_matrix[y, range(num_train)] = 1\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n # Cross entropy loss\n loss = -(gold_class_matrix * np.log(class_scores)).sum()\n \n # Add regularization and normalize\n loss += 0.5 * reg * np.sum(W * W)\n loss /= num_train\n \n # Gradients\n augmented_scores = class_scores - gold_class_matrix\n (num_class, num_train), (num_train, D) = augmented_scores.shape, X.T.shape\n dW = np.dot(augmented_scores, X.T)\n \n # Add regularization and normalize\n dW += reg * W\n dW /= num_train\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. 
If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n num_train = X.shape[0]\n num_classes = W.shape[1]\n\n # Calculate loss for each example\n f = np.zeros((num_train, num_classes))\n f_max = np.zeros((num_train, 1))\n for i in xrange(num_train):\n for j in xrange(num_classes):\n f[i, j] = np.dot(X[i, :], W[:, j])\n if f[i, j] > f_max[i]:\n f_max[i] = f[i, j]\n\n exp_f = np.zeros_like(f)\n sum_exp_f = np.zeros((num_train, 1))\n for i in xrange(num_train):\n for j in xrange(num_classes):\n f[i, j] -= f_max[i]\n exp_f[i, j] = math.exp(f[i, j])\n sum_exp_f[i] += exp_f[i, j]\n\n for i in xrange(num_train):\n loss += -math.log(exp_f[i, y[i]] / sum_exp_f[i])\n\n loss /= num_train\n\n # Calculate regularization term\n reg_term = 0.0\n for i in xrange(W.shape[0]):\n for j in xrange(W.shape[1]):\n reg_term += W[i, j]**2\n\n loss += reg * reg_term\n\n # Calculate gradient\n P = np.zeros((num_train, num_classes))\n for i in xrange(num_train):\n for j in xrange(num_classes):\n P[i, j] = exp_f[i, j] / sum_exp_f[i]\n P[i, y[i]] -= 1\n\n for i in xrange(dW.shape[0]):\n for j in xrange(dW.shape[1]):\n dW[i, j] = 1 / num_train * np.dot(X[:, i].T, P[:, j])\n \n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def loss(self, X, y=None, reg=0.0):\r\n Ws = self.weights\r\n bs = self.biases\r\n N, D = X.shape # number of samples, number of features per sample\r\n\r\n # Compute the forward pass\r\n self.activations = []\r\n for i in xrange(len(Ws)): # for each set of weights\r\n W,b = Ws[i], bs[i]\r\n if i == 0:\r\n H = np.dot(X,W) + b\r\n else:\r\n H = np.dot(self.activations[-1],W) + b\r\n if i < len(Ws) - 1: # if we're computing hidden activations, apply nonlinear function\r\n H = (H > 0) * (H) + (H < 0) * (H/100.0)\r\n self.activations.append(H)\r\n scores = self.activations[-1]\r\n \r\n # If there's no labels provided, stop here\r\n if y is None:\r\n return scores\r\n\r\n # Compute the loss\r\n exped_scores = np.exp(scores)\r\n sums = np.sum(exped_scores,axis=1)\r\n # softmax classifier loss\r\n data_loss = (-1.0/N) * np.sum(np.log(exped_scores[range(N),y.astype(int)] / sums))\r\n\r\n # loss due to regularization\r\n reg_loss = 0\r\n for i in xrange(len(Ws)):\r\n reg_loss += np.sum(Ws[i]**2)\r\n reg_loss *= reg*(0.5)\r\n\r\n loss = data_loss + reg_loss\r\n \r\n # Compute gradients\r\n weights_grads = []\r\n biases_grads = []\r\n activation_grads = []\r\n for i in xrange(len(Ws)):\r\n weights_grads.append(np.copy(Ws[i]))\r\n biases_grads.append(np.copy(bs[i]))\r\n activation_grads.append(np.copy(self.activations[i]))\r\n\r\n DlossDscores = np.array(exped_scores / (N * np.matrix(sums).T))\r\n DlossDscores[range(N),y.astype(int)] -= (1.0/N)\r\n \r\n for i in xrange(len(Ws)-1,-1,-1):\r\n if i == 0:\r\n weights_grads[0] = np.dot(X.T, activation_grads[0]) + reg*Ws[0]\r\n biases_grads[0] = np.dot(np.ones((1,N)), 
activation_grads[0])[0]\r\n elif i == len(Ws)-1:\r\n H = self.activations[i-1]\r\n weights_grads[i] = np.dot(H.T, DlossDscores) + reg*Ws[i]\r\n biases_grads[i] = np.dot(np.ones((1,N)), DlossDscores)[0]\r\n dH = np.dot(DlossDscores, Ws[i].T)\r\n activation_grads[i-1] = dH\r\n else:\r\n H = self.activations[i-1]\r\n dH_out = activation_grads[i]\r\n weights_grads[i] = np.dot(H.T, dH_out) + reg*Ws[i]\r\n biases_grads[i] = np.dot(np.ones((1,N)), dH_out)[0]\r\n dH = np.dot(dH_out, Ws[i].T)\r\n dH = dH * (H > 0) + dH/100.0 * (H < 0)\r\n activation_grads[i-1] = dH\r\n \r\n grads = {}\r\n grads['weights'] = weights_grads\r\n grads['biases'] = biases_grads\r\n\r\n return loss, grads", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n\n num_classes = W.shape[1]\n num_train = X.shape[0]\n\n # Calculate scores for each classifier (column in the weight matrix W)\n # acting on each training sample (row in X)\n scores = X.dot(W)\n \n # Normalization trick to resolve numerical instability\n # when dealing with the large exponential terms.\n scores -= np.max(scores)\n\n # Cache some terms that are used repeatedly.\n exp_scores = np.exp(scores)\n sum_exp_scores = np.sum(exp_scores,axis=1)\n\n # Find the correct classifier scores for each training sample\n correct_class_scores = scores[np.arange(num_train), y]\n\n # Update the loss\n loss = np.sum(-correct_class_scores + np.log(sum_exp_scores))\n\n # Update the gradient\n correct_indices = np.zeros(scores.shape)\n correct_indices[np.arange(num_train), y] = 1\n\n dW -= correct_indices.T.dot(X).T\n dW += X.T.dot((exp_scores.T / sum_exp_scores).T)\n \n # Average over the training samples\n loss /= num_train\n dW /= num_train\n \n # Add regularization.\n loss += 0.5 * reg * np.sum(W * W)\n dW += reg * W\n\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmaxCostAndGradient(predicted, target, outputVectors, dataset):\n\n ### YOUR CODE HERE\n # identify the target predicted vector and then find the dot product\n # between the vector and the output vectors\n # outputVector structured as V x D \n # v_c structured as 1xD matrix\n # we are assuming here that the output vector and the \n # predicted vector is structured so that each row represent a word / token in {1, V}\n v_c = predicted\n z_w = np.dot(outputVectors, v_c)\n # the output yhat is a 1xV matrix\n yhat = softmax(z_w)\n # create the one hot vector for the predicted word\n # calculate the difference for gradient\n ydiff = yhat.copy()\n ydiff[target] -= 1.0\n\n # find the cross-entropy cost function based on yhat\n # cost = calc_cost_from_prediction(y, yhat)\n cost = - np.log( yhat[target] )\n\n # calculate the gradient wrt to the v_c (the predicted word vector)\n # the gradient is U(yhat - y)\n # the output should be a D x 1 matrix, same as v_c\n # y is a one-hot vector that represents the actual word\n # and we multiply it by output vector, it can also be 
calculated\n # by using index to find the vector\n gradPred = np.dot( outputVectors.T, ydiff)\n\n\n # calculate the gradient wrt to all other word vectors\n # the gradient is v_c(yhat - y)\n # we multiple yhat by v_c to get a V x D matrix\n grad = np.outer(ydiff, v_c)\n\n ### END YOUR CODE\n return cost, gradPred, grad", "def train(input, label, conv, maxpool, softmax, lr=0.005):\n # Forward\n output, loss, accuracy = forward(input, label, conv, maxpool, softmax)\n\n gradient = np.zeros(10)\n gradient[label] = -1 / output[label]\n\n # Backprop\n gradient = softmax.backprop(gradient, lr)\n gradient = maxpool.backprop(gradient)\n gradient = conv.backprop(gradient, lr)\n\n return loss, accuracy", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n num_train = X.shape[0]\n\n scores = X.dot(W) # (1)\n s_max = np.max(scores, 1)\n scores -= s_max.reshape((-1, 1))\n \n s_y = scores[np.arange(num_train), y] # (2)\n s_exp = np.exp(scores) # (3)\n summation = np.sum(s_exp, 1) # (4)\n data_loss = np.sum(-s_y + np.log(summation)) / num_train # (5)\n \n loss = data_loss + reg * np.sum(W * W)\n \n # computing gradients: staged computation!!\n dsy = -1 / num_train # (5)\n dsummation = (1 / summation) / num_train # (5)\n\n dsexp = 1 * dsummation # (4)\n \n dscores = s_exp * dsexp.reshape((-1, 1)) # (3)\n dscores[np.arange(num_train), y] += 1 * dsy # (2)\n \n dW += X.T.dot(dscores) # (1)\n \n dW += 2 * reg * W \n\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n # conv - relu - 2x2 max pool - affine - relu - affine - softmax\n\n\n # pass conv_param to the forward pass for the convolutional layer\n # Padding and stride chosen to preserve the input spatial size\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) // 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\n h1, c1 = conv_forward_im2col(X, W1, b1, conv_param) #\n h1, r1 = relu_forward(h1)\n h1, p1 = max_pool_forward_fast(h1, pool_param) #\n max_pool_shape = h1.shape\n h1 = h1.reshape(X.shape[0], -1)\n h2, c2 = affine_relu_forward(h1, W2, b2)\n scores, c3 = affine_forward(h2, W3, b3)\n\n if y is None:\n return scores\n\n loss, dx = softmax_loss(scores, y)\n\n loss += self.reg / 2 * (self.params['W1']**2).sum()\n loss += self.reg / 2 * (self.params['W2']**2).sum()\n loss += self.reg / 2 * (self.params['W3']**2).sum()\n\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. 
Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. #\n ############################################################################\n \n grads = {}\n dx, grads['W3'], grads['b3'] = affine_backward(dx, c3)\n grads['W3'] += self.reg * self.params['W3']\n dx, grads['W2'], grads['b2'] = affine_relu_backward(dx, c2)\n dx = dx.reshape(max_pool_shape)\n dx = max_pool_backward_fast(dx, p1)\n dx = relu_backward(dx, r1)\n dx, grads['W1'], grads['b1'] = conv_backward_im2col(dx, c1)\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train = X.shape[1]\n num_classes = W.shape[0]\n #############################################################################\n # Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n \n # compute scores\n scores = W.dot(X)\n scores -= np.max(scores)\n\n # softmax function\n softmax = np.exp(scores) / np.sum(np.exp(scores), 0) # 10 x 49000 | C x D\n \n # cross entropy loss\n loss = -np.log(softmax[y, range(num_train)]) # 49000\n loss = np.sum(loss) / num_train\n\n # regularisation\n loss += 0.5 * reg * np.sum(W*W)\n\n # gradient (source:https://github.com/MyHumbleSelf/cs231n/blob/master/assignment1/cs231n/classifiers/softmax.py)\n ind = np.zeros(softmax.shape)\n ind[y, range(num_train)] = 1\n dW = np.dot((softmax-ind), X.T)\n dW /= num_train\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
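# A minimal sketch of the centered-difference check commonly used to validate
# analytic gradients like the dW computed in these functions; f is any scalar
# loss of W, and the function and parameter names here are illustrative
# assumptions rather than the grading utilities themselves.
import numpy as np

def numerical_gradient(f, W, h=1e-5):
    grad = np.zeros_like(W)
    it = np.nditer(W, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        old = W[idx]
        W[idx] = old + h
        f_plus = f(W)            # loss with W[idx] nudged up
        W[idx] = old - h
        f_minus = f(W)           # loss with W[idx] nudged down
        W[idx] = old             # restore the entry
        grad[idx] = (f_plus - f_minus) / (2.0 * h)
        it.iternext()
    return grad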
#\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n scores = X@W # 500,10\n# print(scores.shape)\n max_scores = np.max(scores, axis=1).reshape(-1,1) # 500, numeric instability\n# print(max_scores.shape)\n scores -= max_scores # numeric instability\n# print(scores.shape)\n correct_scores = scores[np.arange(scores.shape[0]), y] # 500,\n P_ic = np.exp(correct_scores)/np.sum(np.exp(scores), axis=1)\n# print(P)\n loss += np.sum(-np.log(P_ic))/scores.shape[0] # L = ∑L_i/N\n loss += reg * np.sum(W * W) # regularization\n # Vectorized gradient: build a matrix P [500, 10] from scores by taking exp(scores) elementwise and dividing each entry by the sum of exps in its row\n # The step above gives a 500x10 matrix whose entries are the softmax probabilities\n # !Key point: at position [i, y[i]], following P_ic - 1, subtract 1 \n P = np.exp(scores) # gradient for the correct classes, sitting in the rows of the gradient matrix for every class c\n P /= np.sum(np.exp(scores),axis=1).reshape(-1, 1)\n P[np.arange(scores.shape[0]), y] -= 1 # subtract 1 at position i, y[i]\n \n # Once this matrix is built, multiplying with X.T gives dL/dW: P(500,10) X(500,3073) X.T (3073, 500) W(3073, 10)\n dW += X.T@P\n dW /= scores.shape[0] # *1/N\n dW += 2*reg*W # regularization gradient\n \n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, dW", "def softmax_loss(x, y):\n #raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n N=x.shape[0]\n\n \n x-=np.max(x,axis=1,keepdims=True)\n temp=np.exp(x)\n dr_vec=np.sum(temp,axis=1,keepdims=True)\n\n nr=(x[np.arange(N),y]).reshape([N,1])\n loss=np.sum(-(nr)+np.log(dr_vec))\n \n loss=(loss/N)\n temp/=dr_vec\n temp[np.arange(N),y] -= 1\n \n dx = temp/N\n \n return loss, dx", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n # loss\n num_classes = W.shape[1]\n num_train = X.shape[0]\n score = X.dot(W)\n exp_score = np.exp(score)\n exp_score_sum = exp_score.sum(axis=1)\n correct_score = exp_score[np.arange(num_train), y]\n probability = (correct_score / exp_score_sum).reshape(-1, 1)\n loss = -np.log(probability).sum()\n\n loss /= num_train\n loss += 0.5 * reg * np.sum(W * W)\n\n # dW\n des = np.tile((-correct_score / np.square(exp_score_sum)).reshape(-1, 1), (1, num_classes))\n des[np.arange(num_train), y] += 1.0 / exp_score_sum\n dW = X.T.dot(des * (-np.ones((num_train, 1)) / probability) * np.exp(score))\n dW /= num_train\n dW += reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n pass\n num_tran = X.shape[0]\n num_classes = W.shape[1]\n loss_par = np.zeros(num_tran)\n c = np.arange(num_tran)\n\n Score = np.dot(X, W)\n expS = np.exp(Score)\n # for i in num_tran:\n sumS = np.sum(expS, axis=1)\n # sumS = sumS.reshape(sumS.shape[0], 1)\n normalize = np.divide(expS, sumS.reshape(sumS.shape[0], 1))\n softmax = -np.log(normalize)\n\n S = np.divide(expS,sumS.reshape(sumS.shape[0], 1))\n # for c in np.arange(num_tran):\n S[c,y[c]]=-(sumS[c]-expS[c,y[c]])/sumS[c]\n\n dW = np.dot(X.T,S)\n\n dW /= num_tran\n dW += reg * W\n\n for i in np.arange(num_tran):\n loss_par[i]=softmax[i, y[i]]\n loss = np.sum(loss_par) / num_tran\n loss += 0.5 * reg * np.sum(W * W)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def grad_softmax_cross_entropy_loss(logit, labels):\n return softmax(logit) - labels", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n \n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. 
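# A small worked check, under the assumption of integer sizes, of why the
# padding chosen above -- pad = (filter_size - 1) / 2 with stride 1 -- preserves
# the spatial size of the input.
def conv_output_size(input_size, filter_size, stride=1, pad=0):
    return (input_size + 2 * pad - filter_size) // stride + 1

assert conv_output_size(32, 7, stride=1, pad=(7 - 1) // 2) == 32  # 7x7 filters
assert conv_output_size(32, 3, stride=1, pad=(3 - 1) // 2) == 32  # 3x3 filters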
#\n ############################################################################\n cnn_out, cnn_cache = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)\n hidden_out, hidden_cache = affine_relu_forward(cnn_out, W2, b2)\n scores, scores_cache = affine_forward(hidden_out, W3, b3)\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n ############################################################################\n\n # Compute loss and gradients\n loss, dscores = softmax_loss(scores, y)\n dhidden, grads['W3'], grads['b3'] = affine_backward(dscores, scores_cache)\n dcnn, grads['W2'], grads['b2'] = affine_relu_backward(dhidden, hidden_cache)\n dX, grads['W1'], grads['b1'] = conv_relu_pool_backward(dcnn, cnn_cache)\n\n # Regularization\n loss = loss + 0.5*self.reg*np.sum(self.params['W3']**2)\n loss = loss + 0.5*self.reg*np.sum(self.params['W2']**2)\n loss = loss + 0.5*self.reg*np.sum(self.params['W1']**2)\n grads['W3'] = grads['W3'] + self.reg * self.params['W3']\n grads['W2'] = grads['W2'] + self.reg * self.params['W2']\n grads['W1'] = grads['W1'] + self.reg * self.params['W1']\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def softmax_loss_vectorized(W, X, y, reg):\n\n num_train = X.shape[1]\n\n # calculate the scores for the current training example with the current weights\n scores = W.dot(X)\n # scale by the max for numerical stability\n scores -= np.max(scores, axis = 0)\n # calculate the loss\n loss = np.sum(-scores[y, range(num_train)] + np.log(np.sum(np.exp(scores), axis = 0)))\n\n ## L' = -1_y + 1/(\\sum_{}^{} e^f) * e^f\n # e^f\n scores = np.exp(scores)\n # 1/(\\sum_{}^{} e^f)\n scores /= np.sum(scores,axis = 0)\n # -1_y\n scores[y, range(num_train)] -= 1\n # now we scale it by the data\n dW = scores.dot(X.T)\n\n # get the average loss\n loss /= num_train\n # get the average gradient\n dW /= num_train\n\n # regularize the loss function\n loss += 0.5 * reg * np.sum(W * W)\n\n return loss, dW", "def loss(self, X, y=None):\n\t\tW1, b1 = self.params['W1'], self.params['b1']\n\t\tW2, b2 = self.params['W2'], self.params['b2']\n\t\tW3, b3 = self.params['W3'], self.params['b3']\n\t\t\n\t\t# pass conv_param to the forward pass for the convolutional layer\n\t\tfilter_size = W1.shape[2]\n\t\tconv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n\t\t# pass pool_param to the forward pass for the max-pooling layer\n\t\tpool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\t\tscores = None\n\t\t############################################################################\n\t\t# TODO: Implement the forward pass for the three-layer convolutional net, #\n\t\t# computing the class scores for X and storing them in the scores\t\t\t\t\t #\n\t\t# variable.\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 
#\n\t\t############################################################################\n\t\tz1, cache1 = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)\n\t\tz2, cache2 = affine_relu_forward(z1, W2, b2)\n\t\ty3, cache3 = affine_forward(z2, W3, b3)\n\t\tscores = y3\n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\t\n\t\tif y is None:\n\t\t\treturn scores\n\t\t\n\t\tloss, grads = 0, {}\n\t\t############################################################################\n\t\t# TODO: Implement the backward pass for the three-layer convolutional net, #\n\t\t# storing the loss and gradients in the loss and grads variables. Compute #\n\t\t# data loss using softmax, and make sure that grads[k] holds the gradients #\n\t\t# for self.params[k]. Don't forget to add L2 regularization!\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\tloss, dout = softmax_loss(scores, y)\n\t\tloss += self.reg * 0.5 * (np.power(self.params['W3'], 2).sum() + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W2'], 2).sum() + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W1'], 2).sum())\n\n\t\tdx3, grads['W3'], grads['b3'] = affine_backward(dout, cache3)\n\t\tdx2, grads['W2'], grads['b2'] = affine_relu_backward(dx3, cache2)\n\t\tdx1, grads['W1'], grads['b1'] = conv_relu_pool_backward(dx2, cache1)\n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\t\n\t\treturn loss, grads", "def loss(self, X, y=None, justLoss=False):\n # N = X.shape[0]\n # mode = 'test' if y is None else 'train'\n scores = None\n\n W1, b1 = self.params['W1'], self.params['b1']\n # W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n\n conv_param = {'stride': 1, 'pad': 0}\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\n #######################################################################\n # TODO: Implement the forward pass for the convolutional neural net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n #######################################################################\n\n conv1, conv_cache = conv_forward(X, W1, b1, conv_param)\n relu1, relu_cache1 = relu_forward(conv1)\n\n # conv2, conv_cache2 = conv_forward(relu1, W2, b2, conv_param)\n # relu2, relu_cache2 = relu_forward(conv2)\n\n scores, maxpool_cache = max_pool_forward(relu1, pool_param)\n scores, forward_cache = fc_forward(scores, W3, b3)\n \n\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n #######################################################################\n # TODO: Implement the backward pass for the convolutional neural net, #\n # storing the loss and gradients in the loss and grads variables. #\n # Compute data loss using softmax, and make sure that grads[k] holds #\n # the gradients for self.params[k]. 
#\n loss, dscores = softmax_loss(scores, y)\n\n if justLoss:\n return loss\n # print(loss)\n\n\n dx_3, grads['W3'], grads['b3'] = fc_backward(dscores, forward_cache)\n dx_3 = max_pool_backward(dx_3, maxpool_cache)\n\n # dx_2 = relu_backward(dx_3, relu_cache2)\n # dx_2, grads['W2'], grads['b2'] = conv_backward(dx_3, conv_cache2)\n\n dx = relu_backward(dx_3, relu_cache1)\n dx, grads['W1'], grads['b1'] = conv_backward(dx, conv_cache)\n \n \n\n return loss, grads", "def loss(self, X, y=None):\n W1 = self.params['W1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n X, cache_conv = conv_forward(X, W1)\n X, x_relu1 = relu_forward(X)\n X, cache_maxpool = max_pool_forward(X, pool_param)\n N1,C1,H1,W1 = X.shape\n X = X.reshape(N1, C1 * H1 * W1)\n X, cache_fc2 = fc_forward(X, W2, b2)\n X, x_relu2 = relu_forward(X)\n X, cache_fc3 = fc_forward(X, W3, b3)\n scores = X\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. #\n ############################################################################\n loss, dx = softmax_loss(X, y)\n dx, dw, db = fc_backward(dx, cache_fc3)\n grads['W3'] = dw\n grads['b3'] = db\n dx = relu_backward(dx, x_relu2)\n dx, dw, db = fc_backward(dx, cache_fc2)\n grads['W2'] = dw\n grads['b2'] = db\n xx, Ind, pp = cache_maxpool\n N2,C2,H2,W2 = xx.shape\n H2 = int(H2/2)\n W2 = int(W2/2)\n dx = dx.reshape(N2,C2,H2,W2)\n dx = max_pool_backward(dx, cache_maxpool)\n dx = relu_backward(dx, x_relu1)\n dx, dw = conv_backward(dx, cache_conv)\n grads['W1'] = dw\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def softmaxCostAndGradientTestWrapper(predictedandOutputVectors):\n target = 1\n\n predicted = predictedandOutputVectors[:1,:].reshape([-1,])\n outputVectors = predictedandOutputVectors[1:,:]\n\n cost, gradPred, gradOut = softmaxCostAndGradient(predicted, target, outputVectors, None)\n\n return cost, np.vstack([gradPred,gradOut])", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n scores = None\n ############################################################################\n # Implementing the forward pass for the fully-connected net, computing #\n # the class scores for X and storing them in the scores variable. 
#\n ############################################################################\n\n l_input = X.copy()\n out = []\n cache = []\n for i in range(self.num_layers - 1):\n # layerwise compute the forward pass and store outputs in out list\n key = ['W' + str(i+1), 'b' + str(i+1)]\n lout, lcache = affine_sigmoid_forward(l_input, self.params[key[0]], self.params[key[1]])\n out.append(lout)\n cache.append(lcache)\n l_input = lout\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n scores, lcache = affine_forward(out[self.num_layers - 2], self.params[key[0]], self.params[key[1]])\n cache.append(lcache)\n \n # regularization parameter compute by summing square of all weight vectors\n R = 0\n for i in range(1, self.num_layers + 1):\n key = 'W' + str(i)\n R += np.sum(np.power(self.params[key], 2))\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n loss, grads = 0.0, {}\n\n ########################\n # Backward pass to compute the loss and gradients\n ########################\n\n loss, dscore = softmax_loss(scores, y)\n # Apply regularization of the loss \n loss = loss + 0.5 * self.reg * R\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n dx, grads[key[0]], grads[key[1]] = affine_backward(dscore, cache[self.num_layers - 1])\n grads[key[0]] += self.reg * self.params[key[0]] \n\n for i in range(self.num_layers - 1, 0, -1):\n key = ['W' + str(i), 'b' + str(i)]\n dx, grads[key[0]], grads[key[1]] = affine_sigmoid_backward(dx, cache[i-1])\n # Apply regularization to the gradients\n grads[key[0]] += self.reg * self.params[key[0]]\n\n return loss, grads", "def np_softmax_loss(W, X, y, reg):\n\n num_train = X.shape[0]\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
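# A minimal sketch of the L2 term these losses add and its gradient; with the
# 0.5 factor, the derivative of 0.5 * reg * np.sum(W * W) is exactly reg * W,
# which is why snippets that drop the 0.5 carry a factor of 2 in dW instead.
# The helper name is illustrative only.
import numpy as np

def l2_penalty(W, reg):
    loss = 0.5 * reg * np.sum(W * W)
    grad = reg * W
    return loss, grad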
#\n #############################################################################\n scores = X.dot(W)\n scores_shifted = scores - scores.max(axis=1).reshape(scores.shape[0], -1)\n exp_scores = np.exp(scores_shifted)\n exp_scores_sum = np.sum(exp_scores, axis=1).reshape(exp_scores.shape[0], -1)\n p = exp_scores / exp_scores_sum\n # p = exp_scores / np.sum(exp_scores)\n p_correct = np.choose(y, p.T)\n loss = np.sum(-np.log(p_correct))\n\n loss /= num_train\n\n loss += reg * np.sum(W * W)\n\n return p_correct, loss", "def softmax_loss(x, y):\n\n eps = 1e-5\n \n N,C = x.shape\n p = softmax(x)\n llikelihood = -np.log(p[range(N),y] + eps)\n# print(llikelihood)\n loss = np.sum(llikelihood) / N\n\n dx = p\n dx[range(N),y] -= 1\n dx = dx/N\n \n return loss, dx", "def _classification_loss(self, logits, labels, num_classes):\n labels = tf.to_int64(labels)\n onehot_labels = tf.one_hot(labels, num_classes)\n with tf.name_scope('finetuning_loss'):\n cross_entropy = tf.losses.softmax_cross_entropy(\n onehot_labels=onehot_labels, logits=logits)\n cross_entropy = tf.reduce_mean(cross_entropy)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy + self.weight_decay * regularization\n return loss", "def loss(params: hk.Params, batch, labels, xent_weight=self.weights, l1_coeff=self.l1_coef, l2_coeff=self.l2_coef) -> jnp.ndarray:\n logits = net.apply(params, batch)\n labels = jax.nn.one_hot(label, 2)\n\n # Note that in our problem, regularization should be after the AND-mask.\n sum_in_layer = lambda p: jnp.sum(p)\n sum_p_layers = [sum_in_layer(p) for p in jax.tree_leaves(params)]\n l1_loss = sum(sum_p_layers)\n l2_loss = 0.5 * sum(jnp.sum(jnp.square(p)) for p in jax.tree_leaves(params))\n softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits) * xent_weight)\n softmax_xent /= labels.shape[0]\n\n return softmax_xent + l2_coeff * l2_loss + l1_coeff * l1_loss", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n \n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. 
#\n ############################################################################\n \n scores = None \n cache = {}\n # def conv_relu_pool_forward(x, w, b, conv_param, pool_param): return out, cache;\n out, cache['layer1'] = layer_utils.conv_relu_pool_forward(X, W1, b1, conv_param, pool_param) \n # def affine_relu_forward(x, w, b): return out, cache;\n out, cache['layer2'] = layer_utils.affine_relu_forward(out, W2, b2)\n # def affine_forward(x, w, b): return out, cache;\n scores, cache['layer3'] = layers.affine_forward(out, W3, b3)\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n ############################################################################\n \n loss, grads = 0, {}\n\n # def softmax_loss(x, y): return loss, dscore;\n loss, dscores = layers.softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3))\n\n # def affine_backward(dout, cache): return dx, dw, db;\n dout, dW3, db3 = layers.affine_backward(dscores, cache['layer3']) \n # def affine_relu_backward(dout, cache): return dx, dw, db;\n dout, dW2, db2 = layer_utils.affine_relu_backward(dout, cache['layer2'])\n # def conv_relu_pool_backward(dout, cache): return dx, dw, db;\n dout, dW1, db1 = layer_utils.conv_relu_pool_backward(dout, cache['layer1'])\n\n # reg\n grads['W3'], grads['b3'] = dW3 + self.reg * W3, db3\n grads['W2'], grads['b2'] = dW2 + self.reg * W2, db2\n grads['W1'], grads['b1'] = dW1 + self.reg * W1, db1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n c = np.matmul(X,W)\n c -= np.amax(c,axis = -1)[:,np.newaxis]\n e_c = np.exp(c)\n denom = np.sum(e_c,axis = -1)[:,np.newaxis]\n sm_c = e_c/denom\n\n \n loss_vec = np.log(denom) - c[range(c.shape[0]),y][:,np.newaxis]\n\n # now computing some gradients\n dL_ds = sm_c\n dL_ds[range(dL_ds.shape[0]),y] -= 1\n dW_update = np.matmul(X.T,dL_ds)\n dW+=dW_update\n \n loss = np.sum(loss_vec)\n loss /= X.shape[0]\n\n loss += reg*np.sum(W*W)\n print(dW)\n dW /= X.shape[0]\n\n dW += reg*2*W\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n print(dW)\n return loss, dW", "def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def loss(self, X, y=None, reg=0.0):\n\n self.layers = []\n layers = self.layers\n layers.append(X)\n\n # Unpack variables from the params dictionary\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n N, D = X.shape\n H, C = W2.shape\n\n # Compute the forward pass\n scores = None\n #############################################################################\n # TODO: Perform the forward pass, computing the class scores for the input. #\n # Store the result in the scores variable, which should be an array of #\n # shape (N, C). #\n #############################################################################\n mid = np.maximum(0, X.dot(W1) + b1.reshape(1, -1)) # activation\n scores = mid.dot(W2) + b2.reshape(1, -1)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n # If the targets are not given then jump out, we're done\n if y is None:\n return scores\n\n # Compute the loss\n loss = None\n #############################################################################\n # TODO: Finish the forward pass, and compute the loss. This should include #\n # both the data loss and L2 regularization for W1 and W2. Store the result #\n # in the variable loss, which should be a scalar. Use the Softmax #\n # classifier loss. So that your results match ours, multiply the #\n # regularization loss by 0.5 #\n #############################################################################\n exp_score = np.exp(scores)\n exp_score_sum = exp_score.sum(axis=1)\n correct_score = exp_score[np.arange(N), y]\n probability = (correct_score / exp_score_sum).reshape(-1, 1)\n loss = -np.log(probability).sum()\n\n loss /= N\n loss += 0.5 * reg * (np.sum(W1 * W1) + np.sum(W2 * W2))\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n # Backward pass: compute gradients\n grads = {}\n #############################################################################\n # TODO: Compute the backward pass, computing the derivatives of the weights #\n # and biases. Store the results in the grads dictionary. 
For example, #\n # grads['W1'] should store the gradient on W1, and be a matrix of same size #\n #############################################################################\n des = np.tile((-correct_score / np.square(exp_score_sum)).reshape(-1, 1), (1, C))\n des[np.arange(N), y] += 1.0 / exp_score_sum\n dsoftmax = des * (-np.ones((mid.shape[0], 1)) / probability) * np.exp(scores)\n\n # W2\n grads['W2'] = mid.T.dot(dsoftmax)\n grads['W2'] /= N\n grads['W2'] += reg * W2\n\n # b2\n grads['b2'] = np.ones_like(b2.reshape(1, -1)) * dsoftmax\n grads['b2'] = np.mean(grads['b2'], axis=0).reshape(-1)\n\n # W1\n binary = np.zeros_like(mid)\n binary[mid > 0] = 1\n grads['W1'] = X.T.dot(binary * dsoftmax.dot(W2.T)) # chain rule, compute dmid/dW1 * dscore/dmid * dsoftmax\n grads['W1'] /= N\n grads['W1'] += reg * W1\n\n # b1\n grads['b1'] = np.ones_like(b1.reshape(1, -1)) * binary * dsoftmax.dot(W2.T)\n grads['b1'] = np.mean(grads['b1'], axis=0).reshape(-1)\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, grads", "def softmaxCostAndGradient(predicted, target, outputVectors, dataset):\n\n # Implement the cost and gradients for one predicted word vector\n # and one target word vector as a building block for word2vec\n # models, assuming the softmax prediction function and cross\n # entropy loss.\n\n # Inputs:\n # - predicted: numpy ndarray, predicted word vector (\\hat{v} in\n # the written component or \\hat{r} in an earlier version)\n # - target: integer, the index of the target word\n # - outputVectors: \"output\" vectors (as rows) for all tokens\n # - dataset: needed for negative sampling, unused here.\n\n # Outputs:\n # - cost: cross entropy cost for the softmax word prediction\n # - gradPred: the gradient with respect to the predicted word\n # vector\n # - grad: the gradient with respect to all the other word\n # vectors\n\n # We will not provide starter code for this function, but feel\n # free to reference the code you previously wrote for this\n # assignment!\n\n ### YOUR CODE HERE\n yhat = softmax(np.dot(outputVectors, predicted))\n\n cost = -np.log(yhat[target])\n\n yhat_y = yhat.copy()\n yhat_y[target] -= 1\n\n gradPred = np.dot(yhat_y, outputVectors)\n\n grad = yhat_y[:, np.newaxis] * np.tile(predicted, (yhat_y.shape[0], 1))\n ### END YOUR CODE\n\n return cost, gradPred, grad", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n N = X.shape[0]\n f = np.dot(X, W)\n f -= np.amax(f, axis = 1, keepdims = True) # for numerical stability\n exp_f = np.exp(f)\n exp_fyi = exp_f[range(N), y].reshape((N, 1)) # correct class probabilities\n sum_exp_f = np.sum(exp_f, axis = 1, keepdims = True)\n losses = -np.log(exp_fyi / sum_exp_f)\n loss = 1 / N * np.sum(losses) + reg * np.sum(W * W)\n\n P = exp_f / sum_exp_f\n y_one_hot = np.zeros_like(P)\n y_one_hot[range(len(y)), y] = 1\n \n df = 1 / N * (P - y_one_hot)\n dW = np.dot(X.T, df)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def compute_cost(self,X, y):\r\n num_examples = np.shape(X)[0]\r\n z = np.dot(X,self.theta) + self.bias\r\n exp_z = np.exp(z)\r\n softmax_scores = exp_z / np.sum(exp_z, axis=1, keepdims=True)\r\n \r\n one_hot_y = np.zeros((num_examples,np.max(y)+1))\r\n logloss = np.zeros((num_examples,)) \r\n for i in range(np.shape(X)[0]):\r\n one_hot_y[i,y[i]] = 1\r\n logloss[i] = -np.sum(np.log(softmax_scores[i,:]) * one_hot_y[i,:])\r\n data_loss = np.sum(logloss)\r\n return 1./num_examples * data_loss", "def loss(params: hk.Params, batch, label) -> jnp.ndarray:\r\n logits = net.apply(params, batch)\r\n labels = jax.nn.one_hot(label, n_classes)\r\n\r\n # Cross Entropy Loss\r\n softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits))\r\n softmax_xent /= labels.shape[0]\r\n return softmax_xent", "def compute_loss(self, x, y):\n\n self.batch_size = x.shape[0]\n self.x = x\n self.y = y\n self.soft = self.softmax(x) + 10**(-11)\n out = np.zeros(self.batch_size)\n for i in range(self.batch_size):\n out[i] = -(y[i] @ np.log(self.soft[i]))\n\n return out", "def softmax_loss(x, y):\n ############################################################################\n # TODO: You can use the previous softmax loss function here. 
# \n # Hint: Be careful on overflow problem #\n ############################################################################\n ############################################################################\n # START OF YOUR CODE #\n ############################################################################\n N = len(x)\n # We want to get the real y\n log_C = -np.max(x,axis=1,keepdims = True)\n # Get numerator\n e_all = np.exp(x+log_C)\n # Get the final prob\n prob = e_all/e_all.sum(axis=1,keepdims=True)\n # Find final loss\n loss = np.sum(-np.log(prob)[np.arange(N),y])/N\n # Get dx\n dx = prob\n dx[np.arange(N),y] -= 1\n dx /= N\n \n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n return loss, dx", "def model_loss(inp, fake, real_label, fake_label):\n \n \n Dreal,realcls,R1 = gradpen(inp)\n [Dfake,fakecls] = D(fake)\n # 1. Adversarial loss\n \n glabel = tf.ones_like(Dfake)#tf.random.uniform((Dfake.shape), 1-LN, 1)\n dlabelr = tf.ones_like(Dreal)#tf.random.uniform((Dreal.shape), 1-LN, 1)\n dlabelf = tf.zeros_like(Dfake)#tf.random.uniform((Dfake.shape), 0, LN)\n \n \n \n # D has no sigmoid activation: \"from_logits=True\"\n real_loss = tf.keras.losses.binary_crossentropy(\n dlabelr, Dreal, from_logits=True)\n real_loss = tf.reduce_mean(real_loss)\n \n fake_loss = tf.keras.losses.binary_crossentropy(\n dlabelf, Dfake, from_logits=True)\n fake_loss = tf.reduce_mean(fake_loss)\n \n Dadv = 0.5*(real_loss+fake_loss)\n \n Gadv = tf.keras.losses.binary_crossentropy(\n glabel, Dfake, from_logits=True)\n Gadv = tf.reduce_mean(Gadv)\n \n # 2. Classification loss\n \n Dcls = tf.keras.losses.binary_crossentropy(real_label, realcls, from_logits=True)\n Dcls = tf.reduce_mean(Dcls)\n \n Gcls = tf.keras.losses.binary_crossentropy(fake_label, fakecls, from_logits=True)\n Gcls = tf.reduce_mean(Gcls)\n \n # 3. Total loss\n \n Dloss = Dadv + (GAMMA/2)*R1 + LAMBDA_CLS*Dcls\n \n Gloss = Gadv + LAMBDA_CLS*Gcls\n \n return (Dloss, Dadv, Dcls, R1), (Gloss, Gadv, Gcls)", "def loss_gradient(self, x, y):\n x_preproc = self._apply_processing(x)\n x_defences, y_defences = self._apply_defences(x_preproc, y, fit=False)\n\n # Adjust the shape of y for loss functions that do not take labels in one-hot encoding\n if self._reduce_labels:\n y_defences = np.argmax(y_defences, axis=1)\n\n grads = self._loss_grads([x_defences, y_defences])[0]\n grads = self._apply_defences_gradient(x_preproc, grads)\n grads = self._apply_processing_gradient(grads)\n assert grads.shape == x_preproc.shape\n\n return grads", "def softmax_loss(scores, y):\r\n N = scores.shape[0] # number of input data\r\n\r\n # compute data loss\r\n shifted_logits = scores - np.max(scores, axis=1, keepdims=True)\r\n Z = np.sum(np.exp(shifted_logits), axis=1, keepdims=True)\r\n log_probs = shifted_logits - np.log(Z)\r\n probs = np.exp(log_probs)\r\n loss = -np.sum(log_probs[range(N), y]) / N\r\n\r\n # Compute gradient of loss function w.r.t. 
scores\r\n dscores = probs.copy()\r\n dscores[range(N), y] -= 1\r\n dscores /= N\r\n \r\n return loss, dscores", "def softmax_loss(x, y):\n probs = np.exp(x - np.max(x, axis=1, keepdims=True))\n probs /= np.sum(probs, axis=1, keepdims=True)\n N = x.shape[0]\n loss = -np.sum(np.log(probs[np.arange(N), y])) / N\n dx = probs.copy()\n dx[np.arange(N), y] -= 1\n dx /= N\n return loss, dx", "def softmax_loss(x, y):\n def softmax(x):\n exps = np.exp(x)\n return exps / np.sum(exps, axis=1)[:,None]\n\n N = y.shape[0]\n p = softmax(x)\n log_likelihood = -np.log(p[range(N),y])\n loss = np.sum(log_likelihood) / N\n\n dx = p.copy()\n dx[range(N),y] -= 1\n dx = dx/N\n\n return loss, dx", "def softmax_weighted_loss(pred, gr_truth):\n gr_truth = tf.cast(gr_truth, dtype='int32')\n gr_truth = one_hot(gr_truth)\n softmax_pred = tf.nn.softmax(pred)\n loss = 0\n\n for i in range(8):\n gti = gr_truth[:, :, :, :, i]\n predi = softmax_pred[:, :, :, :, i]\n weighted = 1 - (tf.reduce_sum(gti) / tf.reduce_sum(gr_truth))\n loss += -tf.reduce_mean(weighted * gti * tf.math.log(tf.clip_by_value(predi, 0.005, 1)))\n\n return loss", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n h1 = np.exp(X.dot(W))\n h = h1 / np.reshape(np.sum(h1,axis=1),(h1.shape[0],1))\n y_label = (np.arange(h.shape[1]) == y[:,None]) + 0\n loss = - np.mean(np.sum(y_label * np.log(h) + (1 - y_label) * np.log(1 - h),axis=1))\n H = h1 * ((np.reshape(np.sum(h1,axis=1),(h1.shape[0],1)) - h1) / (np.reshape(np.sum(h1,axis=1),(h1.shape[0],1)) ** 2))\n YH = y_label / h - (1 - y_label) / (1 - h)\n dW = X.T.dot(H * YH)\n dW = - dW / X.shape[0] + 2 * reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
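# A minimal sketch of the identity the vectorized versions rely on: for
# cross-entropy over softmax scores, dL/dscores = (softmax(scores) - one_hot(y)) / N.
# The helper name is illustrative only.
import numpy as np

def softmax_ce_grad(scores, y):
    # scores: (N, C) raw class scores, y: (N,) integer labels
    shifted = scores - scores.max(axis=1, keepdims=True)
    probs = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
    probs[np.arange(scores.shape[0]), y] -= 1.0   # subtract the one-hot labels
    return probs / scores.shape[0]                # average over the batch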
#\n #############################################################################\n #############################################################################\n # START OF YOUR CODE #\n #############################################################################\n # construct a one-hot vector for y\n onehot_y = np.zeros((y.size, W.shape[1]))\n onehot_y[np.arange(y.size), y] = 1\n f = np.dot(X, W)\n loss = -np.multiply(onehot_y, np.log(softmax(f)))\n loss = loss.sum() / y.shape[0] + reg * np.linalg.norm(W)\n dW = -np.dot(X.T, (onehot_y - softmax(f))) / y.shape[0]\n dW += 2 * reg * W\n \n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n \n\n return loss, dW", "def softmax_loss_vectorized(W, X, y, reg):\r\n # Initialize the loss and gradient to zero.\r\n loss = 0.0\r\n dW = np.zeros_like(W)\r\n\r\n #############################################################################\r\n # Compute the softmax loss and its gradient using no explicit loops. #\r\n # Store the loss in loss and the gradient in dW. If you are not careful #\r\n # here, it is easy to run into numeric instability. Don't forget the #\r\n # regularization! #\r\n #############################################################################\r\n m = X.shape[1]\r\n f = W.dot(X)\r\n f -= np.max(f,axis = 0)\r\n exp_score = np.exp(f)\r\n probs = exp_score/np.sum(exp_score,axis = 0)\r\n corect_logprobs = -np.log(probs[y,range(m)])\r\n loss = np.sum(corect_logprobs)/m\r\n \r\n loss += 0.5*reg*np.sum(W*W)\r\n\r\n dscore = probs\r\n dscore[y,range(m)] -= 1 #C*N\r\n\r\n dW = np.dot(dscore,X.T)/m #x.T:n*d\r\n dW+= reg*W\r\n\r\n return loss, dW", "def softmax_loss1(x, y):\n # tmp = np.max(x, axis=1, keepdims=True)\n shifted_logits = x - np.max(x, axis=1, keepdims=True)\n Z = np.sum(np.exp(shifted_logits), axis=1, keepdims=True)\n log_probs = shifted_logits - np.log(Z)\n probs = np.exp(log_probs)\n N = x.shape[0]\n # tmp2 = np.arange(N)\n tmp3 = log_probs[np.arange(N), y]\n # tmp4 = log_probs[[0,1,2],[2,5,0]]\n loss = -np.sum(log_probs[np.arange(N), y]) / N\n dx = probs.copy()\n dx[np.arange(N), y] -= 1\n dx /= N\n return loss, dx", "def loss_fn(y_true,y_pred): \n loss = tf.nn.softmax_cross_entropy_with_logits_v2(y_true,\n y_pred,\n axis=-1,\n )\n loss = tf.reduce_mean(loss,name=\"loss\")\n return loss", "def loss(self, labels, input_data):\n\n pred, out = self.inference(input_data)\n loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels, out), name=\"loss\") + \\\n tf.losses.get_regularization_loss()\n return loss, pred", "def cross_entropoy_loss_naive(W, X, y, reg):\n # pylint: disable=too-many-locals\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n ############################################################################\n # TODO: Compute the cross-entropy loss and its gradient using explicit #\n # loops. Store the loss in loss and the gradient in dW. If you are not #\n # careful here, it is easy to run into numeric instability. Don't forget #\n # the regularization! 
#\n ############################################################################\n num_train_sample = X.shape[0] #row of train data\n num_class = W.shape[1] #column of weight, plane,horse..\n for i in range(num_train_sample):\n p_score = X[i].dot(W) #a row of score corresponding to each class\n p_score -= np.max(p_score) #normalize, highest is 1\n\n ###compute softmax loss\n # sum of scores corresponding to different classes of a sample \n sum_score = np.sum(np.exp(p_score)) \n # each class's score over sum_score of a sample \n score_i = lambda k: np.exp(p_score[k]) / sum_score\n # for the correct label in each sample, find softmax loss over sum\n # iteration make loss sum up all samples\n loss = loss - np.log(score_i(y[i]))\n\n for k in range(num_class):\n p_k = score_i(k)\n # gradient of softmax\n dW[:, k] += (p_k - (k == y[i])) * X[i]\n\n loss /= num_train_sample\n loss += 0.5 * reg * np.sum(W * W)\n dW /= num_train_sample\n dW += reg*W\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, dW", "def loss(self, X, y=None, lambda_reg=0.0):\n \n # Unpack variables from the params dictionary\n N, D = X.shape\n\n # Compute the forward pass\n #############################################################################\n # TODO: Perform the forward pass, computing the class scores for the input. #\n # Store the result in the scores variable, which should be an array of #\n # shape (N, C). #\n #############################################################################\n scores, cache_list = self.network_forward(X)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n \n #############################################################################\n # TODO: Compute for the loss. This should include L2 regularization for #\n # the weights of each layer. #\n #############################################################################\n loss_softmax, dloss_softmax = self.softmax_cross_entropy_loss(scores, y)\n loss = loss_softmax\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n \n #############################################################################\n # TODO: Compute the derivatives of the weights and biases. Store the #\n # results in the grads dictionary. For example, grads['W1'] should store #\n # the gradient on the weights W of the first layer, and be a matrix of #\n # same size. 
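# A minimal sketch, with assumed names, of the per-layer gradients an affine
# (fully connected) layer contributes during a backward pass like the one below;
# cache is taken to be the forward inputs (x, W, b).
import numpy as np

def affine_backward_sketch(dout, cache):
    x, W, b = cache
    dx = dout.dot(W.T)        # (N, D): upstream gradient routed back to the input
    dW = x.T.dot(dout)        # (D, C): per-example outer products summed over the batch
    db = dout.sum(axis=0)     # (C,): the bias sees every example directly
    return dx, dW, db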
#\n #############################################################################\n grads = self.network_backward(dloss_softmax, cache_list)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, grads", "def compute_gradients(self, inputs, targets, hprev):\n n = len(inputs)\n loss = 0\n\n # Dictionaries for storing values during the forward pass\n aa, xx, hh, oo, pp = {}, {}, {}, {}, {}\n hh[-1] = np.copy(hprev)\n\n # Forward pass\n for t in range(n):\n xx[t] = np.zeros((self.vocab_len, 1))\n xx[t][inputs[t]] = 1 # 1-hot-encoding\n\n aa[t], hh[t], oo[t], pp[t] = self.evaluate_classifier(hh[t-1], xx[t])\n\n loss += -np.log(pp[t][targets[t]][0]) # update the loss\n\n # Dictionary for storing the gradients\n grads = {\"W\": np.zeros_like(self.W), \"U\": np.zeros_like(self.U),\n \"V\": np.zeros_like(self.V), \"b\": np.zeros_like(self.b),\n \"c\": np.zeros_like(self.c), \"o\": np.zeros_like(pp[0]),\n \"h\": np.zeros_like(hh[0]), \"h_next\": np.zeros_like(hh[0]),\n \"a\": np.zeros_like(aa[0])}\n\n # Backward pass\n for t in reversed(range(n)):\n grads[\"o\"] = np.copy(pp[t])\n grads[\"o\"][targets[t]] -= 1\n\n grads[\"V\"] += grads[\"o\"]@hh[t].T\n grads[\"c\"] += grads[\"o\"]\n\n grads[\"h\"] = np.matmul(self.V.T , grads[\"o\"] )+ grads[\"h_next\"]\n grads[\"a\"] = np.multiply(grads[\"h\"], (1 - np.square(hh[t])))\n\n grads[\"U\"] += np.matmul(grads[\"a\"], xx[t].T)\n grads[\"W\"] += np.matmul(grads[\"a\"], hh[t-1].T)\n grads[\"b\"] += grads[\"a\"]\n\n grads[\"h_next\"] = np.matmul(self.W.T, grads[\"a\"])\n\n # Drop redundant gradients\n grads = {k: grads[k] for k in grads if k not in [\"o\", \"h\", \"h_next\", \"a\"]}\n\n # Clip the gradients\n for grad in grads:\n grads[grad] = np.clip(grads[grad], -5, 5)\n\n # Update the hidden state sequence\n h = hh[n-1]\n\n return grads, loss, h", "def softmax_gradient(softmax_result):\r\n\r\n s = softmax_result.reshape(-1, 1)\r\n return np.diagflat(s) - np.dot(s, s.T)", "def compute_loss_and_gradients(self, X, y):\n # Before running forward and backward pass through the model,\n # clear parameter gradients aggregated from the previous pass\n # TODO Set parameter gradient to zeros\n # Hint: using self.params() might be useful!\n self.fulllayer1.W.grad = np.zeros_like(self.fulllayer1.W.grad)\n self.fulllayer1.B.grad = np.zeros_like(self.fulllayer1.B.grad)\n self.fulllayer2.W.grad = np.zeros_like(self.fulllayer2.W.grad)\n self.fulllayer2.B.grad = np.zeros_like(self.fulllayer2.B.grad)\n\n\n # TODO Compute loss and fill param gradients\n # by running forward and backward passes through the model\n res = self.fulllayer1.forward(X)\n res2 = self.reglayer1.forward(res)\n res3 = self.fulllayer2.forward(res2)\n\n loss, grad = softmax_with_cross_entropy(res3, y)\n\n back3 = self.fulllayer2.backward(grad)\n back2 = self.reglayer1.backward(back3)\n back = self.fulllayer1.backward(back2)\n \n # After that, implement l2 regularization on all params\n # Hint: self.params() is useful again!\n\n for params in self.params().keys():\n # print(params)\n # print(self.params()[params].value)\n loc_loss, loc_grad = l2_regularization(self.params()[params].value, self.reg)\n loss += loc_loss\n self.params()[params].grad += loc_grad\n\n return loss", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n # We are gonna store everythin in a dictionnary hidden\n hidden = {}\n hidden['h0'] = 
X.reshape(X.shape[0], np.prod(X.shape[1:]))\n\n for i in range(self.L):\n idx = i + 1\n # Naming of the variable\n w = self.params['W' + str(idx)]\n b = self.params['b' + str(idx)]\n h = hidden['h' + str(idx - 1)]\n\n # Computing of the forward pass.\n # Special case of the last layer (output)\n if idx == self.L:\n h, cache_h = affine_forward(h, w, b)\n hidden['h' + str(idx)] = h\n hidden['cache_h' + str(idx)] = cache_h\n\n # For all other layers\n else:\n h, cache_h = affine_relu_forward(h, w, b)\n hidden['h' + str(idx)] = h\n hidden['cache_h' + str(idx)] = cache_h\n\n scores = hidden['h' + str(self.L)]\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n # Computing of the loss\n data_loss, dscores = softmax_loss(scores, y)\n reg_loss = 0\n for w in [self.params[f] for f in self.params.keys() if f[0] == 'W']:\n reg_loss += 0.5 * self.reg * np.sum(w * w)\n\n loss = data_loss + reg_loss\n\n # Backward pass\n\n hidden['dh' + str(self.L)] = dscores\n for i in range(self.L)[::-1]:\n idx = i + 1\n dh = hidden['dh' + str(idx)]\n h_cache = hidden['cache_h' + str(idx)]\n if idx == self.L:\n dh, dw, db = affine_backward(dh, h_cache)\n hidden['dh' + str(idx - 1)] = dh\n hidden['dW' + str(idx)] = dw\n hidden['db' + str(idx)] = db\n else:\n dh, dw, db = affine_relu_backward(dh, h_cache)\n hidden['dh' + str(idx - 1)] = dh\n hidden['dW' + str(idx)] = dw\n hidden['db' + str(idx)] = db\n\n # w gradients where we add the regulariation term\n list_dw = {key[1:]: val + self.reg * self.params[key[1:]]\n for key, val in hidden.iteritems() if key[:2] == 'dW'}\n # Paramerters b\n list_db = {key[1:]: val for key, val in hidden.iteritems() if key[:2] == 'db'}\n # Parameters gamma\n list_dgamma = {key[1:]: val for key, val in hidden.iteritems() if key[:6] == 'dgamma'}\n # Paramters beta\n list_dbeta = {key[1:]: val for key, val in hidden.iteritems() if key[:5] == 'dbeta'}\n grads = {}\n grads.update(list_dw)\n grads.update(list_db)\n grads.update(list_dgamma)\n grads.update(list_dbeta)\n return loss, grads", "def loss_function(\n self, x_p, const, target, reconstructed_original, confidence, min_, max_):\n\n ## get the output of model before softmax\n x_p.requires_grad = True\n logits = self.model.get_logits(x_p).to(self.device)\n\n ## find the largest class except the target class\n targetlabel_mask = (torch.from_numpy(onehot_like(np.zeros(self.classnum), target))).double()\n secondlargest_mask = (torch.from_numpy(np.ones(self.classnum)) - targetlabel_mask).to(self.device)\n\n secondlargest = np.argmax((logits.double() * secondlargest_mask).cpu().detach().numpy(), axis = 1)\n\n is_adv_loss = logits[0][secondlargest] - logits[0][target]\n\n # is_adv is True as soon as the is_adv_loss goes below 0\n # but sometimes we want additional confidence\n is_adv_loss += confidence\n\n if is_adv_loss == 0:\n is_adv_loss_grad = 0\n else:\n is_adv_loss.backward()\n is_adv_loss_grad = x_p.grad\n\n is_adv_loss = max(0, is_adv_loss)\n\n s = max_ - min_\n squared_l2_distance = np.sum( ((x_p - reconstructed_original) ** 2).cpu().detach().numpy() ) / s ** 2\n total_loss = squared_l2_distance + const * is_adv_loss\n\n\n squared_l2_distance_grad = (2 / s ** 2) * (x_p - reconstructed_original)\n\n #print(is_adv_loss_grad)\n total_loss_grad = squared_l2_distance_grad + const * is_adv_loss_grad\n return total_loss, total_loss_grad", "def ComputeGradients(self, input_data: list, target_output_data: list):\n delta = 1e-6\n normal_cost = self.Cost(input_data, target_output_data)\n\n # Evaluate Gradient for Hidden Layer 
Biases\n for i in range(self.hidden_layer_biases.shape[0]):\n original_bias_value = self.hidden_layer_biases[i]\n self.hidden_layer_biases[i] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.hidden_layer_biases[i] = original_bias_value\n self.hidden_biases_gradient[i] = (plusdelta_cost - normal_cost) / delta\n\n # Evaluate Gradient for Output Layer Biases\n for i in range(self.output_layer_biases.shape[0]):\n original_bias_value = self.output_layer_biases[i]\n self.output_layer_biases[i] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.output_layer_biases[i] = original_bias_value\n self.output_biases_gradient[i] = (plusdelta_cost - normal_cost) / delta\n\n # Evaluate Gradient for Input Layer to Hidden Layer Weights\n for i in range(self.input_to_hidden_weights.shape[0]):\n for h in range(self.input_to_hidden_weights.shape[1]):\n original_bias_value = self.input_to_hidden_weights[i, h]\n self.input_to_hidden_weights[i, h] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.input_to_hidden_weights[i, h] = original_bias_value\n self.input_to_hidden_weights_gradient[i, h] = (plusdelta_cost - normal_cost) / delta\n\n # Evaluate Gradient for Input Layer to Hidden Layer Weights\n for h in range(self.hidden_to_output_weights.shape[0]):\n for o in range(self.hidden_to_output_weights.shape[1]):\n original_bias_value = self.hidden_to_output_weights[h, o]\n self.hidden_to_output_weights[h, o] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.hidden_to_output_weights[h, o] = original_bias_value\n self.hidden_to_output_weights_gradient[h, o] = (plusdelta_cost - normal_cost) / delta" ]
[ "0.8163197", "0.7849804", "0.7754222", "0.7720902", "0.7686208", "0.7658823", "0.7655289", "0.75943136", "0.75897485", "0.75817245", "0.758024", "0.75620025", "0.753615", "0.7510059", "0.7506491", "0.75061125", "0.7504011", "0.74645996", "0.73846763", "0.7381628", "0.7378212", "0.73716205", "0.7360122", "0.73347354", "0.73198926", "0.730296", "0.72893333", "0.7288762", "0.7255034", "0.7252466", "0.72426677", "0.7233475", "0.72230977", "0.7213216", "0.71927243", "0.71885806", "0.7176703", "0.715187", "0.7138154", "0.71335834", "0.71277267", "0.71208787", "0.70923764", "0.70635355", "0.70566523", "0.70358664", "0.7033774", "0.70299464", "0.70138067", "0.6998351", "0.69910955", "0.6981774", "0.69815063", "0.6972523", "0.69120467", "0.6901928", "0.6871261", "0.6853344", "0.684456", "0.6837448", "0.6815709", "0.6803873", "0.6801282", "0.67952734", "0.6783599", "0.677703", "0.6767109", "0.6760231", "0.67349", "0.6731244", "0.672314", "0.67211854", "0.67141104", "0.6694267", "0.66861933", "0.667393", "0.6665315", "0.666424", "0.66626704", "0.6659731", "0.6652549", "0.6650015", "0.6649954", "0.66191965", "0.6615175", "0.6614644", "0.66122055", "0.661039", "0.66001236", "0.65895075", "0.6577143", "0.65751696", "0.6570141", "0.6565192", "0.6559724", "0.65556854", "0.6535473", "0.65348274", "0.6523656", "0.6509799" ]
0.6929496
54
When cache is enabled, records the current request response json content in the cache file.
def set_cached_response(self) -> None:
    if self.get_caching_duration() > 0:  # if caching is enabled for this request
        json_response = self._request_result.json()
        with open(self.cache_file_name, 'w') as json_file:
            json.dump(json_response, json_file, indent=4)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cache():\n if request.method == 'GET':\n cache_info = in_water.cache_info()\n return json.dumps({\n 'hits': cache_info.hits,\n 'misses': cache_info.misses,\n 'maxsize': cache_info.maxsize,\n 'currsize': cache_info.currsize,\n })", "def saveCacheFile(self):\n with open(self.cachePath, 'w', encoding='utf-8') as outfile:\n json.dump(self.cacheData, outfile)", "def write_to_cache(self):\n data = {'data': self.data, 'inventory': self.inventory}\n json_data = json.dumps(data, indent=2)\n\n with open(self.cache_filename, 'w') as cache:\n cache.write(json_data)", "def store_response_in_cache(responsefile, response):\n global __response_cache\n log.debug(\"Storing data from flats (%s) in cache\" % responsefile)\n __response_cache[responsefile] = {}\n modtime = str(os.path.getmtime(responsefile))\n __response_cache[responsefile][modtime] = response", "def save(self):\n if self._cache is not None:\n with open(self.cache_path, 'w') as cache_file:\n json.dump(self._cache, cache_file)", "def write_to_cache(self, data, filename):\n json_data = self.json_format_dict(data, True)\n cache = open(filename, 'w')\n cache.write(json_data)\n cache.close()", "def write_cache(self):\n self.__config.open_file(\n self.__cache_file, \"w\", lambda f: json.dump(self.cache, f)\n )\n self.__dirty = False", "def get(self):\n CACHE_KEY = 'sources'\n if not memcache.get(CACHE_KEY):\n logging.info('Populating cache.')\n feeds = Feed.all().order('name')\n feed_list = []\n for feed in feeds:\n feed_list.append(feed.ToDict())\n memcache.add(CACHE_KEY, simplejson.dumps(feed_list), 600)\n logging.info('Using cache.')\n logging.info(memcache.get(CACHE_KEY))\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(memcache.get(CACHE_KEY))", "def nocache(response):\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, max-age=0'\n return response", "def cache_response(self, request, response, body=None):\r\n # From httplib2: Don't cache 206's since we aren't going to\r\n # handle byte range requests\r\n if response.status not in [200, 203]:\r\n return\r\n\r\n response_headers = CaseInsensitiveDict(response.headers)\r\n\r\n cc_req = self.parse_cache_control(request.headers)\r\n cc = self.parse_cache_control(response_headers)\r\n\r\n cache_url = self.cache_url(request.url)\r\n\r\n # Delete it from the cache if we happen to have it stored there\r\n no_store = cc.get('no-store') or cc_req.get('no-store')\r\n if no_store and self.cache.get(cache_url):\r\n self.cache.delete(cache_url)\r\n\r\n # If we've been given an etag, then keep the response\r\n if self.cache_etags and 'etag' in response_headers:\r\n self.cache.set(\r\n cache_url,\r\n self.serializer.dumps(request, response, body=body),\r\n )\r\n\r\n # Add to the cache if the response headers demand it. 
If there\r\n # is no date header then we can't do anything about expiring\r\n # the cache.\r\n elif 'date' in response_headers:\r\n # cache when there is a max-age > 0\r\n if cc and cc.get('max-age'):\r\n if int(cc['max-age']) > 0:\r\n self.cache.set(\r\n cache_url,\r\n self.serializer.dumps(request, response, body=body),\r\n )\r\n\r\n # If the request can expire, it means we should cache it\r\n # in the meantime.\r\n elif 'expires' in response_headers:\r\n if response_headers['expires']:\r\n self.cache.set(\r\n cache_url,\r\n self.serializer.dumps(request, response, body=body),\r\n )", "def cache(self):\n if self._cache is None:\n with open(self.cache_path, 'r') as cache_file:\n self._cache = json.load(cache_file)\n return self._cache", "def process_response(self, request, response):\n #if not self._should_update_cache(request, response):\n # # We don't need to update the cache, just return.\n # return response\n\n if response.streaming or response.status_code != 200:\n return response\n \n # Don't cache responses that set a user-specific (and maybe security\n # sensitive) cookie in response to a cookie-less request.\n if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'):\n return response\n\n # Try to get the timeout from the \"max-age\" section of the \"Cache-\n # Control\" header before reverting to using the default cache_timeout\n # length.\n timeout = get_max_age(response)\n if timeout == None:\n timeout = self.cache_timeout\n elif timeout == 0:\n # max-age was set to 0, don't bother caching.\n return response\n patch_response_headers(response, timeout)\n if timeout:\n cache_key = \"%s-%s\" % (self.key_prefix, request.get_full_path())\n #raise ValueError(cache_key)\n if hasattr(response, 'render') and isinstance(response.render, collections.Callable):\n response.add_post_render_callback(\n lambda r: cache._cache.set(cache_key.encode(\"utf-8\"), zlib.compress(r.content, 9), timeout)\n )\n else:\n # we use the highest compression level, because since it is cached we hope for it to pay off\n cache._cache.set(cache_key.encode(\"utf-8\"), zlib.compress(response.content, 9), timeout)\n return response", "def save_cache(self):\n with open(self.get_cache_filename(), 'wb+') as f:\n out = dict()\n out['timestamp'] = self.get_last_update()\n out['cache'] = self.cache\n f.write(pickle.dumps(out))", "def do_cache(*args, **kws):\n resp = self.response\n out = resp.out\n namespace = ''\n if self.cache_nsfuncs.get(func, None):\n namespace = self.cache_nsfuncs[func](self.request)\n p = urlsplit(self.request.url)[2]\n c = memcache.get(p, namespace)\n if c:\n # in case cache is found, use it \n # instead of rendering by calling function.\n out.write(c['body'])\n for k, i in c['hdr'].items():\n resp.headers[k] = i\n return\n\n r = func(*args, **kws)\n expire = self.cache_expires.get(func, 0)\n if expire == 0:\n return\n out.seek(0)\n try:\n p = urlsplit(self.request.url)[2]\n memcache.set(p, {'hdr':resp.headers,'body':out.read()},\n expire, namespace=namespace)\n logging.debug('%s is cahed' % p)\n except:\n memcache.flush_all()\n logging.debug('memcache is flashed.')", "def cached():\n ##from pprint import pprint\n # let's restrict this to the api server, to avoid shenanigans\n root_relative_url = request.env.request_uri.split('/cached/')[-1]\n ##pprint('ROOT-RELATIVE URL: ')\n ##pprint(root_relative_url)\n fetch_url = '%s://%s/%s' % (request.env.wsgi_url_scheme, request.env.http_host, root_relative_url)\n ##pprint('PROXYING TO SIMPLE URL: ')\n ##pprint(fetch_url)\n\n # 
permissive CORS handling of requests from another domain (e.g. tree.opentreeoflife.org)\n if request.env.request_method == 'OPTIONS':\n if request.env.http_access_control_request_method:\n response.headers['Access-Control-Allow-Methods'] = request.env.http_access_control_request_method\n if request.env.http_access_control_request_headers:\n response.headers['Access-Control-Allow-Headers'] = request.env.http_access_control_request_headers\n ##pprint('RESPONDING TO OPTIONS')\n raise HTTP(200, **(response.headers))\n\n # N.B. This try/except block means we'll cache errors. For now, the fix is to clear the entire cache.\n try:\n # fetch the latest IDs as JSON from remote site\n import simplejson\n\n if fetch_url.startswith('//'):\n # Prepend scheme to a scheme-relative URL\n fetch_url = \"http:%s\" % fetch_url\n\n fetch_args = request.vars # {'startingTaxonOTTId': \"\"}\n\n # TODO: For more flexibility, we should examine and mimic the original request (HTTP verb, headers, etc)\n\n # this needs to be a POST (pass fetch_args or ''); if GET, it just describes the API\n # N.B. that gluon.tools.fetch() can't be used here, since it won't send \"raw\" JSON data as treemachine expects\n req = urllib2.Request(url=fetch_url, data=simplejson.dumps(fetch_args), headers={\"Content-Type\": \"application/json\"}) \n the_response = urllib2.urlopen(req).read()\n ##pprint('RESPONSE:')\n ##pprint(the_response)\n return the_response\n\n except Exception, e:\n # throw 403 or 500 or just leave it\n return ('ERROR', e.message)", "def write_cache(cache_dict):\n try:\n with open(CACHE_FILENAME, 'w', encoding=\"utf-8\") as cache_file:\n cache_file.write(json.dumps(cache_dict))\n except:\n print(\"error when executing write_cache()\")", "def build_response(self, request, response, from_cache=False):\r\n if not from_cache and request.method == 'GET':\r\n if response.status == 304:\r\n # We must have sent an ETag request. This could mean\r\n # that we've been expired already or that we simply\r\n # have an etag. 
In either case, we want to try and\r\n # update the cache if that is the case.\r\n cached_response = self.controller.update_cached_response(\r\n request, response\r\n )\r\n\r\n if cached_response is not response:\r\n from_cache = True\r\n\r\n response = cached_response\r\n else:\r\n # Wrap the response file with a wrapper that will cache the\r\n # response when the stream has been consumed.\r\n response._fp = CallbackFileWrapper(\r\n response._fp,\r\n functools.partial(\r\n self.controller.cache_response,\r\n request,\r\n response,\r\n )\r\n )\r\n\r\n resp = super(CacheControlAdapter, self).build_response(\r\n request, response\r\n )\r\n\r\n # See if we should invalidate the cache.\r\n if request.method in self.invalidating_methods and resp.ok:\r\n cache_url = self.controller.cache_url(request.url)\r\n self.cache.delete(cache_url)\r\n\r\n # Give the request a from_cache attr to let people use it\r\n resp.from_cache = from_cache\r\n\r\n return resp", "def setup(cls, path, cache_filename, **kwargs):\n cache_filepath = os.path.join(path, cache_filename)\n if not os.path.isfile(cache_filepath):\n with open(cache_filepath, 'w') as cache_file:\n json.dump({'start_time': None}, cache_file)", "def write_cache(feed):\n if ARGV.get(NOCACHE_OPT):\n return\n CACHE['feed'] = feed\n CACHE['last-request'] = str(time.time())\n CACHE['max-age'] = feed.headers['Cache-Control'].split('=')[1]\n save_datfile()", "def update_cached_response(self, request, response):\r\n cache_url = self.cache_url(request.url)\r\n\r\n cached_response = self.serializer.loads(request, self.cache.get(cache_url))\r\n\r\n if not cached_response:\r\n # we didn't have a cached response\r\n return response\r\n\r\n # Lets update our headers with the headers from the new request:\r\n # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1\r\n #\r\n # The server isn't supposed to send headers that would make\r\n # the cached body invalid. But... 
just in case, we'll be sure\r\n # to strip out ones we know that might be problmatic due to\r\n # typical assumptions.\r\n excluded_headers = [\r\n \"content-length\",\r\n ]\r\n\r\n cached_response.headers.update(\r\n dict((k, v) for k, v in response.headers.items()\r\n if k.lower() not in excluded_headers)\r\n )\r\n\r\n # we want a 200 b/c we have content via the cache\r\n cached_response.status = 200\r\n\r\n # update our cache\r\n self.cache.set(\r\n cache_url,\r\n self.serializer.dumps(request, cached_response),\r\n )\r\n\r\n return cached_response", "def cache():\n is_conditional = request.headers.get(\"If-Modified-Since\") or request.headers.get(\n \"If-None-Match\"\n )\n\n if is_conditional is None:\n response = view_get()\n response.headers[\"Last-Modified\"] = http_date()\n response.headers[\"ETag\"] = uuid.uuid4().hex\n return response\n else:\n return status_code(304)", "def _retrieveCachedData(self):", "def cache(self):\n return self.payload.setdefault(self._CACHE_ATTRIBUTE, {})", "def never_cache_preview(self, response):\n response.cache_control.max_age = 0\n response.cache_control.no_cache = True\n response.cache_control.must_revalidate = True\n response.cache_control.no_store = True\n return response", "def write_data_cache(self, data):\n assert data, 'Must input a non-empty dictionary.'\n with open(self.cache_filename, 'w') as file_cache:\n json.dump(data, file_cache, sort_keys=True, indent=4, ensure_ascii=False)\n self.data = data # must assign the new data or risk problems", "def handle_response(self, response):\n\n self._tmp_request_args = {}\n self.cache_response(response)", "def refresh_cache_file(form, model, is_created):\n common.save_serialized_file()\n app.global_content = common.load_cached()", "def read_data_cache_file(self):\n with open(self.cache_filename, 'r') as json_data:\n return json.load(json_data)", "def add_cache(self, key_path, content, t_mserver):\n\t\tif key_path not in self.cache:\n\t\t\tself.cache[key_path] = {'time_validated': int(time.time()),\n\t\t\t\t\t\t\t\t\t't_mclient' : int(t_mserver),\n\t\t\t\t\t\t\t\t\t'content': content}\t\n\t\t\tprint(\"self.cache[key_path]: \",self.cache[key_path]['content'])", "def save_data(self):\n with open(self.storage_path, 'w') as cache_file:\n json.dump(self.data, cache_file)", "def write_to_cache(self):\n return False", "def clear_response_cache():\n global __response_cache\n __response_cache = {}", "def cache_to_json(self):\n # {\"team_number\": 0000000, \"student1\": \"\", \"student2\": \"\", \"student3\": \"\",\n # \"advisor_type\": \"\", \"advisor\": \"\", \"school\": \"\", \"prize\": \"\"}\n json_details = {\"fields\": [\"teams counts\", \"teams numbers\",\n \"student1\", \"student2\", \"student3\",\n \"advisor_type\", \"advisor\", \"school\", \"prize\"],\n \"teams counts\": 0,\n \"teams numbers\": [],\n \"info\": []}\n self.cache_result_file.seek(0, 0)\n lines = self.cache_result_file.readlines()\n json_details[\"teams counts\"] = len(lines)\n for line in lines:\n info = eval(line)\n json_details[\"teams numbers\"].append(info[\"team_number\"])\n json_details[\"info\"].append(info)\n\n with open(self.result_filename, \"w\") as f:\n json.dump(obj=json_details, fp=f, indent=4)\n\n self.logger.debug(\"Info Result Updated to JSON\")", "def save_cache(cache_key, version, data):\n\n # Save these results for the next run. 
Include the version information and nest the user\n # information inside a \"users\" key from the start, because experience says if we don't do this\n # then the next release will add a feature that requires a change in the data layout, and then\n # we'll have to write a data migration or something.\n\n cache_data = {cache_key: data, \"version\": version}\n\n cache_file(cache_key).write_text(json.dumps(cache_data, indent=2))", "def cache_session(self):\n # always save (to update timeout)\n self.i('Cache Session')\n with open(self.cache_file_path, \"wb\") as file:\n pickle.dump(self, file)", "def use_cached_files(self, cache_key):\r\n pass", "def cache(self, file_name, content):\n self.files_loaded[file_name] = content", "async def save_response(self, key: str, response: ClientResponse):\n if not self.is_cacheable(response):\n return\n logger.info(f'Saving response for key: {key}')\n\n expires = self.get_expiration_date(response)\n cached_response = await CachedResponse.from_client_response(response, expires)\n await self.responses.write(key, cached_response)\n\n # Alias any redirect requests to the same cache key\n for r in response.history:\n await self.redirects.write(self.create_key(r.method, r.url), key)", "def never_cache_preview(response):\n response.cache_control.max_age = 0\n response.cache_control.no_cache = True\n response.cache_control.must_revalidate = True\n response.cache_control.no_store = True\n return response", "def dump():\n global CACHE\n return CACHE", "def request_cache(self):\n return self._request_cache", "def cache_path(self):", "def cache_path(self):", "def loadCacheFile(self):\n if not os.path.exists(self.cachePath):\n self.initCacheFile()\n else:\n with open(self.cachePath) as json_cacheFile:\n self.cacheData = json.load(json_cacheFile)", "def cache_data(self):\n # Initialize key variables\n result = self.data['cache_data']\n return result", "def cache_movie_json(title: str, json_response: Dict) -> None:\n fpath = os.path.join(OMDB_JSONS_DIR, title)\n with open(fpath, 'a+') as fp:\n json.dump(json_response, fp)", "def send(self, request, **kw):\r\n if request.method == 'GET':\r\n cached_response = self.controller.cached_request(request)\r\n if cached_response:\r\n return self.build_response(request, cached_response, from_cache=True)\r\n\r\n # check for etags and add headers if appropriate\r\n request.headers.update(self.controller.conditional_headers(request))\r\n\r\n resp = super(CacheControlAdapter, self).send(request, **kw)\r\n\r\n return resp", "def cache(self):\n return {'output': self.output, 'series': self.series}", "def __write_cache(self, fileName, returnVal):\n # Cache miss\n if self.__log:\n self.__logger.info(f\"Cache miss: {fileName}\")\n self.__handle_cache_size()\n\n with open(fileName, \"wb\") as f:\n packed = pickle.dumps(returnVal)\n final = self.__handle_compression(packed)\n f.write(final)\n\n node = os.path.relpath(fileName, \"cache\")\n self.__recentAccessed.insert(0, node)", "def _write_cache_file(self, data):\n\n with open(self.cache_file, mode='wb') as f:\n f.write(data)\n\n self.log.info(f\"Cached facilities at {self.cache_file}\")", "def cache_file(cache_key):\n\n return MASTOOLS_DIR / f\"{cache_key}_cache.json\"", "def write_ocsp_response_cache_file(filename, ocsp_validation_cache):\n logger = getLogger(__name__)\n logger.debug('writing OCSP response cache file')\n file_cache_data = {}\n _encode_ocsp_response_cache(ocsp_validation_cache, file_cache_data)\n with codecs.open(filename, 'w', encoding='utf-8', errors='ignore') as f:\n 
json.dump(file_cache_data, f)", "def initCacheFile(self):\n self.cacheData = {\"data\": []}\n for i in range(int(self.frameCount)):\n self.cacheData[\"data\"].append({\"isLoaded\": False,\n \"faces\": []})\n self.saveCacheFile()", "def save_output_in_cache(name, filename, output):\n cache_filename = _get_cache_filename(name, filename)\n with _open_for_write(cache_filename) as f:\n f.write(output)", "def write_cache(self, filename=None):\n with open(self._cache_filename(filename), \"wb\") as fh:\n pickle.dump(self, fh)", "def data(self):\n if self._data is None:\n try:\n with open(self.storage_path, 'r') as cache_file:\n self._data = json.load(cache_file)\n except FileNotFoundError:\n self._data = {}\n return self._data", "def get_cache_path(self):", "def get_cache_path(self):", "def output(self):\n return self.__cache", "def _store_cache(self):\n assert self._already_generated, \"Must generate before storing to cache\"\n\n if self.variant_unit is not None:\n logger.warning(\"Cannot cache once variant_unit has been set\")\n return\n\n try:\n os.mkdir(os.path.dirname(self._cache_key))\n except FileExistsError:\n # Easier than checking and risking race conditions\n pass\n\n with open(self._cache_key, 'w') as f:\n json.dump(self.rows, f)\n\n logger.debug(\"Stored cache to {}\".format(self._cache_key))", "def cache_handler(event, context):\n events.cache()", "def disable_caching(self):\n\n def after_request(r: flask.Response):\n if 'Cache-Control' not in r.headers:\n r.headers['Cache-Control'] = 'no-store'\n return r\n\n self.after_request(after_request)", "def flush_cache(self):\n if self.cache_modified:\n self.cache_manager.write(self.cache_file, self.cache)", "def render_cached(self, cache_key, render_cls, max_age, cache_time=0, *args, **kwargs):\r\n\r\n # Default the cache to be the same as our max age if not\r\n # supplied.\r\n cache_time = cache_time or max_age\r\n\r\n # Postfix the cache key with the subreddit name\r\n # This scopes all the caches by subreddit\r\n cache_key = cache_key + '-' + c.site.name\r\n\r\n # Get the etag and content from the cache.\r\n hit = g.rendercache.get(cache_key)\r\n if hit:\r\n etag, content = hit\r\n else:\r\n # Generate and cache the content along with an etag.\r\n content = render_cls(*args, **kwargs).render()\r\n etag = '\"%s\"' % datetime.utcnow().isoformat()\r\n g.rendercache.set(cache_key, (etag, content), time=cache_time)\r\n\r\n # Check if the client already has the correct content and\r\n # throw 304 if so. Note that we want to set the max age in the\r\n # 304 response, we can only do this by using the\r\n # pylons.response object just like the etag_cache fn does\r\n # within pylons (it sets the etag header). Setting it on the\r\n # c.response won't work as c.response isn't used when an\r\n # exception is thrown. 
Note also that setting it on the\r\n # pylons.response will send the max age in the 200 response\r\n # (just like the etag header is sent in the response).\r\n response.headers['Cache-Control'] = 'max-age=%d' % max_age\r\n etag_cache(etag)\r\n\r\n # Return full response using our cached info.\r\n c.response.content = content\r\n return c.response", "def read_cache(self):\n with open(self.get_cache_filename(), 'rb') as f:\n data = pickle.loads(f.read())\n self.timestamp = data['timestamp']\n self.cache = data['cache']", "def process_response(self, req, resp, resource, req_succeeded):\n\n # Step 1: for 'rest-based' and 'rest&time-based' eviction strategies the\n # POST/PATCH/PUT/DELETE calls are never cached and even more they\n # invalidate the record cached by the GET method\n if self.cache_config['CACHE_EVICTION_STRATEGY'] in [CacheEvictionStrategy.rest_based,\n CacheEvictionStrategy.rest_and_time_based] \\\n and req.method.upper() in [HttpMethods.POST,\n HttpMethods.PATCH,\n HttpMethods.PUT,\n HttpMethods.DELETE]:\n # get the cache key created by the GET method (assuming there was one)\n key = self.generate_cache_key(req, method='GET')\n self.cache.delete(key)\n return\n\n # Step 2: if it is marked to be cached, but has not yet been cached\n # then we cache it\n if hasattr(req.context, 'cache') and req.context.cache \\\n and (not hasattr(req.context, 'cached') or not req.context.cached):\n key = self.generate_cache_key(req)\n value = self.serialize(req, resp, resource)\n\n # for the REST-based strategy there is no timeout, the cached record never expires\n if self.cache_config['CACHE_EVICTION_STRATEGY'] in [CacheEvictionStrategy.rest_based]:\n # timeout 0 - never expires\n timeout = 0\n else:\n # for the time-based and rest-and-time-based eviction strategy the\n # cached record expires\n timeout = req.context.cache_timeout if hasattr(req.context, 'cache_timeout') else 600\n\n self.cache.set(key, value, timeout=timeout)", "def _invalidate_http_cache(self):\n self._requests_cache = {}", "def save(cache_file: Path, content: Any, *, mode: str = None):\n if mode == 'binary':\n cache_file.write_bytes(content)\n else:\n if mode == 'json':\n content = json.dumps(content, cls=JSONEncoder)\n cache_file.write_text(content)", "def getData(self, local_cache):", "def cache(self):\n return self.__cache", "def cache(self):\n return self.__cache", "def cache(self):\n return self.__cache", "def cache(self):\n return self.__cache", "def dispatch(self, *args, **kwargs):\n cache_allowed = self.is_cache_allowed()\n logging.debug('%s: caching is %s', self.request.path, 'allowed' if cache_allowed else 'NOT allowed', )\n\n response = None\n cache_hit = False\n if cache_allowed: # get from cache\n response = yield self.get_cached()\n cache_hit = True if response is not None else False\n logging.debug('%s: cache %s', self.request.uri, 'HIT' if cache_hit else 'MISS')\n\n if response is None: # get actual\n response = yield self.proxy_async_request()\n\n if cache_allowed:\n if 200 <= response.code <= 299: # store into cache\n yield self.set_cache(response)\n logging.debug('%s: status %d - stored in cache', self.request.uri, response.code)\n else:\n logging.debug('%s: error status %d', self.request.uri, response.code)\n\n # output proxied response\n self.process_response(response)\n self.finish()\n\n if cache_allowed:\n if cache_hit: # renew cache if cache hit\n yield self.renew_cache(self.proxy_async_request)\n logging.debug('%s: slow endpoint, cache %s', self.request.path, 'updated' if cache_hit else 'NOT 
updated')", "def prefill_cache():\n print(\"Prefilling cache.\")\n print(\"\\rListing ATS files...\", end=\"\")\n file_names = list(ats_files())\n print(\"\\rListing ATS files: done.\")\n index = 0\n files_count = len(file_names)\n cached_count = 0\n for file_name in file_names:\n index += 1\n print(\"\\rHandling ATS file #%i of %i\" % (index, files_count), end=\"\")\n if get_json(file_name) is not None:\n cached_count += 1\n print(\"\\nDone: %i file(s) cached.\" % cached_count)", "def get_cache(self):\n return self.cache", "def get_response_from_cache(responsefile):\n global __response_cache\n\n if responsefile not in __response_cache:\n return\n\n if not goodfile(responsefile):\n try:\n del __response_cache[responsefile]\n except KeyError: # pragma: no cover\n pass\n return\n\n modtime = str(os.path.getmtime(responsefile))\n if modtime not in __response_cache.get(responsefile, {}):\n return\n\n log.debug(\"Retrieving data from response file (%s) in cache\" %\n responsefile)\n return __response_cache.get(responsefile, {}).get(modtime)", "def load_from_cache(self):\n try:\n with open(self.cache_filename, 'r') as cache:\n json_data = cache.read()\n data = json.loads(json_data)\n except IOError:\n data = {'data': {}, 'inventory': {}}\n\n self.data = data['data']\n self.inventory = data['inventory']", "def get(self):\n CACHE_KEY = 'topics'\n if not memcache.get(CACHE_KEY):\n logging.info('Populating cache.')\n topics = Topic.all().order('name')\n topic_list = []\n for topic in topics:\n topic_list.append(topic.ToDict())\n memcache.add(CACHE_KEY, simplejson.dumps(topic_list), 600)\n logging.info('Using cache.')\n logging.info(memcache.get(CACHE_KEY))\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(memcache.get(CACHE_KEY))", "def json_to_cache(new_json: Dict, file_name: str) -> None:\n\n json_path = os.path.join(CACHE_DIR, file_name)\n ensure_dir(json_path)\n with open(json_path, \"w\") as outfile:\n json.dump(new_json, outfile, ensure_ascii=False)", "def cache(self):\n return f'var/cache/{self.environment}'", "def cache_response(hash_v, response, show_progress=True):\n f_name = os.path.join(CACHE_DIR, hash_v)\n if os.path.isfile(f_name):\n response = open(f_name).read()\n return response\n elif response is not None:\n with open(f_name, \"w+\") as out_f:\n if show_progress:\n response = tqdm.tqdm(response)\n out_f.write(\"\".join(response))\n return open(f_name).read()", "def write_cache(self, write_cache: SmartSsdReadLookahead):\n\n self._write_cache = write_cache", "def test_cache_retrieved(self):\n read = self.client.get(\"/read/froLit/jns915/jns1856/ciham-fro1/1\")\n data = read.data.decode()\n self.assertIn(\n '<span class=\"expan\">et </span>', data,\n \"Text content should be transformed\"\n )\n self.assertIn(\n 'Facsimilaire', data,\n \"Other content should be added\"\n )\n\n cached = self.cache.get(\"urn:cts:froLit:jns915.jns1856.ciham-fro1:1\").decode()\n self.assertIn('<aside class=\"text-left\">', cached, \"Assert cache is made\")\n\n with mock.patch(\"nemo_xslttwo_plugin.shell\") as shell:\n read = self.client.get(\"/read/froLit/jns915/jns1856/ciham-fro1/1\")\n cached_response = read.data.decode()\n self.assertEqual(\n cached_response, data,\n \"Text content should the same in cache\"\n )\n self.assertEqual(\n shell.call_count, 0,\n \"Shell should not be called because we use cache\"\n )", "def store_cache(base_url, data, path=\"logs/\"):\n\n # Convert URL to filename and write html content into that file\n url_filename = 
url_to_filename(base_url)\n filename = f\"{path}CACHE-{url_filename}.html\"\n f = open(filename, \"w+\")\n f.write(data)\n f.close()", "def cache_key(self):\n\n return \"{}.json\".format(self.path)", "def cache_results(self):\n self.cache_manager.cache_results(\n self.parser,\n self.query,\n self.search_engine_name,\n self.scrape_method,\n self.page_number,\n db_lock=self.db_lock\n )", "def docache(minutes=5, content_type='application/json; charset=utf-8'):\n def fwrap(f):\n @wraps(f)\n def wrapped_f(*args, **kwargs):\n r = f(*args, **kwargs)\n then = datetime.now() + timedelta(minutes=minutes)\n rsp = Response(r, content_type=content_type)\n rsp.headers.add('Expires', then.strftime(\"%a, %d %b %Y %H:%M:%S GMT\"))\n rsp.headers.add('Cache-Control', 'public,max-age=%d' % int(60 * minutes))\n return rsp\n return wrapped_f\n return fwrap", "def load_cache():\n return {}", "async def get_result(request):\n job_id = request.match_info['job_id']\n r = redis.Redis(\n host=os.environ['REDIS_HOST'],\n port=6379,\n decode_responses=True,\n )\n if not r.exists(job_id):\n return web.HTTPNotFound(text='Results are unavailable.')\n output_id = r.get(job_id)\n filename = output_id + '.json'\n try:\n with open(os.path.join(CACHE_DIR, filename), 'r') as f:\n response = json.load(f)\n except FileNotFoundError:\n # Redis is out-of-sync with file system. Remove the offending key.\n r.delete(job_id)\n return web.HTTPNotFound(text='Results are unavailable.')\n return web.json_response(response, dumps=functools.partial(json.dumps, indent=4))", "def _encode_ocsp_response_cache(ocsp_response_cache, ocsp_response_cache_json):\n logger = getLogger(__name__)\n logger.debug('encoding OCSP reponse cache to JSON')\n for hkey, (current_time, ocsp_response) in \\\n ocsp_response_cache.items():\n k = b64encode(der_encoder.encode(_encode_cert_id_key(hkey))).decode(\n 'ascii')\n v = b64encode(ocsp_response).decode('ascii')\n ocsp_response_cache_json[k] = (current_time, v)", "def _cache_response(self, packet):\n self.operator.update_message(packet.message_id, packet.from_node, packet.ret_parameters)", "def hook(response, *args, **kwargs):\r\n if not getattr(response, 'from_cache', False):\r\n throttle.wait(response.url)\r\n print('Downloading1:', response.url)\r\n else:\r\n print('Returning from cache:', response.url)\r\n return response", "def handle_request(request, client, stat):\n\n # Parse headers\n print(\"Request:\"+request)\n headers = request.split('\\n')\n re = Request(headers=headers)\n\n # Fazer log do sucedido\n add_log(client.getpeername(), re.path)\n\n # Ir buscar a cache o ficheiro\n response = stat.get_from_cache(re.path)\n\n if response is not None:\n return response\n\n # Se nao houver a resposta em cache entao vai criar uma nova\n time.sleep(0.1)\n\n try:\n if re.isPrivate():\n response = Response(status=\"HTTP/1.0 403 Forbidden\", body=\"Private link\",\n connectionType=\"close\")\n stat.add_cache(response, re.path)\n return response\n\n body = re.getContent()\n\n # Restorna so o cabeçalho\n if re.verbo == \"HEAD\":\n response = Response(status=\"HTTP/1.0 200 OK\", connectionType=\"close\", length=len(body))\n stat.add_cache(response, re.path)\n return response\n\n # If is a Bad Request\n except PermissionError:\n response = Response(status=\"HTTP/1.0 400 Bad Request\", body=\"Bad Request\", connectionType=\"close\")\n stat.add_cache(response, re.path)\n return response\n\n # If the file doesn't exist\n except FileNotFoundError:\n response = Response(status=\"HTTP/1.0 404 Not Found\", body=\"File Not 
Found\", connectionType=\"close\")\n stat.add_cache(response, re.path)\n return response\n\n # Return response\n response = Response(status=\"HTTP/1.0 200 OK\", body=body, contentType=re.filetype,\n connectionType=re.connectionType, length=len(body))\n stat.add_cache(response, re.path)\n return response", "def cache(self, expire = 0, namespace_func = None):\n from google.appengine.api import memcache\n \n def decorate(func, *args, **kws):\n \"\"\"\n A function returned as a object in load time,\n which returns inner function do_decorate().\n \"\"\"\n # setting cache expires for given decorated function,\n # if argument 'expire' is given.\n if expire:\n self.cache_expires[func] = expire\n else:\n self.cache_expires[func] = self.get_config().page_cache_expire\n if namespace_func:\n self.cache_nsfuncs[func] = namespace_func\n\n def do_cache(*args, **kws):\n \"\"\"\n A function works every time decorated functions are called.\n \"\"\"\n resp = self.response\n out = resp.out\n namespace = ''\n if self.cache_nsfuncs.get(func, None):\n namespace = self.cache_nsfuncs[func](self.request)\n p = urlsplit(self.request.url)[2]\n c = memcache.get(p, namespace)\n if c:\n # in case cache is found, use it \n # instead of rendering by calling function.\n out.write(c['body'])\n for k, i in c['hdr'].items():\n resp.headers[k] = i\n return\n\n r = func(*args, **kws)\n expire = self.cache_expires.get(func, 0)\n if expire == 0:\n return\n out.seek(0)\n try:\n p = urlsplit(self.request.url)[2]\n memcache.set(p, {'hdr':resp.headers,'body':out.read()},\n expire, namespace=namespace)\n logging.debug('%s is cahed' % p)\n except:\n memcache.flush_all()\n logging.debug('memcache is flashed.')\n return do_cache\n\n return decorate", "def files():\n return get_cached(\"files.json\")", "def SetCacheHeaders(self, response):\n headers = framework_helpers.StaticCacheHeaders()\n for name, value in headers:\n response.headers[name] = value", "def set_cache_data(self) -> None:\n if isinstance(self.tx_storage, TransactionCacheStorage):\n hits = self.tx_storage.stats.get(\"hit\")\n misses = self.tx_storage.stats.get(\"miss\")\n if hits:\n self.transaction_cache_hits = hits\n if misses:\n self.transaction_cache_misses = misses", "def cache_question(question, answers):\n if config.CACHE_QUESTION:\n with codecs.open(config.QUESTION_CACHE_FILE, \"a\", \"utf-8\") as out:\n out.write(json.dumps({\n str(uuid.uuid4()): {'question': question, 'answers': answers}\n }, ensure_ascii=False) + '\\n')", "def cached_json_get(url):\n return requests.get(url).json()" ]
[ "0.7289211", "0.71144605", "0.7006052", "0.68958193", "0.688203", "0.68352675", "0.66610754", "0.6637904", "0.6591506", "0.6493748", "0.64900154", "0.6430256", "0.6393354", "0.62931097", "0.6260045", "0.62439954", "0.62336665", "0.6196039", "0.6179128", "0.615851", "0.61500543", "0.61453485", "0.6140379", "0.6133785", "0.6130145", "0.6109877", "0.6098252", "0.6089862", "0.6065006", "0.6062995", "0.6057159", "0.6056973", "0.60474086", "0.6023768", "0.60209674", "0.6003611", "0.59855527", "0.59817237", "0.59719753", "0.59591275", "0.59346867", "0.5925691", "0.5925691", "0.5925119", "0.59235394", "0.5915061", "0.5898588", "0.5895782", "0.58558357", "0.5854214", "0.5851331", "0.5828313", "0.58140534", "0.58135486", "0.5805387", "0.5802552", "0.58006793", "0.58006793", "0.5798189", "0.5778573", "0.57740337", "0.5770716", "0.57647413", "0.57484955", "0.570838", "0.57004654", "0.5670589", "0.5658507", "0.5627481", "0.5623727", "0.5623727", "0.5623727", "0.5623727", "0.5619225", "0.5607732", "0.55960846", "0.5594986", "0.5585396", "0.5584001", "0.55809104", "0.55722284", "0.5567662", "0.5565096", "0.5563966", "0.5554732", "0.5553752", "0.5548086", "0.5545767", "0.55446744", "0.55399567", "0.5533345", "0.5522744", "0.55196846", "0.55103093", "0.5493272", "0.54738706", "0.547178", "0.5451215", "0.54449755", "0.5441044" ]
0.83422124
0
Method processing the json content of a request, and returning a valid RestoRequestResult.
def process_json_result(self, json_result: dict) -> RestoRequestResult:
    try:
        resto_response = self.resto_response_cls(self, json_result)
    except RestoResponseError:
        msg = 'Response to {} from {} resto server cannot be understood.'
        # TOOD: move elsewhere ?
        raise IncomprehensibleResponse(msg.format(self.get_server_name()))
    return resto_response.as_resto_object()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _process(self, request, processor, operation):\n \n params = request.REQUEST.get('json', None)\n if params is None :\n params = ApiFacade._convertMergeDict(request.REQUEST)\n else :\n params=params\n params = json.loads(params)\n \n resp_body = processor.call(operation, params, 'json')\n return HttpResponse(resp_body, mimetype='application/json')", "def process_request(self, request):\n\n # Does the request contain a JSON payload?\n content_type = request.META.get('CONTENT_TYPE', '')\n if content_type != '' and 'application/json' in content_type:\n\n # Ignore empty payloads (e.g. for deletes)\n content_length = 0\n if request.META.get('CONTENT_LENGTH', '') != '':\n content_length = int(request.META.get('CONTENT_LENGTH', 0))\n if content_length > 0:\n try:\n # Replace request.POST with flattened dictionary from JSON\n decoded_dict = simplejson.loads(request.raw_post_data)\n request.POST = request.POST.copy()\n request.POST = self._flatten_dict(decoded_dict)\n except:\n return HttpResponse('Invalid JSON', status=400)", "def request_json(self):\n try:\n return json.loads(self.request.body)\n except Exception:\n logging.info('No JSON payload in request body.')\n return {}", "def _invoke(self):\n\n # Accepting both ContentType and Content-Type headers. ContentType because Coral and Content-Type because,\n # well, it is just the html standard\n input_content_type = request.headers.get('ContentType', request.headers.get('Content-Type', JSON_CONTENT_TYPE))\n requested_output_content_type = request.headers.get('Accept', JSON_CONTENT_TYPE)\n\n # utf-8 decoding is automatic in Flask if the Content-Type is valid. But that does not happens always.\n content = request.get_data().decode('utf-8') if input_content_type in UTF8_CONTENT_TYPES else request.get_data()\n\n try:\n response_data, output_content_type = \\\n self.transformer.transform(content, input_content_type, requested_output_content_type)\n # OK\n ret_status = 200\n except Exception as e:\n ret_status, response_data = self._handle_invoke_exception(e)\n output_content_type = JSON_CONTENT_TYPE\n\n return Response(response=response_data,\n status=ret_status,\n mimetype=output_content_type)", "def reach_process_json():\n response = request.body.read().decode('utf-8')\n body = json.loads(response)\n json_str = body.get('json')\n rp = reach.process_json_str(json_str)\n if rp and rp.statements:\n stmts = stmts_to_json(rp.statements)\n res = {'statements': stmts}\n return res\n else:\n res = {'statements': []}\n return res", "def opt_engine_rest_api():\n request_json = request.get_json()\n return process_request(request_json)", "def execute(self) -> typing.Dict[str, typing.Any]:\n headers = {\n \"User-Agent\": \"{zenora.__name__} {zenora.__version__}\",\n \"Authorization\": f\"{self.token}\",\n }\n if self.headers:\n headers = self.headers\n\n if self.json:\n r = requests.request(\n method=self.method,\n url=self.url,\n headers=headers,\n json=self.json,\n )\n else:\n r = requests.request(\n method=self.method,\n url=self.url,\n headers=headers,\n data=self.form_data,\n )\n\n return raise_error_or_return(r) # type: ignore[return-value]", "def _process_input(data, context):\n if context.request_content_type == 'application/json':\n data = data.read().decode(\"utf-8\")\n return data if len(data) else ''\n raise ValueError('{{\"error\": \"unsupported content type {}\"}}'.format(\n context.request_content_type or \"unknown\"\n ))", "def ingest_json_body(request):\n # log.debug(request.body)\n try:\n data = json.loads(str(request.body, 
encoding='utf-8'))\n except Exception as e:\n log.error(log.exc(e))\n return None\n return data", "def _perform_request(self, req):\n \n res = req.content\n x = json.loads(res, object_hook=lambda d: namedtuple('X', d.keys())(*d.values()))\n return x", "def run_request(self, request=None):\r\n if request is None:\r\n request = self.request\r\n\r\n response = change_email_request(self.request)\r\n return json.loads(response.content)", "def _get_request_body(_request):\n return _request.json", "def _parse_request(self):\n if len(self.request.body) > 0:\n try:\n return tornado.escape.json_decode(self.request.body)\n except Exception:\n #Not Json, Using Form data\n return self.request.arguments\n else:\n return self.request.arguments", "def make_request(self, request_type: RequestTypes, payload: dict, url_extras: [str] = []) -> json:\n s = requests.Session()\n s.headers.update({\n \"Authorization\": \"Bearer %s\" % self.access_token,\n \"Content-Type\": \"application/json\"\n })\n url = self.url_base + self.get_url_end_string(url_extras)\n #print(url)\n if request_type == RequestTypes.POST:\n response = s.post(url, json=payload)\n elif request_type == RequestTypes.GET:\n response = s.get(url, json=payload)\n else:\n print(\"ERROR: '\" + request_type + \"' is not a valid request type\")\n exit(1)\n response_json = response.json()\n self.validate_response(response_json)\n return response_json", "def processRequest( self,json, data, headers, params ):\n\n retries = 0\n result = None\n\n while True:\n\n response = requests.request( 'post', self._url, json = json, data = data, headers = headers, params = params )\n\n if response.status_code == 429: \n\n print( \"Message: %s\" % ( response.json()['error']['message'] ) )\n\n if retries <= self._maxNumRetries: \n time.sleep(1) \n retries += 1\n continue\n else: \n print( 'Error: failed after retrying!' 
)\n break\n\n elif response.status_code == 200 or response.status_code == 201:\n\n if 'content-length' in response.headers and int(response.headers['content-length']) == 0: \n result = None \n elif 'content-type' in response.headers and isinstance(response.headers['content-type'], str): \n if 'application/json' in response.headers['content-type'].lower(): \n result = response.json() if response.content else None \n elif 'image' in response.headers['content-type'].lower(): \n result = response.content\n else:\n print( \"Error code: %d\" % ( response.status_code ) )\n print( \"Message: %s\" % ( response.json()['error']['message'] ) )\n\n break\n\n return result", "def _process_request(self, request_type, params, marker_elems=None):\r\n response = self.make_request(request_type, params, verb='POST')\r\n return self._process_response(response, marker_elems)", "def _process_request(self, request):\n try:\n self._validate_rpc_request(request)\n except ValueError as err:\n return self._build_rpc_error(None, RpcErrors.INVALID_REQUEST, err, keep_null_id=True)\n\n id = request.get('id', None)\n\n try:\n method = getattr(rpc, request['method'])\n except AttributeError as err:\n return self._build_rpc_error(id, RpcErrors.METHOD_NOT_FOUND, err)\n\n try:\n params = request.get('params', None)\n if params is None:\n result = method()\n elif isinstance(params, list):\n result = method(*params)\n elif isinstance(params, dict):\n result = method(**params)\n\n return self._build_rpc_result(id, result)\n\n except TypeError as err:\n return self._build_rpc_error(id, RpcErrors.INVALID_PARAMS, err)\n except Exception as err:\n return self._build_rpc_error(id, RpcErrors.INTERNAL_ERROR, err)", "def preprocessRequest(self, route):\n request.jsonData = None\n\n if not request.headers.get(\"Content-Type\", \"\").startswith(\"application/json\"):\n # there is no JSON posted, so we can return\n self.logger.debug(\"No JSON to decode; finished\")\n return\n\n # JSON is expected, so ensure it is either already parsed by bottle, or parse it ourselves\n if hasattr(request, \"json\") and request.json is not None:\n # It is already parsed, so there's nothing to do\n self.logger.debug(\"JSON data already parsed by bottle\")\n request.jsonData = request.json\n return\n\n self.logger.debug(\"Attempting to parse JSON from request.body since request.json is missing/None\")\n # ensure some data was actually POSTed\n if hasattr(request, \"body\") and request.body:\n try:\n # TODO: set encoding based on request header\n request.jsonData = json.load(request.body)\n self.logger.debug(\"Decoded JSON successfully\")\n except Exception, e:\n self.logger.warn(\"Request header Content-Type indicates JSON, and we failed to parse request.body: %s\" % e)\n request.body.seek(0)\n self.logger.warn(\"Request body (first 32bytes)=%s\" % repr(request.body.read(32)))\n else:\n self.logger.warn(\"Request header Content-Type indicates JSON, but no data was POSTed?\")", "def process(self, response):\r\n if response.status_code == 200:\r\n json = response.json()\r\n if type(json) == list:\r\n return json\r\n else:\r\n return self.processData(json)\r\n else:\r\n pass\r\n # Raise Exception\r", "def parse (self, request):\n\n data = {}\n body_start = request.find('\\r\\n\\r\\n')\n if body_start == -1:\n data['body'] = None\n else:\n data['body'] = request[body_start+4:]\n parts = request.split(' ', 2)\n data['method'] = parts[0]\n data['resource'] = parts[1]\n return (data)", "def rest_api_request_handler(self, request_type):\n result = {}\n success_code = 
0\n with self.resource_lock:\n if request_type == self.RestRequest.REST_MUTS:\n result = self.muts # Returns MUTs\n elif request_type == self.RestRequest.REST_TEST_SPEC:\n result = self.test_spec # Returns Test Specification\n elif request_type == self.RestRequest.REST_TEST_RESULTS:\n pass # Returns test results\n else:\n success_code = -1\n return json.dumps(self.get_rest_result_template(result, 'request/' + request_type, success_code), indent=4)", "async def parse_handle_request(self, json_command):\n try:\n # Check signature\n vasp = self.vasp\n other_key = vasp.info_context.get_peer_compliance_verification_key(\n self.other_address_str\n )\n\n message = await other_key.verify_message(json_command)\n request = json.loads(message)\n\n # Parse the request whoever necessary.\n request = CommandRequestObject.from_json_data_dict(\n request, JSONFlag.NET\n )\n\n # Going ahead to process the request.\n logger.debug(\n f'(other:{self.other_address_str}) '\n f'Processing request seq #{request.cid}',\n )\n response = self.handle_request(request)\n\n except OffChainInvalidSignature as e:\n logger.warning(\n f'(other:{self.other_address_str}) '\n f'Signature verification failed. OffChainInvalidSignature: {e}'\n )\n response = make_parsing_error(f'{e}', code=OffChainErrorCode.invalid_signature)\n\n except JSONParsingError as e:\n logger.error(\n f'(other:{self.other_address_str}) JSONParsingError: {e}',\n exc_info=True,\n )\n response = make_parsing_error()\n except Exception as e:\n logger.error(\n f'(other:{self.other_address_str}) exception: {e}',\n exc_info=True,\n )\n raise e\n\n # Prepare the response.\n full_response = await self.package_response(response)\n return full_response", "def parse_request(self, request):\n request.process_inputs()", "def processRequest( json, data, headers, params ):\n\n retries = 0\n result = None\n\n while True:\n\n response = requests.request( 'post', _url, json = json, data = data, headers = headers, params = params )\n\n if response.status_code == 429: \n\n print( \"Message: %s\" % ( response.json()['error']['message'] ) )\n\n if retries <= _maxNumRetries: \n time.sleep(1) \n retries += 1\n continue\n else: \n print( 'Error: failed after retrying!' 
)\n break\n\n elif response.status_code == 200 or response.status_code == 201:\n\n if 'content-length' in response.headers and int(response.headers['content-length']) == 0: \n result = None \n elif 'content-type' in response.headers and isinstance(response.headers['content-type'], str): \n if 'application/json' in response.headers['content-type'].lower(): \n result = response.json() if response.content else None \n elif 'image' in response.headers['content-type'].lower(): \n result = response.content\n else:\n print( \"Error code: %d\" % ( response.status_code ) )\n print( response.content )\n print( \"Message: %s\" % ( response.json()['error']['message'] ) )\n\t\t\t\n break\n \n return result", "def __call__(self, request):\n\n if request.method == 'POST':\n response = self.process_request(request)\n\n else:\n response = self.get_smd(request.get_full_path())\n\n return json.dumps(response)", "def process_request(self, request):\n raise NotImplementedError('process_request not implemented in BaseService')", "def get_content_or_400(a_request):\n\n request_json = request.get_json()\n if not request_json or 'content' not in request_json:\n raise InvalidAPIUsage(\"content is required\")\n return request_json['content']", "def parse(self, request):\n result = {}\n if request.method.lower() == 'post':\n params = request.get_json(\n cache=False) if request.mimetype == 'application/json' else request.form\n else:\n params = request.args\n for arg in self.args:\n if arg.name in params:\n if arg.type is not None and type(params[arg.name]) != arg.type:\n try:\n result[arg.name] = arg.coerce(params[arg.name])\n except Exception as err:\n current_app.logger.warning('Coercion failed for param: {}'.format(arg.name))\n raise ApiError('Coercion failed for param: {}'.format(arg.name), 'ArgsParserException', 1, status_code=400)\n abort(400)\n else:\n result[arg.name] = params[arg.name]\n elif arg.required:\n current_app.logger.warning(\"Missing required param: {}\".format(arg.name))\n raise ApiError('Missing required param: {}'.format(arg.name), 'ArgsParserException', 2, status_code=400)\n abort(400)\n else:\n result[arg.name] = arg.default\n return result", "async def analyze_body(\n self,\n input: Optional[Union[IO, \"models.SourcePath\"]] = None,\n **kwargs\n ) -> str:\n cls = kwargs.pop('cls', None) # type: ClsType[str]\n error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}\n error_map.update(kwargs.pop('error_map', {}))\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n\n # Construct URL\n url = self.analyze_body.metadata['url'] # type: ignore\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = 'application/json'\n\n body_content_kwargs = {} # type: Dict[str, Any]\n if header_parameters['Content-Type'].split(\";\")[0] in ['application/pdf', 'image/jpeg', 'image/png', 'image/tiff']:\n body_content_kwargs['stream_content'] = input\n elif header_parameters['Content-Type'].split(\";\")[0] in ['application/json']:\n if input is not None:\n body_content = self._serialize.body(input, 'SourcePath')\n else:\n body_content = None\n body_content_kwargs['content'] = body_content\n else:\n raise ValueError(\n \"The content_type '{}' is not one of the allowed values: \"\n \"['application/pdf', 'image/jpeg', 'image/png', 'image/tiff', 
'application/json']\".format(header_parameters['Content-Type'])\n )\n request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)\n\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response)\n\n deserialized = self._deserialize('str', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "def post_algorithm():\n try:\n request_json = request.get_json()\n result = json.dumps([])\n response = app.response_class(\n response=result,\n status=200,\n mimetype='application/json')\n except ValueError as e:\n response = app.response_class(\n status=400,\n response=str(e)\n )\n return response", "def process_request(json, data, headers, params):\n retries = 0\n result = None\n\n while True:\n response = requests.request(\"post\", _url, json=json,\n data=data, headers=headers, params=params)\n if response.status_code == 429:\n if retries <= _maxNumRetries:\n time.sleep(1)\n retries += 1\n continue\n else:\n print(\"Error: failed after retrying!\")\n break\n elif response.status_code == 200 or response.status_code == 201:\n if (\"content-length\" in response.headers and\n int(response.headers[\"content-length\"]) == 0):\n result = None\n elif (\"content-type\" in response.headers and\n isinstance(response.headers[\"content-type\"], str)):\n if (\"application/json\" in\n response.headers[\"content-type\"].lower()):\n result = response.json() if response.content else None\n elif \"image\" in response.headers[\"content-type\"].lower():\n result = response.content\n else:\n print(\"Error code: %d\" % response.status_code)\n print(\"Message: %s\" % response.json()[\"error\"][\"message\"])\n break\n return result", "def process_request(input_: dict) -> dict:\n\n output = {\n \"Time\": datetime.datetime.now(),\n \"Source\": \"ecommerce.users\",\n \"Resources\": [input_[\"userName\"]],\n \"DetailType\": \"UserCreated\",\n \"Detail\": json.dumps({\n \"userId\": input_[\"userName\"],\n \"email\": input_[\"request\"][\"userAttributes\"][\"email\"]\n }),\n \"EventBusName\": EVENT_BUS_NAME\n }\n\n return output", "def process_request(self, req, resp):\n if req.method == \"GET\" or req.method == \"POST\":\n log.info((thisFilename, inspect.currentframe().f_code.co_name, req.path, \"params\", str(req.params)))\n if req.method == \"POST\":\n log.info((thisFilename, inspect.currentframe().f_code.co_name, req.path, \"media\", str(req.media)))\n if req.method != \"OPTIONS\":\n if req.path not in ignoreProcessRequestForPath:\n if \"kartoon-fapi-incoming\" not in req.params:\n resp.media = {\n \"responseId\": 103,\n \"message\": \"Invalid request\"\n }\n # exit request\n resp.complete = True\n else:\n req.params[\"kartoon-fapi-incoming\"] = json.loads(req.params[\"kartoon-fapi-incoming\"])\n if req.params[\"kartoon-fapi-incoming\"][\"secretKey\"] != FapiToBapiSecret:\n resp.media = {\n \"responseId\": 109,\n \"message\": \"Unauthorized access\"\n }\n # exit request\n resp.complete = True", "def _postproc(self, request):\n if request.status_code != 200: raise Exception('wrong error code: {0}'.format(request.status_code))\n data = request.json()\n self.data = self._finalize_data(data)", "def handle(self, content):\n # Check the API request\n serializer = ApiRequestSerializer(data=content)\n if not 
serializer.is_valid():\n return self.consumer.send_to_client(\n {\"topic\": \"api\", \"type\": \"error\", \"message\": \"invalid-request\"}\n )\n\n # Make request\n method = serializer.validated_data[\"method\"]\n url = serializer.validated_data[\"url\"]\n payload = serializer.validated_data.get(\"payload\", None)\n logger.info(\"API {}:{}:{}\".format(method, url, payload))\n\n response = getattr(self.client, method)(url, data=payload, follow=True)\n\n # Return to client\n # API response\n to_client = {\n \"topic\": \"api\",\n \"type\": \"response\",\n \"status_code\": response.status_code,\n }\n if response.get(\"Content-Type\") == \"application/json\":\n to_client[\"content\"] = response.json()\n else:\n to_client[\"content\"] = content\n\n # Original request params\n to_client.update({\"method\": method, \"url\": url})\n if payload is not None:\n to_client[\"payload\"] = payload\n\n self.consumer.send_to_client(to_client)", "def process_request(self, request):\n self.req = request\n command = self.get_command()\n file_handler = filehandler.FileHandler(command)\n file_handler.handle_command()\n return command.result", "def process_json(self, data):\r\n rsp = json.loads(data)\r\n\r\n if rsp['stat'] == 'fail':\r\n raise APIError, rsp\r\n\r\n return rsp", "def process_gateway_request(self, service_name=None, operation=None, id_param=None):\n # TODO make this service smarter to respond to the mime type in the request data (ie. json vs text)\n self._log_request_start(\"SVC RPC\")\n try:\n result = self._make_service_request(service_name, operation, id_param)\n return self.gateway_json_response(result)\n\n except Exception as ex:\n return self.gateway_error_response(ex)\n\n finally:\n self._log_request_end()", "def __exec_request(self, URL) -> Any:\n headers = {\n \"X-ELS-APIKey\": self.config['apikey'],\n \"Accept\": 'application/json'\n }\n\n request = requests.get(\n URL,\n headers=headers\n )\n self._status_code = request.status_code\n\n if request.status_code == 200:\n return json.loads(request.text, strict=False)\n else:\n return \"failed\"", "def _getFromBody(self, request):\n assert self.error is None\n assert isinstance(request.body, str)\n try:\n json_rpc_request_dict = loads(request.body)\n if isinstance(json_rpc_request_dict, list):\n self.list = json_rpc_request_dict\n return\n except ValueError, e:\n error(\"failed to parse JSON object in the body\")\n self.error = JsonRpcError.PARSE_ERROR\n return\n for k, v in json_rpc_request_dict.iteritems():\n if k == \"jsonrpc\":\n self.jsonrpc = v\n if k == \"method\":\n self.method = v\n if k == \"id\":\n self.id = v\n if k == \"params\":\n self.params = v\n self.dict[k] = v", "def __call__(self, request):\n response = self.get_request(request)\n return response", "def _make_request(self):\n response = urllib2.urlopen(\n url=self.api_url,\n data=self._get_request_data()\n )\n content = response.read()\n return json.loads(content.decode('utf8'))", "def _parse_json(req, resp):\n try:\n body = req.stream.read()\n return json.loads(body)\n except ValueError as e:\n err_msg = str(e) + ': ' + body\n resp.status = falcon.HTTP_400\n resp.body = make_error_body(err_msg)\n return", "def parse_request_body(self):\n try:\n request_arguments = self.request.arguments\n if request_arguments:\n new_request_arguments = {\n k: common.my_str(v[0].decode('utf8'))\n for k, v in request_arguments.items()\n }\n return new_request_arguments\n else:\n request_body = self.request.body\n request_data = request_body.decode('utf-8')\n request_data_dict = 
json.loads(request_data)\n self.request.arguments = {\n k: [str(v)]\n for k, v in request_data_dict.items()\n }\n new_request_arguments = {\n k: common.my_str(v)\n for k, v in request_data_dict.items()\n }\n return new_request_arguments\n except Exception as e:\n raise tornado.web.HTTPError(\n status_code=400, log_message='bad_request: {}'.format(str(e)))", "def return_request(self):\n folder_path = \"{0}/user_uploads/{1}/{2}/\".format(self.__APP_PATH__, request.json[\"net_id\"], request.json[\"request_id\"])\n request_submitted_marker = \"{0}request.submitted\".format(folder_path)\n request_processed_marker = \"{0}request.processed\".format(folder_path)\n request_returned_marker = \"{0}request.returned\".format(folder_path)\n request_voided_marker = \"{0}request.voided\".format(folder_path)\n\n if get_user_roles(current_user.net_id)[\"STFADM\"] and path.exists(request_submitted_marker):\n try:\n return_message = request.json[\"return_message\"].strip()\n\n if path.exists(request_processed_marker):\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"You must unprocess a request before returning it.\"})\n elif path.exists(request_voided_marker):\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"This request has already been voided. Please refresh the page.\"})\n elif path.exists(request_returned_marker):\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"This request has already been returned. Please refresh the page.\"})\n\n with open(\"{0}submission.json\".format(folder_path), mode=\"r\") as request_details_json:\n request_details = json.load(request_details_json)\n\n with open(\"{0}submission.json\".format(folder_path), mode=\"w\") as request_details_json:\n date_time = \"{0}\".format(datetime.now()).split()\n\n request_details[\"history\"].append({\"date\": date_time[0],\n \"time\": date_time[1],\n \"action\": \"Returned\",\n \"actor\": {\n \"first_name\": current_user.first_name,\n \"last_name\": current_user.last_name,\n \"email\": current_user.email,\n \"uta_id\": current_user.uta_id\n },\n \"metadata\": {\n \"message\": return_message\n }})\n json.dump(request_details, request_details_json)\n\n with open(request_returned_marker, mode=\"w\") as returned_marker:\n returned_marker.write(\"/n\")\n\n if return_message:\n return_message_html = \"<br><br>Message from {0}:<br>\" \\\n \"<blockquote style='border-left: 3px solid rgb(200, 200, 200); \" \\\n \"border-top-color: rgb(200, 200, 200); border-right-color: \" \\\n \"rgb(200, 200, 200); border-bottom-color: rgb(200, 200, 200); \" \\\n \"padding-left: 1ex; margin-left: 0.8ex; color: rgb(102, 102, 102);'>\" \\\n \"<div style='color: rgb(0, 0, 0);'>{1}</div>\" \\\n \"</blockquote>\".format(current_user.first_name, return_message)\n return_message = \"\\n\\nMessage from {0}:\\n{1}\".format(current_user.first_name, return_message)\n else:\n return_message_html = \"\"\n\n request_date = \"{0:02d}/{1:02d}/{2:04d}\".format(request_details[\"request_date\"][\"month\"],\n request_details[\"request_date\"][\"day\"],\n request_details[\"request_date\"][\"year\"])\n email_subject = \"Reimbursement Request Returned\"\n email_body = app_constants.EMAILS[\"return_request\"][\"text\"].format(\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n request_date, request_details[\"total_amount\"],\n \"{0}mavapps/\".format(URL_FULL_PATH), request_details[\"requester\"][\"net_id\"],\n request_details[\"folder_name\"], return_message,\n 
request_details[\"short_description\"],\n request_details[\"pay_to\"][\"name\"], request_details[\"pay_to\"][\"email\"])\n email_body_html = app_constants.EMAILS[\"return_request\"][\"html\"].format(\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n request_date, request_details[\"total_amount\"],\n \"{0}mavapps/\".format(URL_FULL_PATH), request_details[\"requester\"][\"net_id\"],\n request_details[\"folder_name\"], return_message_html,\n request_details[\"short_description\"],\n request_details[\"pay_to\"][\"name\"], request_details[\"pay_to\"][\"email\"])\n\n if SRV != \"prod\":\n emails = self.__TEST_EMAILS__\n else:\n emails = [[\"{0} {1}\".format(current_user.first_name, current_user.last_name), current_user.email],\n [\"{0} {1}\".format(request_details[\"requester\"][\"first_name\"], request_details[\"requester\"][\"last_name\"]), request_details[\"requester\"][\"email\"]]] \\\n + self.__PROD_EMAILS__\n\n self.mailer.send_mail(emails, email_subject, email_body, email_body_html, from_name=\"CSE Reimbursement App\")\n\n remove(request_submitted_marker)\n\n return jsonify({\"success\": True, \"type\": \"success\", \"message\": \"Request returned to the user successfully.\"})\n except Exception as e:\n print(e)\n return abort(400)\n return abort(403)", "async def rest_handler(request):\n # verify the request\n valid, reason = await verify_rest_request(request)\n if not valid:\n return generate_error(reason, 400)\n json = await request.json()\n # get the parameters\n cmd = json['cmd']\n params = json['params']\n # pass off to the correct target handler\n if cmd == 'find':\n response = await _find_handler(request, params)\n elif cmd == 'stats':\n response = await _stats_handler(request, params)\n elif cmd == 'download':\n response = await _download_handler(request, params)\n elif cmd == 'upload':\n response = await _upload_handler(request, params)\n elif cmd == 'provision':\n response = await _provision_handler(request, params)\n # return the response we get back fgrom the handler\n return response", "def execute(self, only_user_data=False):\n raw_result = self.cli.execute(self.req)\n\n if raw_result[0] == 200:\n if only_user_data:\n return json.loads(raw_result[-1])\n else:\n return json.loads(raw_result)\n else:\n print(\"<module: json_process>, Cannot get data: \", raw_result[0])\n exit(1)", "def parse_request(json_data: str) -> Request:\n logger.debug('Type: {}'.format(type(json_data)))\n data = json.loads(json_data)\n\n return Request(\n data[\"text\"],\n PatternCategory(data[\"previous_pattern\"]\n ) if \"previous_pattern\" in data else None,\n data[\"mood\"],\n data[\"affection\"],\n Gender(data[\"bot_gender\"]),\n data[\"bot_name\"],\n date.fromtimestamp(data[\"bot_birthdate\"]),\n data[\"bot_favorite_color\"],\n data[\"father_name\"],\n data[\"father_age\"],\n data[\"mother_name\"],\n data[\"mother_age\"],\n )", "def json_request(self, data):\r\n json_request = json.dumps(data, indent=4)\r\n\r\n try:\r\n result = urllib2.urlopen(self.webservice, json_request).read()\r\n except urllib2.URLError, e:\r\n return None\r\n\r\n try:\r\n response = json.loads(result)\r\n except ValueError, e:\r\n return None\r\n\r\n return response", "def prepare(self):\n if 'Content-Type' in self.request.headers:\n content_type = self.request.headers['Content-Type']\n if content_type == \"application/json\":\n print 'json data'\n data = self.request.body\n try:\n json_data = json_decode(data)\n except ValueError:\n raise 
tornado.httpserver._BadRequestException(\n                    \"Invalid JSON structure.\"\n                )\n                if type(json_data) != dict:\n                    raise tornado.httpserver._BadRequestException(\n                        \"We only accept key value objects!\"\n                    )\n                for key, value in json_data.iteritems():\n                    self.request.arguments[key] = [unicode(value),]", "def process_request(self, req, resp, resource, params):", "def load_json(self):\n        try:\n            self.request.arguments = json.loads(self.request.body)\n        except ValueError:\n            msg = \"Could not decode JSON: %s\" % self.request.body\n            logger.debug(msg)\n            raise tornado.web.HTTPError(400, msg)", "def load_json(self):\n        try:\n            self.request.arguments = json.loads(self.request.body)\n        except ValueError:\n            msg = \"Could not decode JSON: %s\" % self.request.body\n            logger.debug(msg)\n            raise tornado.web.HTTPError(400, msg)", "def processRequest(url, json, data, headers, params):\n    retries = 0\n    result = None\n\n    while True:\n        response = requests.request('post', url, json=json, data=data, headers=headers, params=params)\n        if response.status_code == 429:\n            print(\"Message: %s\" % (response.json()))\n            if retries <= NumRetries:\n                time.sleep(1)\n                retries += 1\n                continue\n            else:\n                print('Error: failed after retrying!')\n                break\n\n        elif response.status_code == 200 or response.status_code == 201:\n            if 'content-length' in response.headers and int(response.headers['content-length']) == 0:\n                result = None\n            elif 'content-type' in response.headers and isinstance(response.headers['content-type'], str):\n                if 'application/json' in response.headers['content-type'].lower():\n                    result = response.json() if response.content else None\n                elif 'image' in response.headers['content-type'].lower():\n                    result = response.content\n        else:\n            print(\"Error code: %d\" % (response.status_code))\n            print(\"Message: %s\" % (response.json()))\n\n        break\n    return result", "def process_request(self, req):\n        return None", "def parse_request(req):\n    # Parsing out the request body\n    data = req.get_json()\n    if (data is None or\n            'action' not in data or\n            'task_id' not in data or\n            'release_id' not in data):\n        abort(400)\n    \n    action = data['action']\n    task = data['task_id']\n    release = data['release_id']\n    return action, task, release", "def receiver():\n    req_entities = request.get_json()\n    output = []\n    try:\n        for entity in req_entities:\n            logger.debug(f'Input entity: {json.dumps(entity)}')\n            do_query = True # If do_query is missing from the entity we will do the query anyways.\n            if 'do_query' in entity: # Check if entity has do_query key\n                do_query = entity['do_query']\n            else:\n                logger.warning(f'Key \"do_query\" is missing from the input entity! Doing query for EVERY entity.')\n\n            if do_query:\n                handler = getattr(handlers, variables.handler) # Get the handler from env vars.\n                entity = handler(databaseConnection, variables, logger, entity) # Append entity with handler.\n                logger.debug(f'Output entity: {json.dumps(entity)}')\n            output.append(entity)\n    except TypeError as e:\n        logger.critical('Wrong type gave error: {}'.format(e))\n    except Exception as e:\n        logger.critical(f'Error when handling entities:\\n{json.dumps(req_entities)}\\nError message:\\n{e}')\n\n    # Generate the response\n    try:\n        return Response(stream_json(output),\n                        mimetype='application/json')\n    except BaseException as e:\n        return Response(status=500, response=f\"An error occurred during transform of input. 
Error: {e}\")", "def check_redemption_request(request: IRequest) -> Optional[bytes]:\n if request.requestHeaders.getRawHeaders(b\"content-type\") != [b\"application/json\"]:\n return bad_content_type(request)\n\n p = request.content.tell()\n content = request.content.read()\n request.content.seek(p)\n\n try:\n request_body = loads(content)\n except ValueError:\n return bad_request(request, None)\n\n expected_keys = {\"redeemVoucher\", \"redeemCounter\", \"redeemTokens\"}\n actual_keys = set(request_body.keys())\n if expected_keys != actual_keys:\n return bad_request(\n request,\n {\n \"success\": False,\n \"reason\": \"{} != {}\".format(\n expected_keys,\n actual_keys,\n ),\n },\n )\n return None", "def process_request(self, net_id, request_id, processed):\n folder_path = \"{0}/user_uploads/{1}/{2}/\".format(self.__APP_PATH__, net_id, request_id)\n request_submitted_marker = \"{0}request.submitted\".format(folder_path)\n request_processed_marker = \"{0}request.processed\".format(folder_path)\n request_returned_marker = \"{0}request.returned\".format(folder_path)\n request_voided_marker = \"{0}request.voided\".format(folder_path)\n request_submitted = path.exists(request_submitted_marker)\n\n if get_user_roles(current_user.net_id)[\"STFADM\"] and ((request_submitted and str_2_bool(processed)) or (not request_submitted and not str_2_bool(processed))):\n date_time = \"{0}\".format(datetime.now()).split()\n\n if path.exists(request_voided_marker):\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"This request has been voided. Please refresh the page.\"})\n elif path.exists(request_returned_marker):\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"This request has been returned. Please refresh the page.\"})\n elif path.exists(request_processed_marker) and str_2_bool(processed):\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"This request has already been processed. Please refresh the page.\"})\n elif not path.exists(request_processed_marker) and not str_2_bool(processed):\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"This request has already been unprocessed. 
Please refresh the page.\"})\n\n with open(\"{0}submission.json\".format(folder_path), mode=\"r\") as request_details_json:\n request_details = json.load(request_details_json)\n\n try:\n request_date = \"{0:02d}/{1:02d}/{2:04d}\".format(request_details[\"request_date\"][\"month\"],\n request_details[\"request_date\"][\"day\"],\n request_details[\"request_date\"][\"year\"])\n\n history_update = {\n \"date\": date_time[0],\n \"time\": date_time[1],\n \"action\": None,\n \"actor\": {\n \"first_name\": current_user.first_name,\n \"last_name\": current_user.last_name,\n \"email\": current_user.email,\n \"uta_id\": current_user.uta_id\n },\n \"metadata\": None\n }\n\n processed = str_2_bool(processed)\n if processed:\n remove(request_submitted_marker)\n optional_message = request.json.get(\"message\", \"\").strip()\n history_update[\"action\"] = \"Processed\"\n history_update[\"metadata\"] = {\n \"transaction_number\": request.json[\"transaction_number\"],\n \"message\": optional_message\n }\n\n with open(request_processed_marker, mode=\"w\") as processed_marker:\n processed_marker.write(\"/n\")\n\n if optional_message:\n optional_message_html = \"<br><br>Please see the attached message from {0} below:\" \\\n \"<br><blockquote style='border-left: 3px solid rgb(200, 200, 200); \" \\\n \"border-top-color: rgb(200, 200, 200); border-right-color: \" \\\n \"rgb(200, 200, 200); border-bottom-color: rgb(200, 200, 200); \" \\\n \"padding-left: 1ex; margin-left: 0.8ex; color: rgb(102, 102, 102);'>\" \\\n \"<div style='color: rgb(0, 0, 0);'>{1}</div>\" \\\n \"</blockquote>\".format(current_user.first_name, optional_message)\n optional_message = \"\\n\\nPlease see the attached message from {0} below:\" \\\n \"\\n{1}\".format(current_user.first_name, optional_message)\n else:\n optional_message_html = \"\"\n\n email_subject = \"Reimbursement Request Processed\"\n requester_payto_equal = request_details[\"requester\"][\"email\"].lower().strip() == request_details[\"pay_to\"][\"email\"].lower().strip()\n\n if requester_payto_equal:\n email_body = app_constants.EMAILS[\"process_request\"][\"text\"][processed][requester_payto_equal].format(\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n request_date, request_details[\"total_amount\"],\n current_user.first_name, current_user.email,\n request.json[\"transaction_number\"],\n optional_message, request_details[\"short_description\"],\n request_details[\"pay_to\"][\"name\"], request_details[\"pay_to\"][\"email\"],\n \"{0}mavapps/\".format(URL_FULL_PATH))\n email_body_html = app_constants.EMAILS[\"process_request\"][\"html\"][processed][requester_payto_equal].format(\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n request_date, request_details[\"total_amount\"],\n current_user.first_name, current_user.email,\n request.json[\"transaction_number\"],\n optional_message_html, request_details[\"short_description\"],\n request_details[\"pay_to\"][\"name\"], request_details[\"pay_to\"][\"email\"],\n \"{0}mavapps/\".format(URL_FULL_PATH))\n else:\n email_body = app_constants.EMAILS[\"process_request\"][\"text\"][processed][requester_payto_equal].format(\n request_details[\"pay_to\"][\"name\"],\n request_date, request_details[\"total_amount\"],\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n current_user.first_name, current_user.email,\n request.json[\"transaction_number\"],\n optional_message, 
request_details[\"short_description\"],\n request_details[\"requester\"][\"email\"],\n \"{0}mavapps/\".format(URL_FULL_PATH))\n email_body_html = app_constants.EMAILS[\"process_request\"][\"html\"][processed][requester_payto_equal].format(\n request_details[\"pay_to\"][\"name\"],\n request_date, request_details[\"total_amount\"],\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n current_user.first_name, current_user.email,\n request.json[\"transaction_number\"],\n optional_message_html, request_details[\"short_description\"],\n request_details[\"requester\"][\"email\"],\n \"{0}mavapps/\".format(URL_FULL_PATH))\n\n return_payload = {\"success\": True, \"type\": \"success\", \"message\": \"File marked as processed.\"}\n else:\n with open(request_submitted_marker, mode=\"w\") as submitted_marker:\n submitted_marker.write(\"/n\")\n\n user_message = request.json[\"message\"]\n history_update[\"action\"] = \"Unprocessed\"\n history_update[\"metadata\"] = {\n \"message\": user_message\n }\n\n email_subject = \"Reimbursement Request Unprocessed\"\n requester_payto_equal = request_details[\"requester\"][\"email\"].lower().strip() == request_details[\"pay_to\"][\"email\"].lower().strip()\n\n if requester_payto_equal:\n email_body = app_constants.EMAILS[\"process_request\"][\"text\"][processed][requester_payto_equal].format(\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n request_date, request_details[\"total_amount\"],\n current_user.first_name, current_user.email,\n user_message, request_details[\"short_description\"],\n request_details[\"pay_to\"][\"name\"], request_details[\"pay_to\"][\"email\"],\n \"{0}mavapps/\".format(URL_FULL_PATH))\n email_body_html = app_constants.EMAILS[\"process_request\"][\"html\"][processed][requester_payto_equal].format(\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n request_date, request_details[\"total_amount\"],\n current_user.first_name, current_user.email,\n user_message, request_details[\"short_description\"],\n request_details[\"pay_to\"][\"name\"], request_details[\"pay_to\"][\"email\"],\n \"{0}mavapps/\".format(URL_FULL_PATH))\n else:\n email_body = app_constants.EMAILS[\"process_request\"][\"text\"][processed][requester_payto_equal].format(\n request_details[\"pay_to\"][\"name\"],\n request_date, request_details[\"total_amount\"],\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n current_user.first_name, current_user.email,\n user_message, request_details[\"short_description\"],\n request_details[\"requester\"][\"email\"],\n \"{0}mavapps/\".format(URL_FULL_PATH))\n email_body_html = app_constants.EMAILS[\"process_request\"][\"html\"][processed][requester_payto_equal].format(\n request_details[\"pay_to\"][\"name\"],\n request_date, request_details[\"total_amount\"],\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n current_user.first_name, current_user.email,\n user_message, request_details[\"short_description\"],\n request_details[\"requester\"][\"email\"],\n \"{0}mavapps/\".format(URL_FULL_PATH))\n\n if path.exists(request_processed_marker):\n remove(request_processed_marker)\n\n return_payload = {\"success\": True, \"type\": \"success\", \"message\": \"File marked as unprocessed.\"}\n\n with open(\"{0}submission.json\".format(folder_path), mode=\"w\") as request_details_json:\n 
request_details[\"history\"].append(history_update)\n json.dump(request_details, request_details_json)\n\n if SRV != \"prod\":\n emails = self.__TEST_EMAILS__\n else:\n emails = [[\"{0} {1}\".format(current_user.first_name, current_user.last_name), current_user.email],\n [\"{0} {1}\".format(request_details[\"requester\"][\"first_name\"], request_details[\"requester\"][\"last_name\"]), request_details[\"requester\"][\"email\"]]] \\\n + self.__PROD_EMAILS__\n if not requester_payto_equal:\n emails.append([\"{0}\".format(request_details[\"pay_to\"][\"name\"]), request_details[\"pay_to\"][\"email\"]])\n\n self.mailer.send_mail(emails, email_subject, email_body, email_body_html, from_name=\"CSE Reimbursement App\")\n\n return jsonify(return_payload)\n\n except Exception as e:\n print(e)\n\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"Oops! Something went wrong, contact the \"\n \"administrator if the problem persists.\"})\n return abort(403)", "def convert_Request_to_RequestEntity(request):\n\n result = ResponseEntity()\n try:\n request_entity = RequestEntity()\n request_entity.request_id = request.id\n request_entity.username = request.account.user.username\n request_entity.title = request.title\n request_entity.description = request.description\n request_entity.category = utils.convert_category_str_to_enum_list(request.category)\n request_entity.price = request.price\n request_entity.price_currency = Enum.CURRENCY(int(request.price_currency))\n request_entity.origin_city = Enum.CITY(int(request.origin_city))\n request_entity.origin_address = request.origin_address\n request_entity.destination_city = Enum.CITY(int(request.destination_city))\n request_entity.destination_address = request.destination_address\n request_entity.image_url = utils.convert_string_to_list(request.image_url) # list\n request_entity.thumb_url = utils.convert_string_to_list(request.thumb_url)\n request_entity.expired_date = localtime(request.expired_date)\n request_entity.status = Enum.REQUEST_STATUS(int(request.status))\n request_entity.created_date = localtime(request.created_date)\n request_entity.last_modified_date = request.last_modified_date\n result.success = True\n result.data = request_entity\n except Exception as e:\n print str(e)\n result.success = False\n result.message = str(e)\n finally:\n return result", "def parse(self, stream, media_type=None, parser_context=None):\n\n result = super(parsers.JSONParser, self).parse(\n stream, media_type=media_type, parser_context=parser_context\n )\n if not isinstance(result, dict) or \"data\" not in result:\n raise ParseError(\"Received document does not contain primary data\")\n\n data = result.get(\"data\")\n view = parser_context[\"view\"]\n\n from rest_framework_json_api.views import RelationshipView\n\n if isinstance(view, RelationshipView):\n # We skip parsing the object as JSONAPI Resource Identifier Object and not a regular\n # Resource Object\n if isinstance(data, list):\n for resource_identifier_object in data:\n if not (\n resource_identifier_object.get(\"id\")\n and resource_identifier_object.get(\"type\")\n ):\n raise ParseError(\n \"Received data contains one or more malformed JSONAPI \"\n \"Resource Identifier Object(s)\"\n )\n elif not (data.get(\"id\") and data.get(\"type\")):\n raise ParseError(\n \"Received data is not a valid JSONAPI Resource Identifier Object\"\n )\n\n return data\n\n request = parser_context.get(\"request\")\n\n # Sanity check\n if not isinstance(data, dict):\n raise ParseError(\n \"Received data is not a 
valid JSONAPI Resource Identifier Object\"\n )\n\n # Check for inconsistencies\n if request.method in (\"PUT\", \"POST\", \"PATCH\", \"DELETE\"):\n resource_name = utils.get_resource_name(\n parser_context, expand_polymorphic_types=True\n )\n if isinstance(resource_name, str):\n if data.get(\"type\") != resource_name:\n raise exceptions.Conflict(\n \"The resource object's type ({data_type}) is not the type that \"\n \"constitute the collection represented by the endpoint \"\n \"({resource_type}).\".format(\n data_type=data.get(\"type\"), resource_type=resource_name\n )\n )\n else:\n if data.get(\"type\") not in resource_name:\n raise exceptions.Conflict(\n \"The resource object's type ({data_type}) is not the type that \"\n \"constitute the collection represented by the endpoint \"\n \"(one of [{resource_types}]).\".format(\n data_type=data.get(\"type\"),\n resource_types=\", \".join(resource_name),\n )\n )\n if not data.get(\"id\") and request.method in (\"PATCH\", \"PUT\", \"DELETE\"):\n raise ParseError(\n \"The resource identifier object must contain an 'id' member\"\n )\n\n if request.method in (\"PATCH\", \"PUT\", \"DELETE\"):\n lookup_url_kwarg = getattr(view, \"lookup_url_kwarg\", None) or getattr(\n view, \"lookup_field\", None\n )\n if lookup_url_kwarg and str(data.get(\"id\")) != str(\n view.kwargs[lookup_url_kwarg]\n ):\n raise exceptions.Conflict(\n \"The resource object's id ({data_id}) does not match url's \"\n \"lookup id ({url_id})\".format(\n data_id=data.get(\"id\"), url_id=view.kwargs[lookup_url_kwarg]\n )\n )\n\n # Construct the return data\n serializer_class = getattr(view, \"serializer_class\", None)\n parsed_data = {\"id\": data.get(\"id\")} if \"id\" in data else {}\n # `type` field needs to be allowed in none polymorphic serializers\n if serializer_class is not None:\n if issubclass(serializer_class, serializers.PolymorphicModelSerializer):\n parsed_data[\"type\"] = data.get(\"type\")\n parsed_data.update(self.parse_attributes(data))\n parsed_data.update(self.parse_relationships(data))\n parsed_data.update(self.parse_metadata(result))\n return parsed_data", "def _collect_request(self, request):\n resource = self.v1api.site.getResourceFor(request)\n rendered = resource.render(request)\n\n if request.responseCode != 200:\n return request.responseCode\n\n if isinstance(rendered, str):\n return json.loads(rendered)\n elif rendered == NOT_DONE_YET:\n\n while not request.finished:\n # we need to advance until the request has been finished\n self.sygnal.reactor.advance(1)\n self.sygnal.reactor.wait_for_work(lambda: request.finished)\n\n assert request.finished > 0\n\n if request.responseCode != 200:\n return request.responseCode\n\n written_bytes = b\"\".join(request.written)\n return json.loads(written_bytes)\n else:\n raise RuntimeError(f\"Can't collect: {rendered}\")", "def process_request(self, request):\n return None", "def dispatch(self, request):\n\n # Returns an HTTP response in JSON format\n try:\n output = self._dispatch(request)\n except Exception as e:\n self.logger.exception(\"exception while handling request\")\n output = dict(error=_serialize_exc(e).as_dict())\n return Response(json.dumps(output), mimetype=\"application/json\")", "def postprocessRequest(self, retval, route):\n JSONed = False\n GZIPPED = False\n\n if retval is None:\n self.logger.warn(\"retval is None!\")\n return retval\n\n # Is this request under the a path we're enforcing JSON output for?\n if (route is not None and hasattr(route, 'rule') and route.rule.startswith(self.baseRulePath)) or 
response.status_code >= 400:\n # It is. Try to serialize the returned data as JSON\n self.logger.debug(\"response should be JSON\")\n\n # First, is the data even something we can serialize as JSON?\n # if the retval is not a dict, we don't know what to do with it, so just be transparent\n if type(retval) not in (dict, list):\n self.logger.error(\"\\033[41;1m You are trying to send the client data that doesn't look like it should be JSON (%s). Fix this! \\033[0m\" % type(retval))\n # TODO: consider raising an exception so as to generate a server error (500), forcing the app developer\n # to confront why/how they are sending back something that doesn't make much sense serializing as JSON\n else:\n # Was the \"pretty\" query parameter set?\n if request.query.get(\"pretty\") == 'true':\n # It was. Indent & sort keys\n self.logger.debug(\"found pretty query param, value is true, prettying JSON\")\n retval = json.dumps(retval, indent=4, sort_keys=True)\n else:\n # It was not. By default, we'll use the most compact representation\n retval = json.dumps(retval, separators=(',', ':'))\n response.content_type = \"application/json\"\n self.logger.debug(\"%d bytes of JSON created\" % len(retval))\n JSONed = True\n else:\n self.logger.debug(\"response should NOT be JSON\")\n\n # Gzipping the response\n # Can the client even handle gzipped response bodies?\n httpRespObj = None\n if isinstance(retval, bottle.HTTPResponse):\n # we'll keep the HTTPResponse so we can update it after gzipping.\n self.logger.debug(\"Found HTTPResponse instance\")\n httpRespObj = retval\n if type(retval.body) in (str, unicode):\n retval = retval.body\n elif hasattr(retval.body, \"read\"):\n retval = retval.body.read()\n else:\n self.logger.error(\"HTTPResponse.body attr is not a str and does not have a read() method!\")\n raise ValueError(\"HTTPResponse.body is not sane: attr is not a str, and is not a file-like object\")\n\n elif isinstance(retval, bottle.HTTPError):\n self.logger.debug(\"Found HTTPError instance\")\n httpRespObj = retval\n if type(retval.body) in (str, unicode):\n retval = retval.body\n elif hasattr(retval.body, \"read\"):\n retval = retval.body.read()\n else:\n self.logger.error(\"HTTPError.body attr is not a str and does not have a read() method!\")\n raise ValueError(\"HTTPError.body is not sane: attr is not a str, and is not a file-like object\")\n\n if 'gzip' in request.headers.get(\"Accept-Encoding\", \"\") and len(retval) > 0:\n self.logger.debug(\"client accepts gzip, gzipping data\")\n # the client handle gzipped data, so lets gzip out data\n self.logger.debug(\"original response data was %d bytes\" % len(retval))\n sio = StringIO.StringIO()\n gzFile = gzip.GzipFile(fileobj=sio, mode='wb', compresslevel=6)\n gzFile.write(retval)\n gzFile.close()\n sio.seek(0)\n retval = sio.read()\n sio.close()\n self.logger.debug(\"new gzipped response data is %d bytes\" % len(retval))\n GZIPPED = True\n\n # Were we given an HTTPResponse isntance? 
If so, we need to update it a bit\n if httpRespObj:\n self.logger.debug(\"Updating HTTPResponse instance with gzipped content, headers\")\n httpRespObj.body = retval\n httpRespObj['Content-Length'] = str(len(retval))\n httpRespObj['Content-Encoding'] = 'gzip'\n else:\n # update the content-length (it is already set) and add the content-encoding header\n response.set_header('Content-Length', str(len(retval)))\n response.set_header('Content-Encoding', 'gzip')\n else:\n self.logger.debug(\"client either doesn't accept gzip or there's no data to return; len(retval)=%d\" % len(retval))\n\n self.logger.info(\"RESPONSE %s gzipped:%s json:%s size:%dB\" % (response.status_code, GZIPPED, JSONed, len(retval)))\n if httpRespObj:\n return httpRespObj\n return retval", "def handle_request(self, request, environ, start_response,\n response_headers):\n method = environ[\"REQUEST_METHOD\"].upper()\n try:\n resource, parent_entity = self.get_resource(request)\n if request.path_option == core.PathOption.metadata:\n return self.return_metadata(\n request, environ, start_response, response_headers)\n elif request.path_option == core.PathOption.batch:\n return self.odata_error(\n request, environ, start_response, \"Bad Request\",\n \"Batch requests not supported\", 404)\n elif request.path_option == core.PathOption.count:\n if isinstance(resource, edm.Entity):\n return self.return_count(\n 1, request, environ, start_response, response_headers)\n elif isinstance(resource, edm.EntityCollection):\n resource.set_filter(\n request.sys_query_options.get(\n core.SystemQueryOption.filter,\n None))\n return self.return_count(\n len(resource), request, environ, start_response,\n response_headers)\n else:\n raise core.BadURISegment(\n \"$count must be applied to \"\n \"an EntitySet or single EntityType instance\")\n elif request.path_option == core.PathOption.links:\n # parent_entity will be source entity\n # request.links_property is the name of the navigation\n # property in the source entity\n # resource will be the target entity, a collection or\n # None\n if not isinstance(parent_entity, edm.Entity):\n raise core.BadURISegment(\"$links must be preceded by a \"\n \"single EntityType instance\")\n if method == \"GET\":\n # open the collection and select the key properties only\n if isinstance(resource, edm.EntityCollection):\n with resource as collection:\n collection.select_keys()\n collection.set_page(\n request.sys_query_options.get(\n core.SystemQueryOption.top, None),\n request.sys_query_options.get(\n core.SystemQueryOption.skip, None),\n request.sys_query_options.get(\n core.SystemQueryOption.skiptoken, None))\n inlinecount = request.sys_query_options.get(\n core.SystemQueryOption.inlinecount, None)\n collection.set_inlinecount(\n inlinecount == core.InlineCount.allpages)\n return self.return_links(\n collection, request, environ, start_response,\n response_headers)\n elif isinstance(resource, edm.Entity):\n # should have just a single link\n return self.return_link(\n resource, request, environ, start_response,\n response_headers)\n else:\n # resource is None - no linked entity\n raise core.MissingURISegment(\n \"%s, no entity is related\" %\n request.links_property)\n elif method == \"POST\":\n if resource is None:\n # can you POST to Orders(1)/$links/Customer ? 
- only if\n # it is currently NULL (0..1)\n resource = parent_entity[\n request.links_property].open()\n if isinstance(resource, edm.EntityCollection):\n with resource as collection:\n target_entity = self.read_entity_from_link(environ)\n collection[target_entity.key()] = target_entity\n return self.return_empty(\n start_response, response_headers)\n else:\n # you can't POST to a single link that already exists\n raise core.BadURISegment(\n \"%s is already linked, use PUT \"\n \"instead of POST to update it\" %\n request.links_property)\n elif method == \"PUT\":\n if parent_entity[request.links_property].isCollection:\n raise core.BadURISegment(\n \"%s: can't update a link with multiplicity *\" %\n request.links_property)\n with parent_entity[\n request.links_property].open() as \\\n collection:\n target_entity = self.read_entity_from_link(environ)\n collection.replace(target_entity)\n return self.return_empty(start_response, response_headers)\n elif method == \"DELETE\":\n if isinstance(resource, edm.EntityCollection):\n raise core.BadURISegment(\n \"%s: DELETE must specify a single link\" %\n request.links_property)\n elif resource is None:\n raise core.MissingURISegment(\n \"%s, no entity is related\" %\n request.links_property)\n with parent_entity[\n request.links_property].open() as \\\n collection:\n del collection[resource.key()]\n return self.return_empty(start_response, response_headers)\n else:\n raise core.InvalidMethod(\"%s not supported here\" % method)\n elif isinstance(resource, edm.Entity):\n if method == \"GET\" or method == \"HEAD\":\n if request.path_option == core.PathOption.value:\n if resource.type_def.has_stream():\n return self.return_stream(\n resource, request, environ, start_response,\n response_headers, method)\n else:\n raise core.BadURISegment(\n \"$value cannot be used since \"\n \"the entity is not a media stream\")\n else:\n self.expand_resource(resource,\n request.sys_query_options)\n return self.return_entity(\n resource, request, environ, start_response,\n response_headers)\n elif method == \"PUT\":\n if request.path_option == core.PathOption.value:\n if resource.type_def.has_stream():\n sinfo = core.StreamInfo()\n if \"CONTENT_TYPE\" in environ:\n sinfo.type = params.MediaType.from_str(\n environ[\"CONTENT_TYPE\"])\n input = messages.WSGIInputWrapper(environ)\n with resource.entity_set.open() as coll:\n coll.update_stream(input,\n resource.key(),\n sinfo)\n # need to update the resource as some fields\n # may have changed\n resource = coll[resource.key()]\n self.set_etag(resource, response_headers)\n return self.return_empty(\n start_response, response_headers)\n else:\n raise core.BadURISegment(\n \"$value cannot be used since the entity is \"\n \"not a media stream\")\n else:\n # update the entity from the request\n self.read_entity(resource, environ)\n resource.commit()\n # now we've updated the entity it is safe to calculate\n # the ETag\n self.set_etag(resource, response_headers)\n return self.return_empty(\n start_response, response_headers)\n elif method == \"DELETE\":\n if request.path_option == core.PathOption.value:\n raise core.BadURISegment(\n \"$value cannot be used with DELETE\")\n resource.delete()\n return self.return_empty(start_response, response_headers)\n else:\n raise core.InvalidMethod(\"%s not supported here\" % method)\n elif isinstance(resource, edm.EntityCollection):\n if method == \"GET\":\n self.expand_resource(resource, request.sys_query_options)\n resource.set_filter(\n request.sys_query_options.get(\n 
core.SystemQueryOption.filter,\n None))\n resource.set_orderby(\n request.sys_query_options.get(\n core.SystemQueryOption.orderby,\n None))\n resource.set_page(\n request.sys_query_options.get(\n core.SystemQueryOption.top, None),\n request.sys_query_options.get(\n core.SystemQueryOption.skip, None),\n request.sys_query_options.get(\n core.SystemQueryOption.skiptoken, None))\n inlinecount = request.sys_query_options.get(\n core.SystemQueryOption.inlinecount, None)\n resource.set_inlinecount(\n inlinecount == core.InlineCount.allpages)\n return self.return_entity_collection(\n resource, request, environ, start_response,\n response_headers)\n elif (method == \"POST\" and\n resource.is_medialink_collection()):\n # POST of a media resource\n sinfo = core.StreamInfo()\n if \"CONTENT_TYPE\" in environ:\n sinfo.type = params.MediaType.from_str(\n environ[\"CONTENT_TYPE\"])\n if \"HTTP_LAST_MODIFIED\" in environ:\n sinfo.modified = params.FullDate.from_http_str(\n environ[\"HTTP_LAST_MODIFIED\"])\n input = messages.WSGIInputWrapper(environ)\n if \"HTTP_SLUG\" in environ:\n slug = app.Slug(environ[\"HTTP_SLUG\"])\n # if the slug is a bracketed string treat it\n # as the key predicate\n key = None\n kp = slug.slug.strip()\n if kp and kp[0] == '(' and kp[-1] == ')':\n try:\n name, kp = core.ODataURI.split_segment(kp)\n # kp is a dictionary for the entity key\n key = resource.entity_set.get_key(kp)\n except ValueError:\n pass\n if not key:\n key = resource.entity_set.extract_key(slug.slug)\n else:\n slug = key = None\n entity = resource.new_stream(input, sinfo=sinfo, key=key)\n if slug:\n for k, v in entity.data_items():\n # catch property-level feed customisation here\n property_def = entity.type_def[k]\n if (property_def.get_target_path() ==\n [(atom.ATOM_NAMESPACE, \"title\")]):\n entity[k].set_from_value(slug.slug)\n resource.update_entity(entity)\n break\n response_headers.append(\n ('Location', str(entity.get_location())))\n return self.return_entity(\n entity, request, environ, start_response,\n response_headers, 201, \"Created\")\n elif method == \"POST\":\n # POST to an ordinary entity collection\n entity = resource.new_entity()\n # read the entity from the request\n self.read_entity(entity, environ)\n resource.insert_entity(entity)\n response_headers.append(\n ('Location', str(entity.get_location())))\n return self.return_entity(\n entity, request, environ, start_response,\n response_headers, 201, \"Created\")\n else:\n raise core.InvalidMethod(\"%s not supported here\" % method)\n elif isinstance(resource, edm.EDMValue):\n if method == \"GET\":\n if request.path_option == core.PathOption.value:\n if resource:\n return self.return_dereferenced_value(\n parent_entity, resource, request, environ,\n start_response, response_headers)\n else:\n raise core.MissingURISegment(\n \"%s (NULL)\" % resource.p_def.name)\n else:\n return self.return_value(\n parent_entity, resource, request, environ,\n start_response, response_headers)\n elif method == \"PUT\":\n if request.path_option == core.PathOption.value:\n if resource:\n self.read_dereferenced_value(resource, environ)\n else:\n raise core.MissingURISegment(\n \"%s (NULL)\" % resource.p_def.name)\n else:\n self.read_value(resource, environ)\n parent_entity.commit()\n self.set_etag(parent_entity, response_headers)\n return self.return_empty(start_response, response_headers)\n elif method == \"DELETE\":\n if request.path_option == core.PathOption.value:\n raise core.BadURISegment(\n \"$value cannot be used with DELETE\")\n # make this one NULL, only 
if it is nullable\n if resource.p_def and not resource.p_def.nullable:\n raise core.InvalidMethod(\n \"DELETE failed, %s property is not nullable\" %\n resource.p_def.name)\n resource.value = None\n parent_entity.commit()\n return self.return_empty(start_response, response_headers)\n else:\n raise core.InvalidMethod(\"%s not supported here\" % method)\n elif isinstance(resource, edm.FunctionCollection):\n return self.return_collection(\n resource, request, environ, start_response,\n response_headers)\n else:\n # None or the DataService object: means we are trying to get\n # the service root\n response_type = self.content_negotiation(\n request, environ, self.ServiceRootTypes)\n if response_type is None:\n return self.odata_error(\n request, environ, start_response, \"Not Acceptable\",\n 'atomsvc+xml or json formats supported', 406)\n elif response_type == \"application/json\":\n return self.return_json_root(\n request, environ, start_response, response_headers)\n else:\n # override the default handling of service root to improve\n # content negotiation\n data = to_text(self.serviceDoc).encode('utf-8')\n response_headers.append(\n (\"Content-Type\", str(response_type)))\n response_headers.append((\"Content-Length\", str(len(data))))\n start_response(\"200 Ok\", response_headers)\n return [data]\n except core.MissingURISegment as e:\n return self.odata_error(\n request, environ, start_response, \"Resource not found\",\n \"Resource not found for segment %s\" % str(e), 404)\n except core.BadURISegment as e:\n return self.odata_error(\n request, environ, start_response, \"Bad Request\",\n \"Resource not found for segment %s\" % str(e), 400)\n except edm.NavigationError as e:\n return self.odata_error(\n request, environ, start_response, \"NavigationError\", str(e),\n 403)\n except edm.ConstraintError as e:\n return self.odata_error(\n request, environ, start_response, \"ConstraintError\", str(e),\n 403)\n except NotImplementedError as e:\n return self.odata_error(\n request, environ, start_response, \"NotImplementedError\",\n str(e), 405)", "def handle_crawl_domain_request(): #*args,**kwargs):\n \n try:\n the_json=request.json\n except: the_json={}\n \n if the_json:\n Helper.cache_request('crawl_domain',the_json, id='')\n \n try: print (str(request.json))\n except: pass\n\n result={}\n result['status_code']=200\n\n return jsonify(result)", "def __submit_json(self, method,url, payload):\n if self.settings.requests:\n import json\n HEADERS = {'content-type': 'application/json'}\n if method == 'put':\n r = self.settings.requests.put(url, data=json.dumps(payload), headers=HEADERS)\n else:\n r = self.settings.requests.post(url, data=json.dumps(payload), headers=HEADERS)\n return self._process_result(r.json())\n\n else:\n import json\n data = json.dumps(payload)\n if sys.version_info[0] == 3:\n data = data.encode('ascii')\n HEADERS = {'content-type': 'application/json'}\n r = self.settings.urllib2.Request(url, data, {'Content-Type': 'application/json'})\n if method == 'put':\n r.get_method = lambda: method\n for key, value in HEADERS.items():\n r.add_header(key, value)\n try:\n data = self.settings.urllib2.urlopen(r)\n except self.settings.urllib2.HTTPError:\n raise\n return self._process_result(self.settings.json.load(data))", "def handle_json(self, source, data):\n method, args = json.loads(data)\n try:\n result = self.call(source, method, *args)\n except Exception as exc:\n result = str(exc)\n\n return json.dumps(result)", "def handle_request_from(self, user, request):\n request_type = 
request.request_type\n\n if request_type in self._plain_requests:\n ret = Response(\n request_type,\n data=self._plain_requests[request_type]()\n )\n elif request_type in self._user_requests and not user:\n ret = self._no_user_response(request_type)\n elif request_type in self._user_requests:\n ret = Response(\n request_type,\n data=self._user_requests[request_type](user)\n )\n else:\n ret = self._complex_requests[request_type](user, request.data)\n\n if ret.success:\n self._operation_count = \\\n (self._operation_count + 1) % self._save_frequency\n if self._operation_count == 0:\n self._users.commit()\n\n return ret", "def translate(self, request):\n\n request.content_type = 'application/json'\n\n try:\n if request.raw_post_data == \"\":\n request.data = \"\"\n else:\n request.data = json.loads(request.raw_post_data)\n\n # Reset both POST and PUT from request, as its\n # misleading having their presence around.\n request.POST = request.PUT = dict()\n except (TypeError, ValueError):\n raise InvalidParameter(\"JSON\")\n\n return request", "def _check_process_resp(self, resp, expected_fields=None):\n\n if resp.status_code != LenovoRestClient.RESP_CODE_OK:\n raise cexc.NOSRestHTTPError(http_code=resp.status_code,\n http_reason=resp.reason, http_op=resp.request.method,\n url=resp.url, http_response=resp.text)\n\n rj = resp.json()\n\n if not expected_fields:\n return rj\n\n for field in expected_fields:\n try:\n val = rj[field]\n except KeyError:\n raise cexc.NOSJsonFieldNotFound(field=field, url=resp.url, json=rj)\n\n return rj", "def get_request(req: Dict) -> Dict:\n for field in ['body']:\n if field in req:\n data = req[field]\n if isinstance(data, str):\n return create_request(data)\n elif isinstance(data, dict) and 'text' in data:\n return data\n return None", "def _read_payload(self):\n content_type = self.headers.get(\"content-type\")\n # Check the content type, if not set we assume json.\n # We can have a charset just after the content type, e.g.\n # application/json; charset=UTF-8.\n\n if content_type and not re.search(r\"\\s*application/json\\s*;?\", content_type):\n raise SgJiraBridgeBadRequestError(\n \"Invalid content-type %s, it must be 'application/json'\" % content_type\n )\n\n content_len = int(self.headers.get(\"content-length\", 0))\n body = self.rfile.read(content_len)\n payload = {}\n if body:\n payload = json.loads(body)\n\n return payload", "def handler(data, context):\n processed_input = _process_input(data, context)\n response = requests.post(context.rest_uri, data=processed_input)\n return _process_output(response, context)", "def request_action(self, request, data):\n\n response = self.oauth.post(url=f'{self.base_url}/json/{request}', data=data)\n return response.json()", "def process_request(request, client):\n # type: (Dict, ConnectClient) -> Dict\n\n # Get the subscription Id from the request that needs to be resumed\n # Saving the Subscription ID from Vendor system is encouraged to be able to map the subscription in Connect\n # with the subscription in Vendor system\n # The Subscription ID can be saved in a fulfillment parameter\n # This external_subscription_id from Vendor platform might be required to call the Vendor API\n # to resume the suspended subscription\n external_subscription_id = Utils.get_param_value(request, 'fulfillment', 'subscription_id')\n\n # TODO: Add code to Resume the subscription in vendor system by calling the Vendor API\n # to resume subscription\n # api_client = APIClient(api_url='',\n # api_key='')\n # resume_payload = {}\n # 
api_client.resume_subscription(resume_payload, external_subscription_id)\n\n # When successful, approve the fulfillment request with the following code:\n return Utils.approve_fulfillment_request(request, client)\n # If resume is ok the status of Fulfillment Request object will be Approved and Subscription object Active.\n # If the resume action can't be done the request can be rejected using Utils.reject_fulfillment_request method.", "def prepare(self):\n if self.request.body:\n if self.request.headers[\"Content-Type\"] and self.request.headers[\"Content-Type\"].startswith(\"application/json\") and self.request.body:\n self.json_body = self.request.body\n else:\n self.json_body = None", "def api_call():\n\n json_str = load_input()\n output = {\n 'inputs': json_str,\n 'results': 'cool results'}\n\n return json.dumps(output), 200, {'Content-Type': 'text/plain;charset=utf-8'}", "def load_json_body(self, request):\n\n request.json_body = None\n\n if not request.META.get(\"CONTENT_TYPE\", \"\").startswith(\"application/json\"):\n return\n\n if not len(request.body):\n return\n\n try:\n request.json_body = json.loads(request.body.decode('utf-8'))\n except json.JSONDecodeError:\n return", "def _do_request(self, url, method='GET', body=None):\n response, content = self.request(url, method=method, body=body, headers=self.headers)\n if int(response['status']) != 200:\n raise GPAPIError(response['status'], 'ERROR IN REQUEST')\n json = simplejson.loads(content)\n return json", "def handle_request(self, method_name, app_prefix, path, payload=None):\n path = self.__get_path(app_prefix, path)\n method = getattr(self._session, method_name)\n return ReselloResponse(method(path, json=payload))", "def _executeOperation(self, request:CSERequest, reqRi:str) -> Result:\n\t\t# Execute the actual operation\n\t\trequest.args.operation == Operation.RETRIEVE and (operationResult := CSE.dispatcher.processRetrieveRequest(request, request.headers.originator)) is not None\n\t\trequest.args.operation == Operation.CREATE and (operationResult := CSE.dispatcher.processCreateRequest(request, request.headers.originator)) is not None\n\t\trequest.args.operation == Operation.UPDATE and (operationResult := CSE.dispatcher.processUpdateRequest(request, request.headers.originator)) is not None\n\t\trequest.args.operation == Operation.DELETE and (operationResult := CSE.dispatcher.processDeleteRequest(request, request.headers.originator)) is not None\n\n\t\t# Retrieve the <request> resource\n\t\tif (res := CSE.dispatcher.retrieveResource(reqRi)).resource is None:\t\n\t\t\treturn Result(status=False) \t\t\t\t\t\t\t\t\t\t\t\t\t\t# No idea what we should do if this fails\n\t\treqres = res.resource\n\n\t\t# Fill the <request>\n\t\treqres['ors'] = {\t# operationResult\n\t\t\t'rsc'\t: operationResult.rsc,\n\t\t\t'rqi'\t: reqres.rid,\n\t\t\t'to'\t: request.id,\n\t\t\t'fr'\t: reqres.org,\n\t\t\t'ot'\t: reqres['mi/ot'],\n\t\t\t'rset'\t: reqres.et\n\t\t}\n\t\tif operationResult.rsc in [ RC.OK, RC.created, RC.updated, RC.deleted ] :\t\t\t# OK, created, updated, deleted -> resource\n\t\t\treqres['rs'] = RequestStatus.COMPLETED\n\t\t\tif operationResult.resource is not None:\n\t\t\t\treqres['ors/pc'] = operationResult.resource.asDict()\n\t\telse:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Error\n\t\t\treqres['rs'] = RequestStatus.FAILED\n\t\t\tif operationResult.dbg is not None:\n\t\t\t\treqres['ors/pc'] = { 'm2m:dbg' : operationResult.dbg }\n\n\t\t# Update in DB\n\t\treqres.dbUpdate()\n\n\t\treturn Result(resource=reqres, status=True)", "def 
dispatchRequest (self, base_path=\"\", path_info=\"/\", params={}, request_method = \"GET\", post_data = None, accepts = \"\"): \n response_code = \"200 OK\"\n host = base_path\n request = None\n content_types = {\n 'application/vnd.google-earth.kml+xml': 'KML',\n 'application/json': 'GeoJSON',\n 'text/javascript': 'GeoJSON',\n 'application/rss+xml': 'GeoRSS',\n 'text/html': 'HTML',\n 'osm': 'OSM',\n 'gml': 'WFS',\n 'wfs': 'WFS',\n 'kml': 'KML',\n 'json': 'GeoJSON',\n 'georss': 'GeoRSS',\n 'atom': 'GeoRSS',\n 'html': 'HTML',\n 'geojson':'GeoJSON'\n } \n \n path = path_info.split(\"/\")\n \n found = False\n \n format = \"\"\n \n if params.has_key(\"format\"):\n format = params['format']\n if format.lower() in content_types:\n format = content_types[format.lower()]\n found = True\n \n if not found and len(path) > 1:\n path_pieces = path[-1].split(\".\")\n if len(path_pieces) > 1:\n format = path_pieces[-1]\n if format.lower() in content_types:\n format = content_types[format.lower()]\n found = True\n \n if not found and accepts:\n if accepts.lower() in content_types:\n format = content_types[accepts.lower()]\n found = True\n \n if not found and not format:\n if self.metadata.has_key(\"default_service\"):\n format = self.metadata['default_service']\n else: \n format = \"GeoJSON\"\n \n service_module = __import__(\"Service.%s\" % format, globals(), locals(), format)\n service = getattr(service_module, format)\n request = service(self)\n \n response = []\n \n request.parse(params, path_info, host, post_data, request_method)\n \n # short circuit datasource where the first action is a metadata request. \n if len(request.actions) and request.actions[0].method == \"metadata\": \n return request.encode_metadata(request.actions[0])\n \n datasource = self.datasources[request.datasource] \n \n if request_method != \"GET\" and hasattr(datasource, 'processes'):\n raise Exception(\"You can't post data to a processed layer.\") \n\n \n datasource.begin()\n try:\n for action in request.actions:\n method = getattr(datasource, action.method)\n result = method(action)\n response += result \n datasource.commit()\n except:\n datasource.rollback()\n raise\n \n if hasattr(datasource, 'processes'):\n for process in datasource.processes.split(\",\"):\n if not self.processes.has_key(process): \n raise Exception(\"Process %s configured incorrectly. 
Possible processes: \\n\\n%s\" % (process, \",\".join(self.processes.keys() ))) \n response = self.processes[process].dispatch(features=response, params=params)\n\n mime, data = request.encode(response)\n data = data.encode(\"utf-8\") \n return (mime, data)", "def modify_requests(rf):\n post_url = '/docs/modify/'\n request_succ = [{\n \"request\": gen_request(rf, post_url, {\n \"user_id\": 1,\n \"type\": \"fetch\",\n \"document_id\": 1,\n }),\n \"response\": {\n \"code\": 200,\n }\n },\n {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": 2,\n \"type\": \"modify\",\n \"document_id\": 2,\n \"content\": \"I will modify content!\"\n }),\n \"response\": {\n \"code\": 200,\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": 3,\n \"type\": \"delete\",\n \"document_id\": 3,\n }),\n \"response\": {\n \"code\": 200,\n }\n }]\n request_fields_absent = [{\n \"request\": gen_request(rf, post_url, {\n \"type\": \"fetch\",\n \"document_id\": 3\n }),\n \"response\": {\n \"code\": 400,\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": 1,\n \"document_id\": 3\n }),\n \"response\": {\n \"code\": 400,\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": 1,\n \"type\": \"delete\",\n }),\n \"response\": {\n \"code\": 400,\n \"data\": \"It seems silly, but it can pass code smell test\"\n }\n }]\n request_user_invalid = [{\n \"request\": gen_request(rf, post_url, {\n \"user_id\": \"\",\n \"type\": \"fetch\",\n \"document_id\": 3\n }),\n \"response\": {\n \"code\": 400\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": \"qs\",\n \"type\": \"delete\",\n \"document_id\": 1\n }),\n \"response\": {\n \"code\": 400\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": 5,\n \"type\": \"delete\",\n \"document_id\": 3\n }),\n \"response\": {\n \"code\": 400,\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": \"10\",\n \"type\": \"delete\",\n \"document_id\": 3\n }),\n \"response\": {\n \"code\": 400,\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": {},\n \"type\": \"fetch\",\n \"document_id\": 3\n }),\n \"response\": {\n \"code\": 400,\n }\n }]\n\n request_type_invalid = [{\n \"request\": gen_request(rf, post_url, {\n \"user_id\": 1,\n \"type\": \"deleted\",\n \"document_id\": 3\n }),\n \"response\": {\n \"code\": 400,\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": 1,\n \"type\": \"yysy\",\n \"document_id\": 3\n }),\n \"response\": {\n \"code\": 400,\n }\n }]\n\n request_document_invalid = [{\n \"request\": gen_request(rf, post_url, {\n \"user_id\": 1,\n \"type\": \"fetch\",\n \"document_id\": \"\"\n }),\n \"response\": {\n \"code\": 400,\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": 1,\n \"type\": \"fetch\",\n \"document_id\": \"happy\"\n }),\n \"response\": {\n \"code\": 400,\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": 1,\n \"type\": \"delete\",\n \"document_id\": 1000\n }),\n \"response\": {\n \"code\": 400,\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": \"1\",\n \"type\": \"fetch\",\n \"document_id\": {}\n }),\n \"response\": {\n \"code\": 400,\n }\n }]\n\n request_operation_on_deleted_file = [{\n \"request\": gen_request(rf, post_url, {\n \"user_id\": \"1\",\n \"type\": \"modify\",\n \"document_id\": 5,\n \"content\": \"\"\n }),\n \"response\": {\n \"code\": 400,\n }\n }]\n\n request_content = [{\n \"request\": gen_request(rf, post_url, {\n \"user_id\": \"1\",\n \"type\": \"modify\",\n 
\"document_id\": 2,\n }),\n \"response\": {\n \"code\": 400,\n }\n }, {\n \"request\": gen_request(rf, post_url, {\n \"user_id\": \"1\",\n \"type\": \"modify\",\n \"document_id\": 2,\n \"content\": \"\"\n }),\n \"response\": {\n \"code\": 400,\n }\n }]\n\n request_decode_error = [{\n \"request\": gen_request(rf, post_url, {\n \"null\"\n }),\n \"response\": {\n \"code\": 400,\n }\n }]\n requests = request_succ + request_fields_absent + request_user_invalid + \\\n request_type_invalid + request_document_invalid + \\\n request_content + request_operation_on_deleted_file + \\\n request_decode_error\n return requests", "def _doRequest(self, httpClientMethod, *args):\n try:\n resp = httpClientMethod(*args)\n return resp.json()\n except RequestException as e:\n raise checkedError(e)", "def rewrite_request(self, request):\n if not self.should_rewrite(request):\n return request\n body = request.get('body', None)\n if body is not None:\n body = json.loads(body)\n self.rewrite_body(body)\n request['body'] = body", "def result():\n # Retrieve JSON parameters data.\n data = request.get_json() or {}\n data.update(dict(request.values))\n tid = data.get(\"tid\")\n if not tid:\n raise abort(400, \"missing 'tid' data\")\n\n # Get the result (if exists and finished).\n result = tasks.process_message.AsyncResult(tid)\n # Return status and result if available.\n resp = {\n \"status\": result.status,\n \"result\": None,\n }\n if result.ready():\n resp[\"result\"] = result.get()\n return resp", "def detect_object_json():\n response = None\n try:\n logger.info(request)\n req_json = request.get_json()\n logger.info(req_json)\n\n if req_json is not None:\n base_img_url = req_json.get('base_image_url')\n\n if base_img_url is not None:\n\n base_img = cv2.imdecode(\n np.asarray(bytearray(urllib.request.urlopen(base_img_url).read()), dtype=\"uint8\"), cv2.IMREAD_COLOR)\n\n if base_img is not None:\n response = predictionService.verify(base_img=base_img)\n else:\n response = BaseResponse(code=400, reason='base_image cannot be null')\n except urllib.error.URLError as e:\n logger.error(e)\n response = BaseResponse(code=500, reason=\"Could not read from image URL provided for base and target\")\n except cv2.error as e:\n logger.error(e)\n response = BaseResponse(code=500, reason=\"URL provided is not a valid image\")\n except Exception as e:\n logger.error(e)\n response = BaseResponse(code=500, reason=\"Internal server error occurred. 
refer to logs\")\n\n return response.toJSON()", "def handle_request(self, given_request: Request):\n try:\n with open(given_request.input_file, mode='r', encoding='utf-8') as file:\n content = ''.join(file)\n if not self.next_handler:\n print(content)\n return True\n given_request.result = content\n return self.next_handler.handle_request(given_request)\n except FileNotFoundError as e:\n print(e)\n return False", "def process_job(self, job_request):\n\n try:\n # Validate JobRequest message\n validation_errors = [\n Error(\n code=error.code,\n message=error.message,\n field=error.pointer,\n )\n for error in (JobRequestSchema.errors(job_request) or [])\n ]\n if validation_errors:\n raise JobError(errors=validation_errors)\n\n # Add a client router in case a middleware wishes to use it\n job_request['client'] = self.make_client(job_request['context'])\n\n # Build set of middleware + job handler, then run job\n wrapper = self.make_middleware_stack(\n [m.job for m in self.middleware],\n self.execute_job,\n )\n job_response = wrapper(job_request)\n except JobError as e:\n self.metrics.counter('server.error.job_error').increment()\n job_response = JobResponse(\n errors=e.errors,\n )\n except Exception as e:\n # Send an error response if no middleware caught this.\n # Formatting the error might itself error, so try to catch that\n self.metrics.counter('server.error.unhandled_error').increment()\n return self.handle_error(e)\n\n return job_response", "def handle_request(json_items):\r\n try:\r\n steps = parse_request(json_items)\r\n answer, method = evaluate_steps(steps)\r\n return answer, method\r\n except Exception as generic_error:\r\n print(generic_error)\r\n return None, f'Error while evaluating steps: {generic_error}'", "def send_request(self, request):\n json_results = requests.get(request).json()\n\n status = json_results['status']\n\n if status == const.STATUS_OK:\n return json_results['results']\n\n self.log.warning(self.get_status_code(status))", "def submitRequest(self, json):\n uID = json.get('uID')\n request = True\n approval = \"Wait\"\n if uID:\n\n RequestsDAO().insertRequest(uID, request, approval)\n mapped_result = self.buildRequestToDict(uID, request, approval)\n return jsonify(TURN=mapped_result), 201\n\n else:\n return jsonify(Error=\"Unexpected attributes in post request\"), 400", "def convert_RequestEntity_to_Request(request_entity):\n result = ResponseEntity()\n try:\n user = User.objects.get(username=request_entity.username)\n account = Account.objects.get(user=user)\n request = Request.objects.get(id=request_entity.request_id)\n request = copy_field_RequestEntity_to_Request(request_entity, request)\n result.success = True\n result.data = request\n except Exception as e:\n print str(e)\n result.success = False\n result.message = str(e)\n finally:\n return result", "def http_request(\n self,\n method: str,\n url_suffix: str,\n json_data=None,\n params=None,\n headers=None,\n ):\n resp = Response()\n try:\n resp = super()._http_request(\n method=method,\n url_suffix=url_suffix,\n json_data=json_data,\n params=params,\n headers=headers,\n resp_type='response',\n timeout=self.request_timeout,\n ok_codes=(200, 201),\n error_handler=self.handle_error_response,\n )\n except MissingSchema:\n raise ValueError(MESSAGES['MISSING_SCHEMA_ERROR'])\n except InvalidSchema:\n raise ValueError(MESSAGES['INVALID_SCHEMA_ERROR'])\n except InvalidURL:\n raise ValueError(MESSAGES['INVALID_API_URL'])\n except DemistoException as e:\n self.handle_demisto_exception(e)\n\n if resp.ok:\n content_type 
= resp.headers.get('Content-Type', '')\n if content_type == CONTENT_TYPE_JSON:\n # Handle empty response\n if resp.text == '':\n return resp\n else:\n return resp.json()\n elif self.is_supported_context_type(content_type):\n return resp", "def readjson():\n uuid = request.query.get('uuid','')\n if(uuid == \"\"):\n result = { \"code\":\"fail\", \"message\":\"empty uuid\"}\n return result\n else:\n zenodo = ZenodoRequest(uuid)\n return {'data':zenodo.saveInDatabase()}", "def handle(req):\n payload = json.loads(req)\n\n if ('user_mentions' not in payload or\n 'req_id' not in payload):\n msg = '''Make sure the input has `user_mentions` and `req_id`'''\n ret = json.dumps({\"status\":\"MissingFieldError\", \"message\":msg})\n sys.exit(ret)\n\n user_mentions_str = json.dumps(payload['user_mentions'])\n req_id = payload['req_id']\n req_id_str = str(payload['req_id'])\n\n redis_server= os.getenv('REDIS_SERVER')\n redis_port = os.getenv('REDIS_PORT')\n try:\n r = redis.Redis(host = redis_server, port = redis_port)\n\n hset_reply = r.hset(req_id_str, \"user_mentions\", user_mentions_str)\n hlen_reply = r.hincrby(req_id_str, \"num_components\", 1)\n r.expire(req_id_str, os.getenv('REDIS_EXPIRE_TIMEOUT'))\n except:\n ret = {\"status\":\"ComposePostServiceUploadUserMentionsError\",\n \"errors\":[{\"message\": \"Redis failure\",\n \"exception\": str(sys.exc_info()[1]),\n \"traceback\": traceback.format_exc()}\n ]}\n sys.exit(dumps(ret))\n\n if hlen_reply == int(os.getenv('NUM_COMPONENTS')):\n ret = compose_and_upload(req_id)\n if ret['http_status_code']!= 200:\n sys.exit(dumps({\"status\":\"ComposePostServiceUploadUserMentionsError\", \"req_id\": req_id_str,\n \"errors\": [ret]}))\n else:\n return dumps({\"status\":\"success\", \"req_id\": req_id_str,\n \"compose_and_upload\": ret})\n\n return dumps({\"status\":\"success\", \"req_id\": req_id_str})", "def _process_request(self, request, response):\n ...", "def request_routine(self, url, request_method, json_data=None):\n response_obj = requests.request(request_method,\n url=url,\n headers=self.header,\n data=json.dumps(json_data),\n verify=self.verify)\n\n LOG.debug('JovianDSS: Response code: %s', response_obj.status_code)\n LOG.debug('JovianDSS: Response data: %s', response_obj.text)\n\n ret = dict()\n ret['code'] = response_obj.status_code\n\n if '{' in response_obj.text and '}' in response_obj.text:\n if \"error\" in response_obj.text:\n ret[\"error\"] = json.loads(response_obj.text)[\"error\"]\n else:\n ret[\"error\"] = None\n if \"data\" in response_obj.text:\n ret[\"data\"] = json.loads(response_obj.text)[\"data\"]\n else:\n ret[\"data\"] = None\n\n return ret" ]
[ "0.60520667", "0.5982503", "0.585078", "0.5832222", "0.5702895", "0.5620679", "0.55594456", "0.55570936", "0.555036", "0.547908", "0.54710627", "0.54656655", "0.542057", "0.54201967", "0.54093033", "0.5404972", "0.53861696", "0.534103", "0.534082", "0.5264856", "0.5264153", "0.5242653", "0.5225588", "0.52082044", "0.51915896", "0.5188761", "0.5173802", "0.5162292", "0.5153314", "0.51489073", "0.5133672", "0.5126875", "0.5126334", "0.51177377", "0.5100902", "0.5095488", "0.5076411", "0.50747204", "0.5055698", "0.5054438", "0.505222", "0.50508326", "0.50420177", "0.5033403", "0.50227606", "0.50222737", "0.50072914", "0.5003295", "0.4997783", "0.49866647", "0.49786776", "0.4978572", "0.4978572", "0.49656236", "0.49616024", "0.4954719", "0.49477306", "0.49462745", "0.49416116", "0.49412745", "0.49410498", "0.49317998", "0.49299675", "0.4924977", "0.49240044", "0.49200913", "0.49101302", "0.4907414", "0.4901708", "0.48982444", "0.4897358", "0.4876798", "0.48766744", "0.4871372", "0.48653486", "0.48649046", "0.48639", "0.4862201", "0.48573557", "0.48567215", "0.48566982", "0.48544905", "0.4846494", "0.4845277", "0.4839456", "0.48309538", "0.4830168", "0.48298457", "0.48294538", "0.48274952", "0.48196346", "0.48191693", "0.48148698", "0.48020646", "0.479871", "0.47923103", "0.47857854", "0.4782004", "0.47819236", "0.47803548" ]
0.6597384
0
Recommended maximum number of datapoints. Returns int
def recommended_max_num_datapoints(self) -> int:
    # very large number, essentially no limit by default
    return 1e9
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def graph_data_size_max(self) -> int:\n return int(self.graph_tuple_stats.graph_data_size_max or 0)", "def get_minimum_number_of_data_points(cls):\n return cls._MINIMUM_NUMBER_OF_DATA_POINTS", "def numberOfPoints(self):\n return 20000", "def data_edge_count_max(self) -> int:\n return int(self.graph_tuple_stats.data_edge_count_max or 0)", "def _max_periods(self):\n return self.data.shape[0]", "def max_positions(self):\n return int(100000.0)", "def maximum_count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"maximum_count\")", "def get_max_readings( self ):\n return 2500", "def __len__(self):\n return int(np.ceil(self.max_index / float(self.batch_size)))", "def max_count(self):\n return self.config.get('max_count', 500)", "def n_points(self):\n\n if self.data_reduced:\n return len(self.data_reduced[0])\n else:\n return 0", "def __len__(self):\n\t\treturn min(len(self.dataset), self.opt.max_dataset_size)", "def max(self):\n return(len(self.__d))", "def maxpoints(self):\n return self[\"maxpoints\"]", "def max_trials(self) -> int:\n return self._max_trials", "def abs_max_heat_setpoint_limit(self) -> int:\n return self.cluster.get(\"abs_max_heat_setpoint_limit\", 3000)", "def get_max_iters():\n return 2000", "def get_max_rows_per_partition() -> int:\n pass", "def max_mireds(self):\n return 333", "def maxTicks(self) -> int:\n return self._maxTicks", "def max_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_count\")", "def max_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_count\")", "def max_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_count\")", "def max_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_count\")", "def max_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_count\")", "def get_number_of_data_points(self):\n\n log.warning(\n \"get_number_of_data_points not implemented, values for statistical measurements such as AIC or BIC are \"\n \"unreliable\",\n )\n\n return 1.0", "def adaptive_limit(self) -> int:\n return pulumi.get(self, \"adaptive_limit\")", "def abs_max_cool_setpoint_limit(self) -> int:\n return self.cluster.get(\"abs_max_cool_setpoint_limit\", 3200)", "def max_temp(self):\n return 99", "def data_size( self, groups ):\n #if len(groups) == 0:\n # return 0\n return max( groups.values() )", "def max_num_batches(self):\n return self._max_num_batches", "def max_results(self) -> float:\n return self._max_results", "def maxsize(self):\n return len(self._data)", "def __len__(self):\n return int(np.floor(len(self.dataset_df) / self.batch_size))", "def node_count_max(self) -> int:\n return int(self.graph_tuple_stats.node_count_max or 0)", "def max_known_number(self):\n return len(self.number_list)-1", "def max_positions(self):\n return int(1e5)", "def edge_count_max(self) -> int:\n return int(self.graph_tuple_stats.edge_count_max or 0)", "def max(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"max\")", "def max(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"max\")", "def max_instance_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_instance_count\")", "def max_instance_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_instance_count\")", "def max_temp(self):\n return 30", "def maxsize(self) -> int:\n return self._maxsize", "def max_positions(self):\n return int(1e5) # an arbitrary large number", "def max_positions(self):\n return int(1e5) # an arbitrary large 
number", "def max_value(self) -> int:\n return self.__max_value", "def _determine_limit(self, limit):\n\n # Note: +1 is allowed here because it allows\n # the user to fetch one beyond to see if they\n # are at the end of the list\n if not limit:\n res = conf.api_configuration.max_returned_num + 1\n else:\n res = min(conf.api_configuration.max_returned_num + 1, limit)\n\n return res", "def _get_max_answers(self):\n return max([len(x) for x in self.labels])", "def maximum_number_of_workers(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"maximum_number_of_workers\")", "def call_edge_count_max(self) -> int:\n return int(self.graph_tuple_stats.call_edge_count_max or 0)", "def max_nodes(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"max_nodes\")", "def get_max_log_lines(self):\n\t\treturn self.spinMaxLogLines.get_value_as_int()", "def max_steps(self) -> int:\n return pulumi.get(self, \"max_steps\")", "def data_flow_positive_node_count_max(self) -> Optional[int]:\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_positive_node_count_max or 0)", "def quick_run_limit(self):\n try:\n return int(environment.get(\"Quick\"))\n except KeyError:\n return maxsize", "def control_edge_count_max(self) -> int:\n return int(self.graph_tuple_stats.control_edge_count_max or 0)", "def _maxValueLength(self):\n returnvalue = 0\n for row in self._value:\n for item in row:\n if (type(item) == type(float())):\n returnvalue = max(returnvalue, len('%.3f' % item))\n else:\n returnvalue = max(returnvalue, len(str(item)))\n return returnvalue", "def data_flow_steps_max(self) -> Optional[int]:\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)", "def getDataSetCount(self):\n\t\treturn int(self.numberOfImages / self.slicesPerTimepoint)", "def Max(data):\n return data.max()", "def max_individuals(self) -> int:\n return self.group_size.upper * self.groups_allowed", "def get_max_size(self):\n max_size = 0\n file = h5py.File(self.filename, 'r')\n for idx in range(len(self)):\n label = self.labels[idx]\n timestamps_group = file['/'][self.mode + '_timestamps']\n timestamps_dset = timestamps_group[label]\n size = len(timestamps_dset)\n if size > max_size: max_size = size\n file.close()\n return max_size\n\n # max_size = 0\n # for i in range(len(self)):\n # item = self[i][0]\n # if len(item) > max_size:\n # max_size = len(item)\n # return max_size", "def maxsize(self):\r\n return self._maxsize", "def graph_data_size(self) -> int:\n return int(self.graph_tuple_stats.graph_data_size or 0)", "def n_points(self) -> int:\n return len(self.df)", "def max(self) -> int:\n return self._status['party_size'][1]", "def _get_maximum(self):\n return self._maximum", "def n_points(self) -> int:\n return len(self.all_df)", "def getUpperBound(self) -> int:\n return self.upper_bound", "def load_max(self):\n return max(self.load_samples)", "def max_epochs(self):\n return self.trainer_cfg[\"max_num_epochs\"]", "def graph_data_size_min(self) -> int:\n return int(self.graph_tuple_stats.graph_data_size_min or 0)", "def required_memory_maximum(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"required_memory_maximum\")", "def maximum_item_count(self):\n return self._maximum_item_count", "def getMaxValue(self):\n # TODO: make this more consistent accross versions\n # This was a \"fix\" when we started supported PS5000a\n return self.MAX_VALUE", "def get_max_score(self):\r\n return sum(self.maxpoints.values())", "def get_max_density(self):\n max_density = 
str(self.density.index(min(self.density)) + 1)\n print(max_density)\n return max_density", "def getmaxnumvar(self): # 3\n res,resargs = self.__obj.getmaxnumvar()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _maxnumvar_return_value = resargs\n return _maxnumvar_return_value", "def _maximum(self) -> float:\n return self._config[CONF_MAX]", "def max(self):\r\n\t\treturn max(self.sample)", "def max_counts(self):\n\n return np.nanmax(self.pre_proc_data)", "def numpoints(self):\n return len(self.pars) + 1 # so dof is 1", "def nr_points(self):\n return len(self.x)", "def getMaximumDistances(self):\n pass", "def max(self):\r\n return np.max(self.data_array)", "def max(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max\")", "def max(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max\")", "def fail_max(self) -> int:\n return self._fail_max", "def get_max_rois(self):\n \n maxsize = 0\n for index in self.SampleID:\n rois = self.__getrois__(index);\n maxsize = max(maxsize, rois.shape[0])\n \n return maxsize", "def limit(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"limit\")", "def maxTouchPoints(self):\n return 1", "def max_findings(self) -> float:\n return pulumi.get(self, \"max_findings\")", "def max_telemetry_items_per_second(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_telemetry_items_per_second\")", "def max_node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_node_count\")", "def prepared_max(self) -> int:\n return self._prepared.prepared_max", "def maximum_number_of_workers(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"maximum_number_of_workers\")", "def ExpectedMaxBatchSizes(self, run_params):\n return self.max_batch_sizes", "def n_points(self):\n return self.points.shape[0]", "def max_positions(self):\n return 1e6 # an arbitrary large number" ]
[ "0.74292016", "0.74018806", "0.7281152", "0.7237732", "0.7090521", "0.6958116", "0.6913622", "0.68758655", "0.6854837", "0.68302363", "0.67935854", "0.6772748", "0.6769792", "0.67452073", "0.67428017", "0.67161715", "0.66959566", "0.668196", "0.66803277", "0.6658686", "0.66514474", "0.66514474", "0.66514474", "0.66514474", "0.66514474", "0.6631744", "0.65738827", "0.6570087", "0.65681887", "0.6564514", "0.6558092", "0.6553032", "0.6551757", "0.65513134", "0.6535992", "0.6491977", "0.6489239", "0.6469231", "0.64688015", "0.64688015", "0.64624745", "0.64624745", "0.6449456", "0.64485943", "0.6447626", "0.6447626", "0.6443373", "0.6433404", "0.6432301", "0.6424528", "0.6424525", "0.64139235", "0.6400098", "0.6394409", "0.63810486", "0.63761216", "0.63695174", "0.6365308", "0.63584715", "0.6356129", "0.6347987", "0.6345646", "0.63392884", "0.6334323", "0.6333982", "0.6330728", "0.6326488", "0.63201654", "0.63168555", "0.63118964", "0.63117635", "0.6306937", "0.62957036", "0.6281506", "0.62796897", "0.6259222", "0.6252769", "0.62502277", "0.6248124", "0.6246856", "0.62467045", "0.6241582", "0.6240394", "0.6238567", "0.6236961", "0.6234053", "0.62335706", "0.62335706", "0.6224959", "0.62244624", "0.62234145", "0.62186193", "0.62182367", "0.62172747", "0.62155986", "0.6215395", "0.6215009", "0.6214383", "0.6212473", "0.621217" ]
0.87322336
0
Fits function y=f(x) given training pairs (x_train, y_train).
def _fit(self, x_train, y_train, x_valid, y_valid, regressor_callback=None):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self, X, y):", "def train(self, x_train, y_train, x_val, y_val):\n pass", "def train(self, X, y):\n pass", "def train(self, X, y):\n lagrange_multipliers = self._compute_multipliers(X, y)\n return self._construct_predictor(X, y, lagrange_multipliers)", "def fit(self, X, y):\n self.X_train = X\n self.y_train = y", "def train(self, X, y):\n self.X_train = X\n self.y_train = y", "def train(self, X, y):\n self.X_train = X\n self.y_train = y", "def train(self, X, y):\n self.model.fit(X, y)", "def fit(self, X, y):", "def fit(self, X, y):", "def fit(self, X, y):", "def learn(self, Xtrain, ytrain):", "def fit(self, x, y):\r\n\r\n self.train_x = x\r\n self.train_y = y\r\n self.__find_psi__()", "def train(self, X, y):\n self.X_train = X\n self.y_train = y\n assert isinstance(X, np.ndarray)\n assert isinstance(y, np.ndarray)", "def fit(self, X_train, y_train):\n return self", "def train(self, X, y):\r\n # the nearest neighbor classifier simply remembers all the training data\r\n self.Xtr = X\r\n self.ytr = y", "def train(self, X, y):\n # the nearest neighbor classifier simply remembers all the training data\n self.Xtr = X\n self.ytr = y", "def train(self, X, y):\n # the nearest neighbor classifier simply remembers all the training data\n self.Xtr = X\n self.ytr = y", "def trainData(self, X, y, NeuralNet, epochs):", "def fit(self, X, y, X_val=None, y_val=None):\n\n # fit function has to return an instance of itself or else it won't work with test.py\n # adding extra 1s and reshaping y\n n_samples = X.shape[0]\n n_features = X.shape[1]\n temp = np.empty((n_samples, n_features+1), dtype=float)\n for i in range(n_samples):\n temp[i] = np.append(X[i], 1)\n X = temp\n\n try: # if validation set is present\n if X_val.all() != None and y_val.all() != None:\n # adding extra 1s and reshaping y\n val_n_samples = X_val.shape[0]\n val_n_features = X_val.shape[1]\n temp = np.empty((val_n_samples, val_n_features+1), dtype=float)\n for i in range(val_n_samples):\n temp[i] = np.append(X_val[i], 1)\n X_val = temp\n except AttributeError:\n pass\n\n self.theta = np.zeros((n_features+1,1)) # these are the model parameters\n self.train_history = np.zeros(self.n_iterations)\n self.val_history = np.zeros(self.n_iterations)\n y = y.reshape((-1,1))\n\n if self.cost_fn == 'Batch':\n self.batch_gradient_descent(X, y, X_val, y_val) # Batch\n elif self.cost_fn == 'Stochastic':\n self.stochastic_gradient_descent(X, y, X_val, y_val) # Stochastic\n\n h_theta_of_x = self.sigmoid(np.dot(X, self.theta)) # predicted value\n predicted = np.around(h_theta_of_x) # rounding off according to 0.5\n accuracy = (predicted == y).all(axis=1).mean() # calculating accuracy of model\n train_cost = self.log_loss(n_samples, h_theta_of_x, y)\n\n print('training loss', train_cost)\n print('training accuracy:', accuracy)\n\n # fit function has to return an instance of itself or else it won't work with test.py\n return self", "def train(self, X_train, y_train):\n self.model.fit(X_train, y_train)", "def fit(self, X, Y):\n ...", "def fit_and_predict(self, X_train, y_train, X_test, y_test):\n if self.feature_transform_func:\n X_train, X_test = self.feature_transform_func(X_train, X_test)\n\n self.fit(X_train, y_train)\n y_predict = self.predict(X_test)\n return self.Acu_eval(y_predict, y_test)", "def fit(self, x, y):\n self._pred.fit(x, y)\n\n return self", "def fit(self, X, y, max_iter=MAX_ITER):\n n_tasks = len(X)\n n_feats = X[0].shape[1]\n W = np.random.randn(n_feats, n_tasks)\n start = time.time()\n cost_function = 0\n X = 
self.normalize_data(X)\n X = self.add_bias(X)\n for t in range(n_tasks):\n #print('Training {} task with lasso regression'.format(t))\n lasso = Fista(self, self.lambda_1)\n w = lasso.fit(xk=W[:, t], A=X[t], b=y[t], ind=self.groups,\n max_iter=max_iter)\n W[:, t] = w\n stop = time.time() - start\n self.W = W\n return W, np.array([cost_function]), stop", "def fit(self, X,y):\n pass", "def train_model(X_train, y_train, f_classifier: Callable[[], list]=None, seed:int=0, param_dict:dict={}) -> list:\n if seed:\n param_dict['random_state'] = seed\n classifier_func = f_classifier or LogisticRegression\n\n classifier = classifier_func(**param_dict)\n classifier.fit(X_train, y_train)\n return classifier", "def fit(self, X_train: np.ndarray, y_train: np.ndarray) -> None:\n self.X_train = X_train\n self.y_train = y_train", "def train(self,features,y):\r\n \r\n if self.learn_type == \"nn\":\r\n #generate supervised dataset\r\n return(self.learner.train_on_batch(features,y))\r\n elif self.learn_type == \"linear\":\r\n grad = 0\r\n n = len(features)\r\n for i in range(n):\r\n #sum over the instances to get an estimate of the gradient\r\n print((y[i] - self.learner.activate(features[i])))\r\n grad -= (y[i] - self.learner.activate(features[i])) * \\\r\n self.learner.grad(features[i])\r\n grad /= n\r\n #update paramter\r\n param = np.copy(self.learner.param)\r\n self.learner.param = param - self.alpha * grad\r\n #print(self.learner.param)\r", "def fit(self, X_train, y_train, X_test=None, y_test=None):\n\n self.initialize_weights_and_bias(X_train)\n\n # for progress formatting\n epoch_strlen = len(str(self.epochs))\n self.eval_ = {'cost_train': [], \n 'cost_test': [], \n 'train_preform': [], \n 'valid_preform': [],\n 'train_preform_r2': [], \n 'valid_preform_r2': []}\n\n # iterate over training epochs\n for epoch in range(self.epochs):\n\n # Includes forward + backward prop.\n self._minibatch_sgd( X_train, y_train)\n\n # Evaluation after each epoch during training\n z_h, a_h, z_out, a_out = self._forwardprop(X_train)\n _, _, _, a_out_test = self._forwardprop(X_test)\n\n y_train_pred = self.predict(X_train)\n y_test_pred = self.predict(X_test)\n\n y_test = y_test.reshape((len(y_test),1))\n y_train = y_train.reshape((len(y_train),1))\n\n y_test = standardicing_responce(y_test)\n y_test_pred = standardicing_responce(y_test_pred)\n \n y_train = standardicing_responce(y_train)\n y_train_pred = standardicing_responce(y_train) \n \n train_preform = mean_squared_error(y_train, y_train_pred) \n valid_preform = mean_squared_error(y_test, y_test_pred)\n \n train_preform_r2 = r2_score(y_train, y_train_pred) \n valid_preform_r2 = r2_score(y_test, y_test_pred)\n\n self.eval_['train_preform'].append(train_preform)\n self.eval_['valid_preform'].append(valid_preform)\n self.eval_['train_preform_r2'].append(train_preform_r2)\n self.eval_['valid_preform_r2'].append(valid_preform_r2)\n\n # Calculate the error in the output\n self.model_error = np.subtract(y_train, y_train_pred)\n \n return self", "def fit(self, X, y, alpha, n_epochs):\n y = self.__one_hot(y)\n \n # perform training epochs\n for i in range(n_epochs):\n print(\"Epoch\", i)\n # stochastic gradient descent\n for j in range(len(X)):\n self.__backpropagation(X[j], y[j], alpha)", "def train(self,X,y):\n #the nearest neighbour classifier simply remembers all the training data\n self.Xtr=X\n self.ytr=y", "def train(self, x, y, epochs=1, verbose=False):\n numTrainingPoints = len(x)\n for e in range(epochs):\n # Set accuracy at beginning of epochs to 0s\n accuracy = 0\n 
# Compute the output for all training points\n allOutputs = self.getOutputNoActivation(x)\n for i in range(numTrainingPoints):\n # Increment iterations for learning rate scheduling\n self.iterations += 1\n # Calculate the new learning rate from scheduling\n lr = self.iterations ** -1\n # Grab the input for the specific training point\n trainingPointInputs = x[i]\n # Grab the output for the specific training point\n trainingPointOutput = allOutputs[i]\n # Get the target outputs for the specific training point\n targets = y[i]\n # Compare each output 1 by 1\n for outputIndex in range(len(trainingPointOutput)):\n # Grab specific output and corresponding target value\n targetVal = targets[outputIndex]\n outputVal = trainingPointOutput[outputIndex]\n # If the outputs match, increment accuracy\n if targetVal == outputVal:\n accuracy += 1\n continue\n # Else, update the weights\n else:\n # For each input weight, compute its delta change, and then apply the change\n for inputWeightIndex in range(len(self.weights[outputIndex])):\n # If the inputWeightIndex is in the range of values for inputs, use the input at that index\n if inputWeightIndex < len(trainingPointInputs):\n trainingPointInput = trainingPointInputs[inputWeightIndex]\n # Else, that value is the bias, and the input should be constant 1.0\n else:\n trainingPointInput = 1.0\n # Compute delta w and apply the change\n inputNorm = 0\n for tpi in trainingPointInputs:\n inputNorm += tpi ** 2\n inputNorm = math.sqrt(inputNorm)\n deltaW = lr * (targetVal - outputVal) * trainingPointInput / inputNorm**2\n self.weights[outputIndex, inputWeightIndex] += deltaW\n # Compute accuracy\n accuracy /= numTrainingPoints \n # If verbose == True, print accuuracy for each training epoch\n if verbose:\n print('Epoch ' + str(e+1) + ' / ' + str(epochs) + ' Accuracy: ' + str(accuracy))\n # Return final accuracy\n return accuracy", "def fit(self, X, y):\n X = self.normalize_data(X)\n X = self.add_bias(X)\n n_tasks = len(X)\n n_feats = X[0].shape[1]\n W = np.random.randn(n_feats, n_tasks)\n cost_function = 0\n start = time.time()\n for t in range(n_tasks):\n #print('Training task {} with group lasso'.format(t))\n fista = Fista(self, self.lambda_1)\n w_opt = fista.fit(W[:, t], X[t], y[t], self.groups,\n max_iter=self.max_iter)\n W[:, t] = w_opt\n cost_function += self.cost(X[t], y[t], W[:, t])\n stop = time.time() - start\n self.W = W\n return W, np.array([cost_function]), stop", "def train_step(x, y):\n with tf.GradientTape() as tape:\n predictions = self.model(x)\n loss = self.loss_object(y, predictions)\n gradients = tape.gradient(loss, self.model.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))\n\n self.train_loss(loss)\n self.train_acc(y, predictions)", "def fit(self, x, y, epochs=2000, x_test=None, y_test=None, optimizer='adam', learning_rate=0.1,\n load_best_weights=False, val_freq=1000, log_freq=1000, verbose=1):\n\n x = self.tensor(x)\n y = self.tensor(y)\n\n self.start_time = time.time()\n self.prev_time = self.start_time\n\n if optimizer == 'adam':\n self.train_adam(x, y, epochs, x_test, y_test, learning_rate, val_freq, log_freq, verbose)\n elif optimizer == 'lbfgs':\n self.train_lbfgs(x, y, epochs, x_test, y_test, learning_rate, val_freq, log_freq, verbose)\n\n if load_best_weights is True:\n self.load_weights()", "def train(self, X, y):\n # the nearest neighbor classifier simply remembers all the training data\n self.Xtr = X\n self.ytr = y", "def train(self, train_X, train_y):\n if self.feat_sel:\n 
train_X = self.do_feat_sel(train_X, train_y)\n\n train_X, train_y = self.sample.fit_resample(train_X, train_y)\n self.clf.fit(train_X, train_y)", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def train(self, x, y):\n h = self.neurons_output(x, self.w)\n pseudoinverse = np.linalg.pinv(h)\n self.c = np.dot(pseudoinverse, y)\n return", "def fit(self, X, y, max_iter=MAX_ITER):\n X = self.normalize_data(X)\n X = self.add_bias(X)\n n_tasks = len(X)\n n_feats = X[0].shape[1]\n W = np.random.randn(n_feats, n_tasks)\n start = time.time()\n cost_function = 0\n for t in range(n_tasks):\n #print('Training {} task with lasso regression'.format(t))\n lasso = Lasso(alpha=self.lambda_1, positive=self.positive, max_iter=max_iter)\n lasso.fit(X[t], y[t])\n W[:, t] = lasso.coef_\n cost_function += np.linalg.norm(np.dot(X[t], W[:, t]) - y[t]) \\\n + sum(abs(W[:, t]))\n stop = time.time() - start\n self.W = W\n return W, np.array([cost_function]), stop", "def train(self, train_x,train_y):\n self._model.fit(train_x,train_y,batch_size=8,epochs = self._epochSize)\n return None", "def fit(self, x, y):\n def initiate_theta(dim):\n self.theta = np.zeros(dim)\n # print('self.theta initiated is {}'.format(self.theta))\n \n def implement_sigmoid(x):\n if self.theta is None:\n initiate_theta(x.shape[1])\n z = np.matmul(np.transpose(self.theta), np.transpose(x))\n return 1/(np.ones(x.shape[0]) + np.exp(-z))\n \n def implement_partial_loss(x, y):\n return -np.matmul(np.transpose(y - implement_sigmoid(x)), x)/x.shape[0]\n \n def implement_transposed_hess(x):\n sigmoid_hadamard = implement_sigmoid(x) * (np.ones(x.shape[0]) - implement_sigmoid(x))\n hess2 = np.diag(sigmoid_hadamard)\n hess = np.matmul(hess2,x)\n hess = np.matmul(np.transpose(x),hess)/x.shape[0]\n hess_inverse = np.linalg.inv(hess)\n return hess_inverse\n \n def train(x, y):\n count = 0\n if self.theta is None:\n initiate_theta(x.shape[1])\n while count < self.max_iter:\n if self.verbose:\n loss_y1 = np.matmul(np.transpose(y), np.log(implement_sigmoid(x)))\n loss_y0 = np.matmul(np.transpose(np.ones(x.shape[0]) - y), np.log(np.ones(x.shape[0]) - implement_sigmoid(x)))\n loss = -(loss_y1 + loss_y0 )/x.shape[0]\n print('Average empirical loss for step {} is {}'.format(count, loss))\n delta = np.matmul(implement_transposed_hess(x), implement_partial_loss(x, y))\n new_theta = self.theta - delta * self.step_size\n delta_theta = np.linalg.norm(new_theta - self.theta)\n # print('delta is {}'.format(delta_theta))\n if delta_theta < self.eps:\n return new_theta\n else:\n self.theta = new_theta\n count += 1\n return self.theta\n \n return train(x, y)", "def fit(self, X, y):\n\t\tself._initialize_weights(X.shape[1])\n\t\tself.cost_ = []\n\n\t\tfor i in range(self.n_iter):\n\t\t\tif self.shuffle:\n\t\t\t\tX, y = self._shuffle(X,y)\n\t\t\tcost = []\n\t\t\t#calculate for each sample\n\t\t\tfor xi, target in zip(X, y):\n\t\t\t\tcost.append(self._update_weights(xi, target))\n\t\t\tave_cost = sum(cost)/len(y)\n\t\t\tself.cost_.append(ave_cost)\n\t\treturn self", "def train_on_batch(\n network,\n optimizer,\n loss_fn,\n X,\n y_target,\n current_epoch\n):\n training_stats = ValueRegistry.get_instance(\"training_stats\")\n training_stats[\"current_epoch\"] = current_epoch\n 
optimizer.zero_grad()\n # Do the forward pass to predict the primitive_parameters\n y_hat = network(X)\n loss = loss_fn(y_hat, y_target)\n # Do the backpropagation\n loss.backward()\n nn.utils.clip_grad_norm_(network.parameters(), 1)\n # Do the update\n optimizer.step()\n\n return (\n loss.item(),\n [x.data if hasattr(x, \"data\") else x for x in y_hat],\n )", "def fit(self, X, y):\n\n # compute number of minibatches for training, validation and testing\n input_size = X.shape.eval()[0]\n num_training_batches = input_size/self.mini_batch_size\n\n # define the (regularized) cost function, symbolic gradients, and updates\n l2_norm_squared = sum([(layer.w**2).sum() for layer in self.layers])\n cost = self.layers[-1].cost(self)+\\\n 0.5*self.l2*l2_norm_squared/num_training_batches\n grads = T.grad(cost, self.params)\n updates = [(param, param-self.eta*grad)\n for param, grad in zip(self.params, grads)]\n\n # define functions to train a mini-batch, and to compute the\n # accuracy in validation and test mini-batches.\n i = T.lscalar() # mini-batch index\n train_mb = theano.function(\n [i], cost, updates=updates,\n givens={\n self.x:\n X[i*self.mini_batch_size: (i+1)*self.mini_batch_size],\n self.y:\n y[i*self.mini_batch_size: (i+1)*self.mini_batch_size]\n })\n \n # Do the actual training\n for epoch in xrange(self.epochs):\n print(\"Training epoch number: {0}\".format(epoch))\n for minibatch_index in xrange(num_training_batches):\n iteration = num_training_batches*epoch+minibatch_index\n cost_ij = train_mb(minibatch_index)", "def train(self, x, y, lr):\n self.forward(x)\n self.backward(y)\n self.update(lr)\n return", "def fit(self, X, y):\n X = self.normalize_data(X)\n X = self.add_bias(X)\n n_tasks = len(X)\n n_feats = X[0].shape[1]\n W = np.random.randn(n_feats, n_tasks)\n cost_function = 0\n start = time.time()\n for t in range(n_tasks):\n fista = Fista(self, self.lambda_1)\n w_opt = fista.fit(W[:, t], X[t], y[t], self.groups,\n max_iter=self.max_iter)\n W[:, t] = w_opt\n cost_function += self.cost(X[t], y[t], W[:, t])\n stop = time.time() - start\n self.W = W\n return W, np.array([cost_function]), stop", "def fit ( self, X: np.ndarray, y: np.ndarray ):\n \n self.X = X\n self.y = y", "def fit(self, X, y):\n self.X_data = X\n self.y = y", "def fit(self, X, y=None):\n # Check that X and y have correct shape\n X, y = check_X_y(X, y)\n # Store the classes seen during fit\n self.classes_ = unique_labels(y)\n \n if self.shuffle:\n X, y = shuffle(X, y)\n\n self.X_ = X\n self.y_ = y\n \n self._initialize_map()\n self._create_location_vectors()\n self._initialize_sigma()\n \n for i in range(self.num_epoch):\n j= 0\n print(f\"Epoch:{i}\")\n while(j < self.X_.shape[0]):\n current_batch_end = j+self.batch_size if j+self.batch_size < self.X_.shape[0] else self.X_.shape[0]\n current_batch = self.X_[j:current_batch_end]\n self._feedforward(current_batch)\n self._backprop(j, self.X_.shape[0], current_batch)\n j = current_batch_end \n return self", "def fit(self, X, y, max_iter=MAX_ITER):\n X = self.normalize_data(X)\n X = self.add_bias(X)\n n_tasks = len(X)\n n_feats = X[0].shape[1]\n W = np.random.randn(n_feats, n_tasks)\n start = time.time()\n cost_function = 0\n for t in range(n_tasks):\n #print('Training {} task with lasso regression'.format(t))\n lasso = LogisticRegression(C=self.lambda_1,\n penalty='l1',\n max_iter=max_iter)\n lasso.fit(X[t], y[t])\n W[:, t] = lasso.coef_\n stop = time.time() - start\n self.W = W\n return W, np.array([cost_function]), stop", "def train(self, X_train, 
y_train,X_test,y_test):\n K = np.arange(self.epochs)\n F= y_train.shape[0]\n for s in tqdm(range(self.epochs)):\n for i in range( 0, F, 10):\n grad_W = self.calc_gradient(self.W, np.insert(X_train[i:i+10], 0, 1, axis=1), y_train[i:i+10], self.reg_const)\n self.W = self.W - self.alpha * grad_W\n K[s] = self.get_acc(self.predict(X_test),y_test)\n return K", "def fit(self, x, y, **kwargs):\n if self.build_fn is None:\n self.model = self.__call__(**self.filter_sk_params(self.__call__))\n elif (not isinstance(self.build_fn, types.FunctionType) and\n not isinstance(self.build_fn, types.MethodType)):\n self.model = self.build_fn(\n **self.filter_sk_params(self.build_fn.__call__))\n else:\n self.model = self.build_fn(**self.filter_sk_params(self.build_fn))\n\n loss_name = self.model.loss\n if hasattr(loss_name, '__name__'):\n loss_name = loss_name.__name__\n if loss_name == 'categorical_crossentropy' and len(y.shape) != 2:\n y = to_categorical(y)\n\n fit_args = copy.deepcopy(self.filter_sk_params(Sequential.fit))\n fit_args.update(kwargs)\n\n history = self.model.fit(x, y, **fit_args)\n\n return history", "def train(self,X,y):\n self.X_train = X\n self.y_train = y\n self.class_labels = np.unique(self.y_train)", "def fit(self, X, y):\n y = np.array(y)\n rows, columns = X.shape\n num_batches = int(np.ceil(rows / self.batch_size))\n batches = np.arange(num_batches + 1) * self.batch_size\n indxs = np.arange(rows)\n\n self.W = np.zeros(columns)\n self.b = rng.random(1)\n\n # stochastic gradient descent logic\n for _ in range(self.num_epochs):\n rng.shuffle(indxs)\n\n for i, j in zip(batches[0:-1], batches[1:]):\n batch_indxs = indxs[i:j]\n x_batch = X[batch_indxs]\n y_batch = y[batch_indxs]\n self.update(x_batch, y_batch)\n\n # track loss history during training\n self.loss_history.append(self.loss(self.predict_proba(X), y))\n self.accuracies.append(self.accuracy(X, y))", "def fit(self,X,y):\n self.X = X\n self.y = y\n return self", "def predict(x_train, y_train, x_test, y_test, fn, params):\n y_train_predicted = fn(x_train, None, *params)\n y_train_predicted = (y_train_predicted >= 0.5) * 1\n y_test_predicted = fn(x_test, None, *params)\n y_test_predicted = (y_test_predicted >= 0.5) * 1\n\n train_acc = np.sum(y_train_predicted == y_train) / x_train.shape[0]\n test_acc = np.sum(y_test_predicted == y_test) / x_test.shape[0]\n print('train accuracy =', train_acc)\n print('test accuracy =', test_acc)\n scatter_plot(x_train, y_train_predicted, x_test, y_test_predicted, 'predicted 0', 'predicted 1')", "def fit(self, X, y, X_val=None, y_val=None):\n #Adding an extra column of 1s for constant term\n n_samples = X.shape[0]\n n_features = X.shape[1]\n temp = np.empty((n_samples, n_features+1), dtype=float) # adding extra 1s to X\n for i in range(n_samples):\n temp[i] = np.append(X[i], 1)\n X = temp\n y = y.reshape((-1,1)) # reshaping y \n\n try: # if validation set is present\n if X_val.all() != None and y_val.all() != None:\n # adding extra 1s and reshaping y\n val_n_samples = X_val.shape[0]\n val_n_features = X_val.shape[1]\n temp = np.empty((val_n_samples, val_n_features+1), dtype=float)\n for i in range(val_n_samples):\n temp[i] = np.append(X_val[i], 1)\n X_val = temp\n y_val = y_val.reshape((-1,1))\n except AttributeError:\n pass\n\n self.theta = np.zeros((n_features+1,1)) # these are the model parameters\n self.train_history = np.zeros(self.n_iterations) # initialising array store train loss history\n self.val_history = np.zeros(self.n_iterations) # initialising array store val loss history\n \n if 
self.cost_fn == 'RMSE':\n self.gradient_descent_rmse(X, y, X_val, y_val) # RMSE gradient call\n h_theta_of_x = np.dot(X, self.theta)\n cost = self.RMSE(n_samples, h_theta_of_x, y) # final RMSE loss\n # print('training_loss', cost)\n\n elif self.cost_fn == 'MAE':\n self.gradient_descent_mae(X, y, X_val, y_val) # MAE gradient call\n h_theta_of_x = np.dot(X, self.theta)\n cost = self.MAE(n_samples, h_theta_of_x, y) # final MSE loss\n # print('training_loss', cost)\n\n\n # fit function has to return an instance of itself or else it won't work with test.py\n return self", "def fit(self, trainA, trainY , trainX = None, valA = None, valX = None, valY = None):\n \n n_nodes = trainA.shape[1]\n \n if self.feature_type == 'identity':\n n_features = n_nodes\n trainX = np.eye(n_features)\n n_classes = trainY.shape[1]\n feature_type = self.feature_type\n n_hidden = self.n_hidden\n act_func = self.act_func\n reg_type = self.reg_type\n reg_beta = self.reg_beta\n drop_rate = self.drop_rate\n \n \n #Build the model\n self.build_model(n_nodes, n_features, n_classes, feature_type, n_hidden,\n act_func, reg_type, reg_beta, drop_rate)\n \n #Test the model\n prediction = self._predict(trainA, trainX)\n \n #Train the model\n # self._train_model(trainA, trainX, trainY, valA, valX, valY)\n \n return prediction", "def train_fn(self, x_train, y_train, compute_error):\n def train(theta_value, theta):\n theta.set_value(theta_value, borrow=True)\n return compute_error(x_train, y_train)\n\n return train", "def fit(self, X, y):\n if self._intercept:\n X = self._add_intercept(X)\n\n if self._theta is None or (self._theta and self._theta.shape != X.shape[1]):\n self._theta = np.zeros(X.shape[1]) # Initialize parameters\n\n for n in range(self._n_iter):\n h = sigmoid(np.dot(X, self._theta))\n dW = np.dot(X.T, (y-h))\n self._theta += self._lr * dW\n if (n % (self._n_iter/10) == 0): # Print every 10% of total training iterations\n print(\"Train Accuracy: \", binary_accuracy(y, h)) \n print(\"Train Loss: \", binary_cross_entropy(y, h))", "def _train(self,\n Xs: Array,\n Ys: Array,\n metric: Callable = None,\n **kwargs):\n self.model.fit(Xs, Ys, **kwargs)\n return None", "def fit(self, X):", "def train(self, trainX, trainY):\n self.model = KNeighborsRegressor(n_neighbors=5)\n self.model.fit(trainX, trainY)", "def train(X, y, W1, W2):\n \n # Forward propagation\n \n # Dot product of X (input) and first set of 3x2 weights\n Z2 = np.dot(X, W1)\n # activation function\n A2 = sigmoid(Z2) \n # dot product of hidden layer (Z2) and second set of 3x1 weights\n Z3 = np.dot(A2, W2) \n # final activation function\n A3 = sigmoid(Z3) \n\n \n # Back propagation\n \n # error in output\n o_error = y - A3 \n # applying derivative of sigmoid to error\n o_delta = o_error*sigmoid_gradient(A3) \n\n\n # z2 error: how much our hidden layer weights contributed to output error\n z2_error = o_delta.dot(W2.T) \n # applying derivative of sigmoid to z2 error\n z2_delta = z2_error*sigmoid_gradient(A2) \n\n\n # Update the weights\n W1 += X.T.dot(z2_delta) \n W2 += A2.T.dot(o_delta) \n\n return A3", "def fit(self,X_train,y_train):\n assert X_train.shape[0] == y_train.shape[0], \\\n \"the size of X_train must equal to the size of y_train\"\n assert self.k <= X_train.shape[0],\\\n \"the size of X_train must be at least k\"\n self._X_train =X_train\n self._y_train =y_train\n\n return self", "def fit(self, X, y):\n self.model_x = X\n self.model_y = y", "def fit(self, x, y):\n # *** START CODE HERE ***\n num_examples = x.shape[0]\n num_features = x.shape[1]\n 
iteration = 1\n if self.theta == None:\n self.theta = np.zeros((num_features,))\n while iteration <= self.max_iter:\n h_theta = np.dot(x, self.theta)\n g_theta = self.sigmoid(h_theta)\n J_cost = -np.mean(y*np.log(g_theta) + (1 - y)*np.log(1 - g_theta))\n H = 1/num_examples*(np.dot(np.transpose(g_theta*(1-g_theta))*np.transpose(x), x))\n J_prime = - 1/num_examples*np.dot(np.transpose(y - g_theta), x)\n d_theta = - np.linalg.solve(H, J_prime)\n self.theta += d_theta\n if np.linalg.norm(d_theta, 1) < self.eps:\n break\n if self.verbose:\n print(\"Loss value: \", J_cost)\n iteration += 1\n # *** END CODE HERE ***", "def fit(self, X, y) :\n\n ### ========== TODO : START ========== ###\n # part b: set self.probabilities_ according to the training set\n # create a dictionary of frequencies and convert to probabilities\n frequencies = Counter(y)\n self.probabilities_ = {key:float(value)/len(y) for (key,value) in frequencies.items()}\n ### ========== TODO : END ========== ###\n\n return self", "def fit(self, x_train: np.ndarray, y_train: np.ndarray) -> None:\n # Stores training data and targets for use in derived classes\n self.x_train = x_train\n self.y_train = y_train", "def fit(self, X, y, **fit_params):\n ...", "def fit(self, X, y):\n self.__X = X\n self.__y = y\n self.__trained = True", "def fit(self, X, y=None):\n # train on a training dataset\n self.logger.info(\n self.__name__ + ' is trained on {:d} samples with {:d} features.'.format(X.shape[0], X.shape[1]))\n pass", "def fit(self,X_train,y_train):\r\n \r\n self.X_train_data=X_train.reset_index(drop=True)\r\n self.y_train_data=y_train.reset_index(drop=True)\r\n \r\n temp_fitted_model=[]\r\n for each_model in self.model_list:\r\n each_model.fit(self.X_train_data,self.y_train_data)\r\n temp_fitted_model.append(each_model)\r\n \r\n self.fitted_model=temp_fitted_model", "def fit_predict(self, train_x: pd.DataFrame, train_y: pd.Series, test_x: pd.DataFrame, test_y: pd.Series) -> dict:\n self.evaluator.fit(train_x, train_y, test_x, test_y)\n predictions = self.evaluator.predict(test_x)\n print(predictions)\n metrics = metrics_stat(predictions, test_y)\n return metrics", "def ytrain(self,value: list)->None:", "def train(self, train_x, train_y, optimzer='adam'):\n self.history = self.model.fit(train_x, train_y, epochs=self.epochs, batch_size=self.batch_size,\n verbose=self.verbose, shuffle=False)", "def train(self, xFeat, y):\n if type(xFeat) != np.ndarray: # if the data isn't a numpy array, eg dataframe, convert to numpy\n self.xTrain = xFeat.to_numpy()\n else:\n self.xTrain = xFeat\n if type(y) != np.ndarray:\n self.yTrain = y.to_numpy()\n else:\n self.yTrain = y\n # set the train set columns to [1 x classification, d x features, 1 x placeholder for distances]\n self.trainSet = np.column_stack((np.atleast_1d(self.yTrain), self.xTrain, np.empty(len(self.xTrain))))\n return self", "def train(self, X_train, y_train):\n\n self.model_pipeline.fit(X_train, y_train)", "def train(self, x, y):\n len_dim_0 = x.shape[0]\n x = np.reshape(x, (len_dim_0, 784))\n with tf.GradientTape() as tape:\n tape.watch(self.layers)\n y_out = self.forward(x)\n loss = self.mean_squared_error(y_out, y)\n grads = tape.gradient(loss, self.layers)\n temp_layers = []\n for layer, grad in zip(self.layers, grads):\n new_w = layer.weights - (self.learning_rate * grad.weights)\n new_b = layer.biases - (self.learning_rate * grad.biases)\n temp_layers.append(self.Layer(new_w, new_b))\n self.layers = temp_layers\n\n return loss", "def _fit_function(self,x,a,b):\n return b + a*x", "def 
fit(self, X, y):\n # Code to fit the model.\n\n train_stuff = self._vectorizer.fit_transform(X, y)\n\n self._classifier.fit(train_stuff, y = y)\n\n\n return self", "def fit(self, X, y):\n\t\trgen = np.random.RandomState(self.random_state)\n\t\tself.w_ = rgen.normal(loc=0.0, scale=0.01, size=1+X.shape[1])\n\t\tself.cost_ = []\n\n\t\tfor i in range(self.n_iter):\n\t\t\tnet_input = self.net_input(X)\n\t\t\toutput = self.activation(net_input)\n\t\t\terrors = y - output\n\t\t\t#update weights\n\t\t\tself.w_[1:] += self.eta * X.T.dot(errors)\n\t\t\tself.w_[0] += self.eta * errors.sum()\n\t\t\tcost = (errors**2).sum() / 2.0\n\t\t\tself.cost_.append(cost)\n\t\treturn self", "def ytrain(self,)->list:", "def fit(self, X_train, y_train, X_test, y_test, n_epochs=1):\n with tf.Session() as sess:\n init = tf.global_variables_initializer()\n init.run()\n for epoch in range(n_epochs):\n # Create Batches with size of BATCH_SIZE\n X_train_batches, y_train_batches = generate_random_batches(X_train, y_train, self.batch_size)\n # print(\"-------------------X_train shape: \", X_train.shape)\n # print(\"-------------------y_train shape: \", y_train.shape)\n\n # Iterage through the batches and performn training each time\n for X_batch, y_batch in zip(X_train_batches, y_train_batches):\n # print(\"X_batch shape: \", X_batch.shape)\n # print(\"y_batch shape: \", y_batch.shape)\n # print(X_batch)\n # print(y_batch)\n # Calculate Next Gradient Descent Step\n feed_dict = {self.X_tf: X_batch, self.y_tf: y_batch, self.keep_prob: 0.5}\n summary, _ = sess.run([self.merged_summaries, self.training_op], feed_dict=feed_dict)\n self.writer_train.add_summary(summary, epoch)\n\n # Log Accuracy of Test Data\n feed_dict = {self.X_tf: X_test, self.y_tf: y_test, self.keep_prob: 0.5}\n summary, acc = sess.run([self.merged_summaries, self.accuracy], feed_dict=feed_dict)\n self.writer_test.add_summary(summary, epoch)\n\n # if epoch % 1 == 0:\n acc_train = self.accuracy.eval(feed_dict={self.X_tf: X_train, self.y_tf: y_train, self.keep_prob: 1.0})\n acc_test = self.accuracy.eval(feed_dict={self.X_tf: X_test, self.y_tf: y_test, self.keep_prob: 1.0})\n print(\"Epoch: \", epoch, \"Train accuracy:\", acc_train, \"Test accuracy:\", acc_test)\n\n #Save the final model\n self.saver.save(sess, self.log_dir + '/model')", "def fit(self, X_train, y_train):\n if X_train.shape[0] != y_train.shape[0]:\n raise AssertionError\n\n X_train = np.hstack((np.ones((X_train.shape[0], 1)), X_train))\n rows = X_train.shape[0]\n cols = X_train.shape[1]\n self._weight = np.random.normal(size=cols)\n for epoch in range(self._max_iter):\n data = np.hstack((X_train, y_train.reshape((-1, 1))))\n np.random.shuffle(data)\n X_train = data[:, : -1]\n y_train = data[:, -1].flatten()\n for i in np.arange(0, rows, self._batch_size):\n batch = X_train[i:i + self._batch_size]\n y = y_train[i:i + self._batch_size]\n pred = batch @ self._weight.T\n f_error = pred - y\n gradient = 2 * f_error.dot(batch) + self._alpha * self._l1_ratio * np.sign(\n self._weight) + self._alpha * (1 - self._l1_ratio) * self._weight\n self._weight -= self._lc * gradient / batch.shape[0]\n self._loss.append(self.mse(X_train @ self._weight.T, y_train))", "def _fit_predict(X_train, y_train, X_test):\n raise NotImplementedError()", "def train(self, X_train, Y_train, X_test = None, Y_test = None, epochs = 100, batch_size = 32, learning_rate = 0.005):\n m_train = X_train.shape[1]\n for epoch in range(epochs + 1):\n batch = np.arange(0, m_train)\n np.random.shuffle(batch)\n for k in range(m_train // 
batch_size + 1):\n if k * batch_size < m_train:\n X_mini_batch = X_train[:,batch[k * batch_size:(k + 1) * batch_size]]\n Y_mini_batch = Y_train[:,batch[k * batch_size:(k + 1) * batch_size]]\n self.update_weights(X_mini_batch, Y_mini_batch, learning_rate)\n \n if epoch % 10 == 0: \n # Loss function\n A2 = self.feedforward(X_train)\n cost = (1 / m_train) * np.sum(-np.multiply(Y_train, np.log(A2)) - np.multiply(1 - Y_train, np.log(1 - A2)))\n print(f\"epoch:{epoch}, Cost: {cost}, \", end = '')\n # Accutacy on training data\n if X_test is not None and Y_test is not None:\n A2_test = self.feedforward(X_test)\n class_pred = A2_test.argmax(axis = 0)\n class_actual = Y_test.argmax(axis = 0)\n acc = sum(class_actual == class_pred)\n print(f\"accuracy:{acc}/{X_test.shape[1]}\")", "def train(self, x_data, y_data):\n self.model.fit(np.array(x_data), np.array(y_data),\n batch_size=2,\n epochs=3,\n verbose=1)\n self.model.save_weights(self.model_filename)", "def train_gd(self,f,x,y,learnrate=0.0001,T=1000):\n f = copy.deepcopy(f)\n loss = numpy.zeros(T)\n t = 0\n deltal = -numpy.inf\n while (t<T) and (deltal<1e-7):\n (loss[t],dldw) = self(f,x,y)\n f.w = f.w -learnrate*dldw\n\n if (numpy.remainder(t,100)==0):\n print('Epoch %d: loss=%f' % (t,loss[t]))\n if (t>0):\n deltal = loss[t]-loss[t-1]\n t += 1\n \n return (f,loss)" ]
[ "0.7322144", "0.71384645", "0.71244806", "0.69531596", "0.6825773", "0.6776183", "0.6776183", "0.67015135", "0.6648316", "0.6648316", "0.6648316", "0.66165537", "0.6569884", "0.65607", "0.64630246", "0.6440666", "0.6422532", "0.6422532", "0.63722926", "0.6364112", "0.6359813", "0.63469934", "0.63109094", "0.6283902", "0.6279547", "0.62762666", "0.6268684", "0.6264824", "0.62594575", "0.62430626", "0.62425256", "0.6240298", "0.6229097", "0.62207747", "0.6210249", "0.62063605", "0.61997455", "0.6194289", "0.6191956", "0.6191956", "0.6191956", "0.6191956", "0.6191956", "0.6191956", "0.6191956", "0.6191956", "0.6191956", "0.6191956", "0.6190731", "0.61892635", "0.6180477", "0.61775666", "0.6166012", "0.6150523", "0.61325544", "0.6121467", "0.61164486", "0.61097866", "0.6107278", "0.60989374", "0.6086677", "0.6086117", "0.60775304", "0.60752547", "0.6068245", "0.60558546", "0.60339946", "0.6030465", "0.6030221", "0.6026553", "0.60264266", "0.6022157", "0.6008406", "0.60077834", "0.6000448", "0.5987641", "0.59771425", "0.5976922", "0.5976721", "0.5974665", "0.59741145", "0.5973406", "0.5972556", "0.5966811", "0.59405714", "0.59363264", "0.59313285", "0.5929898", "0.5918243", "0.5913003", "0.5910071", "0.5908342", "0.5901676", "0.58993345", "0.58954114", "0.5880697", "0.58806795", "0.58749104", "0.58577096", "0.5853988" ]
0.6362209
20
Fits function y=f(x) given training pairs (x_train, y_train).
def fit( self, x_train, y_train, x_valid=None, y_valid=None, regressor_callback=None ):
    has_more_than_one_channel = len(y_train.shape) > 1
    if x_valid is None:
        x_valid = x_train
        y_valid = y_train
    # Move y_train and y_valid to the multi-channel form, but with just one channel:
    if not has_more_than_one_channel:
        y_train = y_train[numpy.newaxis, ...]
        y_valid = y_valid[numpy.newaxis, ...]
    self.models = []
    self._stop_fit = False
    for y_train_channel, y_valid_channel in zip(y_train, y_valid):
        gc.collect()
        model_channel = self._fit( x_train, y_train_channel, x_valid, y_valid_channel, regressor_callback )
        self.models.append(model_channel)
        self.loss_history.append(model_channel.loss_history)
    self.num_channels = len(self.models)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self, X, y):", "def train(self, x_train, y_train, x_val, y_val):\n pass", "def train(self, X, y):\n pass", "def train(self, X, y):\n lagrange_multipliers = self._compute_multipliers(X, y)\n return self._construct_predictor(X, y, lagrange_multipliers)", "def fit(self, X, y):\n self.X_train = X\n self.y_train = y", "def train(self, X, y):\n self.X_train = X\n self.y_train = y", "def train(self, X, y):\n self.X_train = X\n self.y_train = y", "def train(self, X, y):\n self.model.fit(X, y)", "def fit(self, X, y):", "def fit(self, X, y):", "def fit(self, X, y):", "def learn(self, Xtrain, ytrain):", "def fit(self, x, y):\r\n\r\n self.train_x = x\r\n self.train_y = y\r\n self.__find_psi__()", "def train(self, X, y):\n self.X_train = X\n self.y_train = y\n assert isinstance(X, np.ndarray)\n assert isinstance(y, np.ndarray)", "def fit(self, X_train, y_train):\n return self", "def train(self, X, y):\r\n # the nearest neighbor classifier simply remembers all the training data\r\n self.Xtr = X\r\n self.ytr = y", "def train(self, X, y):\n # the nearest neighbor classifier simply remembers all the training data\n self.Xtr = X\n self.ytr = y", "def train(self, X, y):\n # the nearest neighbor classifier simply remembers all the training data\n self.Xtr = X\n self.ytr = y", "def trainData(self, X, y, NeuralNet, epochs):", "def fit(self, X, y, X_val=None, y_val=None):\n\n # fit function has to return an instance of itself or else it won't work with test.py\n # adding extra 1s and reshaping y\n n_samples = X.shape[0]\n n_features = X.shape[1]\n temp = np.empty((n_samples, n_features+1), dtype=float)\n for i in range(n_samples):\n temp[i] = np.append(X[i], 1)\n X = temp\n\n try: # if validation set is present\n if X_val.all() != None and y_val.all() != None:\n # adding extra 1s and reshaping y\n val_n_samples = X_val.shape[0]\n val_n_features = X_val.shape[1]\n temp = np.empty((val_n_samples, val_n_features+1), dtype=float)\n for i in range(val_n_samples):\n temp[i] = np.append(X_val[i], 1)\n X_val = temp\n except AttributeError:\n pass\n\n self.theta = np.zeros((n_features+1,1)) # these are the model parameters\n self.train_history = np.zeros(self.n_iterations)\n self.val_history = np.zeros(self.n_iterations)\n y = y.reshape((-1,1))\n\n if self.cost_fn == 'Batch':\n self.batch_gradient_descent(X, y, X_val, y_val) # Batch\n elif self.cost_fn == 'Stochastic':\n self.stochastic_gradient_descent(X, y, X_val, y_val) # Stochastic\n\n h_theta_of_x = self.sigmoid(np.dot(X, self.theta)) # predicted value\n predicted = np.around(h_theta_of_x) # rounding off according to 0.5\n accuracy = (predicted == y).all(axis=1).mean() # calculating accuracy of model\n train_cost = self.log_loss(n_samples, h_theta_of_x, y)\n\n print('training loss', train_cost)\n print('training accuracy:', accuracy)\n\n # fit function has to return an instance of itself or else it won't work with test.py\n return self", "def _fit(self, x_train, y_train, x_valid, y_valid, regressor_callback=None):", "def train(self, X_train, y_train):\n self.model.fit(X_train, y_train)", "def fit(self, X, Y):\n ...", "def fit_and_predict(self, X_train, y_train, X_test, y_test):\n if self.feature_transform_func:\n X_train, X_test = self.feature_transform_func(X_train, X_test)\n\n self.fit(X_train, y_train)\n y_predict = self.predict(X_test)\n return self.Acu_eval(y_predict, y_test)", "def fit(self, x, y):\n self._pred.fit(x, y)\n\n return self", "def fit(self, X, y, max_iter=MAX_ITER):\n n_tasks = len(X)\n n_feats = X[0].shape[1]\n W = 
np.random.randn(n_feats, n_tasks)\n start = time.time()\n cost_function = 0\n X = self.normalize_data(X)\n X = self.add_bias(X)\n for t in range(n_tasks):\n #print('Training {} task with lasso regression'.format(t))\n lasso = Fista(self, self.lambda_1)\n w = lasso.fit(xk=W[:, t], A=X[t], b=y[t], ind=self.groups,\n max_iter=max_iter)\n W[:, t] = w\n stop = time.time() - start\n self.W = W\n return W, np.array([cost_function]), stop", "def fit(self, X,y):\n pass", "def train_model(X_train, y_train, f_classifier: Callable[[], list]=None, seed:int=0, param_dict:dict={}) -> list:\n if seed:\n param_dict['random_state'] = seed\n classifier_func = f_classifier or LogisticRegression\n\n classifier = classifier_func(**param_dict)\n classifier.fit(X_train, y_train)\n return classifier", "def fit(self, X_train: np.ndarray, y_train: np.ndarray) -> None:\n self.X_train = X_train\n self.y_train = y_train", "def train(self,features,y):\r\n \r\n if self.learn_type == \"nn\":\r\n #generate supervised dataset\r\n return(self.learner.train_on_batch(features,y))\r\n elif self.learn_type == \"linear\":\r\n grad = 0\r\n n = len(features)\r\n for i in range(n):\r\n #sum over the instances to get an estimate of the gradient\r\n print((y[i] - self.learner.activate(features[i])))\r\n grad -= (y[i] - self.learner.activate(features[i])) * \\\r\n self.learner.grad(features[i])\r\n grad /= n\r\n #update paramter\r\n param = np.copy(self.learner.param)\r\n self.learner.param = param - self.alpha * grad\r\n #print(self.learner.param)\r", "def fit(self, X, y, alpha, n_epochs):\n y = self.__one_hot(y)\n \n # perform training epochs\n for i in range(n_epochs):\n print(\"Epoch\", i)\n # stochastic gradient descent\n for j in range(len(X)):\n self.__backpropagation(X[j], y[j], alpha)", "def fit(self, X_train, y_train, X_test=None, y_test=None):\n\n self.initialize_weights_and_bias(X_train)\n\n # for progress formatting\n epoch_strlen = len(str(self.epochs))\n self.eval_ = {'cost_train': [], \n 'cost_test': [], \n 'train_preform': [], \n 'valid_preform': [],\n 'train_preform_r2': [], \n 'valid_preform_r2': []}\n\n # iterate over training epochs\n for epoch in range(self.epochs):\n\n # Includes forward + backward prop.\n self._minibatch_sgd( X_train, y_train)\n\n # Evaluation after each epoch during training\n z_h, a_h, z_out, a_out = self._forwardprop(X_train)\n _, _, _, a_out_test = self._forwardprop(X_test)\n\n y_train_pred = self.predict(X_train)\n y_test_pred = self.predict(X_test)\n\n y_test = y_test.reshape((len(y_test),1))\n y_train = y_train.reshape((len(y_train),1))\n\n y_test = standardicing_responce(y_test)\n y_test_pred = standardicing_responce(y_test_pred)\n \n y_train = standardicing_responce(y_train)\n y_train_pred = standardicing_responce(y_train) \n \n train_preform = mean_squared_error(y_train, y_train_pred) \n valid_preform = mean_squared_error(y_test, y_test_pred)\n \n train_preform_r2 = r2_score(y_train, y_train_pred) \n valid_preform_r2 = r2_score(y_test, y_test_pred)\n\n self.eval_['train_preform'].append(train_preform)\n self.eval_['valid_preform'].append(valid_preform)\n self.eval_['train_preform_r2'].append(train_preform_r2)\n self.eval_['valid_preform_r2'].append(valid_preform_r2)\n\n # Calculate the error in the output\n self.model_error = np.subtract(y_train, y_train_pred)\n \n return self", "def train(self,X,y):\n #the nearest neighbour classifier simply remembers all the training data\n self.Xtr=X\n self.ytr=y", "def train(self, x, y, epochs=1, verbose=False):\n numTrainingPoints = len(x)\n for e 
in range(epochs):\n # Set accuracy at beginning of epochs to 0s\n accuracy = 0\n # Compute the output for all training points\n allOutputs = self.getOutputNoActivation(x)\n for i in range(numTrainingPoints):\n # Increment iterations for learning rate scheduling\n self.iterations += 1\n # Calculate the new learning rate from scheduling\n lr = self.iterations ** -1\n # Grab the input for the specific training point\n trainingPointInputs = x[i]\n # Grab the output for the specific training point\n trainingPointOutput = allOutputs[i]\n # Get the target outputs for the specific training point\n targets = y[i]\n # Compare each output 1 by 1\n for outputIndex in range(len(trainingPointOutput)):\n # Grab specific output and corresponding target value\n targetVal = targets[outputIndex]\n outputVal = trainingPointOutput[outputIndex]\n # If the outputs match, increment accuracy\n if targetVal == outputVal:\n accuracy += 1\n continue\n # Else, update the weights\n else:\n # For each input weight, compute its delta change, and then apply the change\n for inputWeightIndex in range(len(self.weights[outputIndex])):\n # If the inputWeightIndex is in the range of values for inputs, use the input at that index\n if inputWeightIndex < len(trainingPointInputs):\n trainingPointInput = trainingPointInputs[inputWeightIndex]\n # Else, that value is the bias, and the input should be constant 1.0\n else:\n trainingPointInput = 1.0\n # Compute delta w and apply the change\n inputNorm = 0\n for tpi in trainingPointInputs:\n inputNorm += tpi ** 2\n inputNorm = math.sqrt(inputNorm)\n deltaW = lr * (targetVal - outputVal) * trainingPointInput / inputNorm**2\n self.weights[outputIndex, inputWeightIndex] += deltaW\n # Compute accuracy\n accuracy /= numTrainingPoints \n # If verbose == True, print accuuracy for each training epoch\n if verbose:\n print('Epoch ' + str(e+1) + ' / ' + str(epochs) + ' Accuracy: ' + str(accuracy))\n # Return final accuracy\n return accuracy", "def fit(self, X, y):\n X = self.normalize_data(X)\n X = self.add_bias(X)\n n_tasks = len(X)\n n_feats = X[0].shape[1]\n W = np.random.randn(n_feats, n_tasks)\n cost_function = 0\n start = time.time()\n for t in range(n_tasks):\n #print('Training task {} with group lasso'.format(t))\n fista = Fista(self, self.lambda_1)\n w_opt = fista.fit(W[:, t], X[t], y[t], self.groups,\n max_iter=self.max_iter)\n W[:, t] = w_opt\n cost_function += self.cost(X[t], y[t], W[:, t])\n stop = time.time() - start\n self.W = W\n return W, np.array([cost_function]), stop", "def train_step(x, y):\n with tf.GradientTape() as tape:\n predictions = self.model(x)\n loss = self.loss_object(y, predictions)\n gradients = tape.gradient(loss, self.model.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))\n\n self.train_loss(loss)\n self.train_acc(y, predictions)", "def fit(self, x, y, epochs=2000, x_test=None, y_test=None, optimizer='adam', learning_rate=0.1,\n load_best_weights=False, val_freq=1000, log_freq=1000, verbose=1):\n\n x = self.tensor(x)\n y = self.tensor(y)\n\n self.start_time = time.time()\n self.prev_time = self.start_time\n\n if optimizer == 'adam':\n self.train_adam(x, y, epochs, x_test, y_test, learning_rate, val_freq, log_freq, verbose)\n elif optimizer == 'lbfgs':\n self.train_lbfgs(x, y, epochs, x_test, y_test, learning_rate, val_freq, log_freq, verbose)\n\n if load_best_weights is True:\n self.load_weights()", "def train(self, X, y):\n # the nearest neighbor classifier simply remembers all the training data\n self.Xtr 
= X\n self.ytr = y", "def train(self, train_X, train_y):\n if self.feat_sel:\n train_X = self.do_feat_sel(train_X, train_y)\n\n train_X, train_y = self.sample.fit_resample(train_X, train_y)\n self.clf.fit(train_X, train_y)", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y, max_iter=MAX_ITER):\n X = self.normalize_data(X)\n X = self.add_bias(X)\n n_tasks = len(X)\n n_feats = X[0].shape[1]\n W = np.random.randn(n_feats, n_tasks)\n start = time.time()\n cost_function = 0\n for t in range(n_tasks):\n #print('Training {} task with lasso regression'.format(t))\n lasso = Lasso(alpha=self.lambda_1, positive=self.positive, max_iter=max_iter)\n lasso.fit(X[t], y[t])\n W[:, t] = lasso.coef_\n cost_function += np.linalg.norm(np.dot(X[t], W[:, t]) - y[t]) \\\n + sum(abs(W[:, t]))\n stop = time.time() - start\n self.W = W\n return W, np.array([cost_function]), stop", "def train(self, x, y):\n h = self.neurons_output(x, self.w)\n pseudoinverse = np.linalg.pinv(h)\n self.c = np.dot(pseudoinverse, y)\n return", "def train(self, train_x,train_y):\n self._model.fit(train_x,train_y,batch_size=8,epochs = self._epochSize)\n return None", "def fit(self, x, y):\n def initiate_theta(dim):\n self.theta = np.zeros(dim)\n # print('self.theta initiated is {}'.format(self.theta))\n \n def implement_sigmoid(x):\n if self.theta is None:\n initiate_theta(x.shape[1])\n z = np.matmul(np.transpose(self.theta), np.transpose(x))\n return 1/(np.ones(x.shape[0]) + np.exp(-z))\n \n def implement_partial_loss(x, y):\n return -np.matmul(np.transpose(y - implement_sigmoid(x)), x)/x.shape[0]\n \n def implement_transposed_hess(x):\n sigmoid_hadamard = implement_sigmoid(x) * (np.ones(x.shape[0]) - implement_sigmoid(x))\n hess2 = np.diag(sigmoid_hadamard)\n hess = np.matmul(hess2,x)\n hess = np.matmul(np.transpose(x),hess)/x.shape[0]\n hess_inverse = np.linalg.inv(hess)\n return hess_inverse\n \n def train(x, y):\n count = 0\n if self.theta is None:\n initiate_theta(x.shape[1])\n while count < self.max_iter:\n if self.verbose:\n loss_y1 = np.matmul(np.transpose(y), np.log(implement_sigmoid(x)))\n loss_y0 = np.matmul(np.transpose(np.ones(x.shape[0]) - y), np.log(np.ones(x.shape[0]) - implement_sigmoid(x)))\n loss = -(loss_y1 + loss_y0 )/x.shape[0]\n print('Average empirical loss for step {} is {}'.format(count, loss))\n delta = np.matmul(implement_transposed_hess(x), implement_partial_loss(x, y))\n new_theta = self.theta - delta * self.step_size\n delta_theta = np.linalg.norm(new_theta - self.theta)\n # print('delta is {}'.format(delta_theta))\n if delta_theta < self.eps:\n return new_theta\n else:\n self.theta = new_theta\n count += 1\n return self.theta\n \n return train(x, y)", "def fit(self, X, y):\n\t\tself._initialize_weights(X.shape[1])\n\t\tself.cost_ = []\n\n\t\tfor i in range(self.n_iter):\n\t\t\tif self.shuffle:\n\t\t\t\tX, y = self._shuffle(X,y)\n\t\t\tcost = []\n\t\t\t#calculate for each sample\n\t\t\tfor xi, target in zip(X, y):\n\t\t\t\tcost.append(self._update_weights(xi, target))\n\t\t\tave_cost = sum(cost)/len(y)\n\t\t\tself.cost_.append(ave_cost)\n\t\treturn self", "def train_on_batch(\n network,\n optimizer,\n loss_fn,\n X,\n y_target,\n current_epoch\n):\n training_stats = 
ValueRegistry.get_instance(\"training_stats\")\n training_stats[\"current_epoch\"] = current_epoch\n optimizer.zero_grad()\n # Do the forward pass to predict the primitive_parameters\n y_hat = network(X)\n loss = loss_fn(y_hat, y_target)\n # Do the backpropagation\n loss.backward()\n nn.utils.clip_grad_norm_(network.parameters(), 1)\n # Do the update\n optimizer.step()\n\n return (\n loss.item(),\n [x.data if hasattr(x, \"data\") else x for x in y_hat],\n )", "def fit(self, X, y):\n\n # compute number of minibatches for training, validation and testing\n input_size = X.shape.eval()[0]\n num_training_batches = input_size/self.mini_batch_size\n\n # define the (regularized) cost function, symbolic gradients, and updates\n l2_norm_squared = sum([(layer.w**2).sum() for layer in self.layers])\n cost = self.layers[-1].cost(self)+\\\n 0.5*self.l2*l2_norm_squared/num_training_batches\n grads = T.grad(cost, self.params)\n updates = [(param, param-self.eta*grad)\n for param, grad in zip(self.params, grads)]\n\n # define functions to train a mini-batch, and to compute the\n # accuracy in validation and test mini-batches.\n i = T.lscalar() # mini-batch index\n train_mb = theano.function(\n [i], cost, updates=updates,\n givens={\n self.x:\n X[i*self.mini_batch_size: (i+1)*self.mini_batch_size],\n self.y:\n y[i*self.mini_batch_size: (i+1)*self.mini_batch_size]\n })\n \n # Do the actual training\n for epoch in xrange(self.epochs):\n print(\"Training epoch number: {0}\".format(epoch))\n for minibatch_index in xrange(num_training_batches):\n iteration = num_training_batches*epoch+minibatch_index\n cost_ij = train_mb(minibatch_index)", "def train(self, x, y, lr):\n self.forward(x)\n self.backward(y)\n self.update(lr)\n return", "def fit(self, X, y):\n X = self.normalize_data(X)\n X = self.add_bias(X)\n n_tasks = len(X)\n n_feats = X[0].shape[1]\n W = np.random.randn(n_feats, n_tasks)\n cost_function = 0\n start = time.time()\n for t in range(n_tasks):\n fista = Fista(self, self.lambda_1)\n w_opt = fista.fit(W[:, t], X[t], y[t], self.groups,\n max_iter=self.max_iter)\n W[:, t] = w_opt\n cost_function += self.cost(X[t], y[t], W[:, t])\n stop = time.time() - start\n self.W = W\n return W, np.array([cost_function]), stop", "def fit ( self, X: np.ndarray, y: np.ndarray ):\n \n self.X = X\n self.y = y", "def fit(self, X, y):\n self.X_data = X\n self.y = y", "def fit(self, X, y=None):\n # Check that X and y have correct shape\n X, y = check_X_y(X, y)\n # Store the classes seen during fit\n self.classes_ = unique_labels(y)\n \n if self.shuffle:\n X, y = shuffle(X, y)\n\n self.X_ = X\n self.y_ = y\n \n self._initialize_map()\n self._create_location_vectors()\n self._initialize_sigma()\n \n for i in range(self.num_epoch):\n j= 0\n print(f\"Epoch:{i}\")\n while(j < self.X_.shape[0]):\n current_batch_end = j+self.batch_size if j+self.batch_size < self.X_.shape[0] else self.X_.shape[0]\n current_batch = self.X_[j:current_batch_end]\n self._feedforward(current_batch)\n self._backprop(j, self.X_.shape[0], current_batch)\n j = current_batch_end \n return self", "def fit(self, X, y, max_iter=MAX_ITER):\n X = self.normalize_data(X)\n X = self.add_bias(X)\n n_tasks = len(X)\n n_feats = X[0].shape[1]\n W = np.random.randn(n_feats, n_tasks)\n start = time.time()\n cost_function = 0\n for t in range(n_tasks):\n #print('Training {} task with lasso regression'.format(t))\n lasso = LogisticRegression(C=self.lambda_1,\n penalty='l1',\n max_iter=max_iter)\n lasso.fit(X[t], y[t])\n W[:, t] = lasso.coef_\n stop = time.time() - start\n 
self.W = W\n return W, np.array([cost_function]), stop", "def train(self, X_train, y_train,X_test,y_test):\n K = np.arange(self.epochs)\n F= y_train.shape[0]\n for s in tqdm(range(self.epochs)):\n for i in range( 0, F, 10):\n grad_W = self.calc_gradient(self.W, np.insert(X_train[i:i+10], 0, 1, axis=1), y_train[i:i+10], self.reg_const)\n self.W = self.W - self.alpha * grad_W\n K[s] = self.get_acc(self.predict(X_test),y_test)\n return K", "def fit(self, x, y, **kwargs):\n if self.build_fn is None:\n self.model = self.__call__(**self.filter_sk_params(self.__call__))\n elif (not isinstance(self.build_fn, types.FunctionType) and\n not isinstance(self.build_fn, types.MethodType)):\n self.model = self.build_fn(\n **self.filter_sk_params(self.build_fn.__call__))\n else:\n self.model = self.build_fn(**self.filter_sk_params(self.build_fn))\n\n loss_name = self.model.loss\n if hasattr(loss_name, '__name__'):\n loss_name = loss_name.__name__\n if loss_name == 'categorical_crossentropy' and len(y.shape) != 2:\n y = to_categorical(y)\n\n fit_args = copy.deepcopy(self.filter_sk_params(Sequential.fit))\n fit_args.update(kwargs)\n\n history = self.model.fit(x, y, **fit_args)\n\n return history", "def train(self,X,y):\n self.X_train = X\n self.y_train = y\n self.class_labels = np.unique(self.y_train)", "def fit(self, X, y):\n y = np.array(y)\n rows, columns = X.shape\n num_batches = int(np.ceil(rows / self.batch_size))\n batches = np.arange(num_batches + 1) * self.batch_size\n indxs = np.arange(rows)\n\n self.W = np.zeros(columns)\n self.b = rng.random(1)\n\n # stochastic gradient descent logic\n for _ in range(self.num_epochs):\n rng.shuffle(indxs)\n\n for i, j in zip(batches[0:-1], batches[1:]):\n batch_indxs = indxs[i:j]\n x_batch = X[batch_indxs]\n y_batch = y[batch_indxs]\n self.update(x_batch, y_batch)\n\n # track loss history during training\n self.loss_history.append(self.loss(self.predict_proba(X), y))\n self.accuracies.append(self.accuracy(X, y))", "def fit(self,X,y):\n self.X = X\n self.y = y\n return self", "def predict(x_train, y_train, x_test, y_test, fn, params):\n y_train_predicted = fn(x_train, None, *params)\n y_train_predicted = (y_train_predicted >= 0.5) * 1\n y_test_predicted = fn(x_test, None, *params)\n y_test_predicted = (y_test_predicted >= 0.5) * 1\n\n train_acc = np.sum(y_train_predicted == y_train) / x_train.shape[0]\n test_acc = np.sum(y_test_predicted == y_test) / x_test.shape[0]\n print('train accuracy =', train_acc)\n print('test accuracy =', test_acc)\n scatter_plot(x_train, y_train_predicted, x_test, y_test_predicted, 'predicted 0', 'predicted 1')", "def fit(self, X, y, X_val=None, y_val=None):\n #Adding an extra column of 1s for constant term\n n_samples = X.shape[0]\n n_features = X.shape[1]\n temp = np.empty((n_samples, n_features+1), dtype=float) # adding extra 1s to X\n for i in range(n_samples):\n temp[i] = np.append(X[i], 1)\n X = temp\n y = y.reshape((-1,1)) # reshaping y \n\n try: # if validation set is present\n if X_val.all() != None and y_val.all() != None:\n # adding extra 1s and reshaping y\n val_n_samples = X_val.shape[0]\n val_n_features = X_val.shape[1]\n temp = np.empty((val_n_samples, val_n_features+1), dtype=float)\n for i in range(val_n_samples):\n temp[i] = np.append(X_val[i], 1)\n X_val = temp\n y_val = y_val.reshape((-1,1))\n except AttributeError:\n pass\n\n self.theta = np.zeros((n_features+1,1)) # these are the model parameters\n self.train_history = np.zeros(self.n_iterations) # initialising array store train loss history\n self.val_history = 
np.zeros(self.n_iterations) # initialising array store val loss history\n \n if self.cost_fn == 'RMSE':\n self.gradient_descent_rmse(X, y, X_val, y_val) # RMSE gradient call\n h_theta_of_x = np.dot(X, self.theta)\n cost = self.RMSE(n_samples, h_theta_of_x, y) # final RMSE loss\n # print('training_loss', cost)\n\n elif self.cost_fn == 'MAE':\n self.gradient_descent_mae(X, y, X_val, y_val) # MAE gradient call\n h_theta_of_x = np.dot(X, self.theta)\n cost = self.MAE(n_samples, h_theta_of_x, y) # final MSE loss\n # print('training_loss', cost)\n\n\n # fit function has to return an instance of itself or else it won't work with test.py\n return self", "def fit(self, trainA, trainY , trainX = None, valA = None, valX = None, valY = None):\n \n n_nodes = trainA.shape[1]\n \n if self.feature_type == 'identity':\n n_features = n_nodes\n trainX = np.eye(n_features)\n n_classes = trainY.shape[1]\n feature_type = self.feature_type\n n_hidden = self.n_hidden\n act_func = self.act_func\n reg_type = self.reg_type\n reg_beta = self.reg_beta\n drop_rate = self.drop_rate\n \n \n #Build the model\n self.build_model(n_nodes, n_features, n_classes, feature_type, n_hidden,\n act_func, reg_type, reg_beta, drop_rate)\n \n #Test the model\n prediction = self._predict(trainA, trainX)\n \n #Train the model\n # self._train_model(trainA, trainX, trainY, valA, valX, valY)\n \n return prediction", "def train_fn(self, x_train, y_train, compute_error):\n def train(theta_value, theta):\n theta.set_value(theta_value, borrow=True)\n return compute_error(x_train, y_train)\n\n return train", "def fit(self, X, y):\n if self._intercept:\n X = self._add_intercept(X)\n\n if self._theta is None or (self._theta and self._theta.shape != X.shape[1]):\n self._theta = np.zeros(X.shape[1]) # Initialize parameters\n\n for n in range(self._n_iter):\n h = sigmoid(np.dot(X, self._theta))\n dW = np.dot(X.T, (y-h))\n self._theta += self._lr * dW\n if (n % (self._n_iter/10) == 0): # Print every 10% of total training iterations\n print(\"Train Accuracy: \", binary_accuracy(y, h)) \n print(\"Train Loss: \", binary_cross_entropy(y, h))", "def _train(self,\n Xs: Array,\n Ys: Array,\n metric: Callable = None,\n **kwargs):\n self.model.fit(Xs, Ys, **kwargs)\n return None", "def fit(self, X):", "def train(self, trainX, trainY):\n self.model = KNeighborsRegressor(n_neighbors=5)\n self.model.fit(trainX, trainY)", "def train(X, y, W1, W2):\n \n # Forward propagation\n \n # Dot product of X (input) and first set of 3x2 weights\n Z2 = np.dot(X, W1)\n # activation function\n A2 = sigmoid(Z2) \n # dot product of hidden layer (Z2) and second set of 3x1 weights\n Z3 = np.dot(A2, W2) \n # final activation function\n A3 = sigmoid(Z3) \n\n \n # Back propagation\n \n # error in output\n o_error = y - A3 \n # applying derivative of sigmoid to error\n o_delta = o_error*sigmoid_gradient(A3) \n\n\n # z2 error: how much our hidden layer weights contributed to output error\n z2_error = o_delta.dot(W2.T) \n # applying derivative of sigmoid to z2 error\n z2_delta = z2_error*sigmoid_gradient(A2) \n\n\n # Update the weights\n W1 += X.T.dot(z2_delta) \n W2 += A2.T.dot(o_delta) \n\n return A3", "def fit(self,X_train,y_train):\n assert X_train.shape[0] == y_train.shape[0], \\\n \"the size of X_train must equal to the size of y_train\"\n assert self.k <= X_train.shape[0],\\\n \"the size of X_train must be at least k\"\n self._X_train =X_train\n self._y_train =y_train\n\n return self", "def fit(self, X, y) :\n\n ### ========== TODO : START ========== ###\n # part b: set 
self.probabilities_ according to the training set\n # create a dictionary of frequencies and convert to probabilities\n frequencies = Counter(y)\n self.probabilities_ = {key:float(value)/len(y) for (key,value) in frequencies.items()}\n ### ========== TODO : END ========== ###\n\n return self", "def fit(self, x, y):\n # *** START CODE HERE ***\n num_examples = x.shape[0]\n num_features = x.shape[1]\n iteration = 1\n if self.theta == None:\n self.theta = np.zeros((num_features,))\n while iteration <= self.max_iter:\n h_theta = np.dot(x, self.theta)\n g_theta = self.sigmoid(h_theta)\n J_cost = -np.mean(y*np.log(g_theta) + (1 - y)*np.log(1 - g_theta))\n H = 1/num_examples*(np.dot(np.transpose(g_theta*(1-g_theta))*np.transpose(x), x))\n J_prime = - 1/num_examples*np.dot(np.transpose(y - g_theta), x)\n d_theta = - np.linalg.solve(H, J_prime)\n self.theta += d_theta\n if np.linalg.norm(d_theta, 1) < self.eps:\n break\n if self.verbose:\n print(\"Loss value: \", J_cost)\n iteration += 1\n # *** END CODE HERE ***", "def fit(self, X, y):\n self.model_x = X\n self.model_y = y", "def fit(self, X, y, **fit_params):\n ...", "def fit(self, x_train: np.ndarray, y_train: np.ndarray) -> None:\n # Stores training data and targets for use in derived classes\n self.x_train = x_train\n self.y_train = y_train", "def fit(self, X, y):\n self.__X = X\n self.__y = y\n self.__trained = True", "def fit(self, X, y=None):\n # train on a training dataset\n self.logger.info(\n self.__name__ + ' is trained on {:d} samples with {:d} features.'.format(X.shape[0], X.shape[1]))\n pass", "def fit(self,X_train,y_train):\r\n \r\n self.X_train_data=X_train.reset_index(drop=True)\r\n self.y_train_data=y_train.reset_index(drop=True)\r\n \r\n temp_fitted_model=[]\r\n for each_model in self.model_list:\r\n each_model.fit(self.X_train_data,self.y_train_data)\r\n temp_fitted_model.append(each_model)\r\n \r\n self.fitted_model=temp_fitted_model", "def fit_predict(self, train_x: pd.DataFrame, train_y: pd.Series, test_x: pd.DataFrame, test_y: pd.Series) -> dict:\n self.evaluator.fit(train_x, train_y, test_x, test_y)\n predictions = self.evaluator.predict(test_x)\n print(predictions)\n metrics = metrics_stat(predictions, test_y)\n return metrics", "def ytrain(self,value: list)->None:", "def train(self, train_x, train_y, optimzer='adam'):\n self.history = self.model.fit(train_x, train_y, epochs=self.epochs, batch_size=self.batch_size,\n verbose=self.verbose, shuffle=False)", "def train(self, xFeat, y):\n if type(xFeat) != np.ndarray: # if the data isn't a numpy array, eg dataframe, convert to numpy\n self.xTrain = xFeat.to_numpy()\n else:\n self.xTrain = xFeat\n if type(y) != np.ndarray:\n self.yTrain = y.to_numpy()\n else:\n self.yTrain = y\n # set the train set columns to [1 x classification, d x features, 1 x placeholder for distances]\n self.trainSet = np.column_stack((np.atleast_1d(self.yTrain), self.xTrain, np.empty(len(self.xTrain))))\n return self", "def train(self, X_train, y_train):\n\n self.model_pipeline.fit(X_train, y_train)", "def train(self, x, y):\n len_dim_0 = x.shape[0]\n x = np.reshape(x, (len_dim_0, 784))\n with tf.GradientTape() as tape:\n tape.watch(self.layers)\n y_out = self.forward(x)\n loss = self.mean_squared_error(y_out, y)\n grads = tape.gradient(loss, self.layers)\n temp_layers = []\n for layer, grad in zip(self.layers, grads):\n new_w = layer.weights - (self.learning_rate * grad.weights)\n new_b = layer.biases - (self.learning_rate * grad.biases)\n temp_layers.append(self.Layer(new_w, new_b))\n self.layers = 
temp_layers\n\n return loss", "def _fit_function(self,x,a,b):\n return b + a*x", "def fit(self, X, y):\n # Code to fit the model.\n\n train_stuff = self._vectorizer.fit_transform(X, y)\n\n self._classifier.fit(train_stuff, y = y)\n\n\n return self", "def fit(self, X, y):\n\t\trgen = np.random.RandomState(self.random_state)\n\t\tself.w_ = rgen.normal(loc=0.0, scale=0.01, size=1+X.shape[1])\n\t\tself.cost_ = []\n\n\t\tfor i in range(self.n_iter):\n\t\t\tnet_input = self.net_input(X)\n\t\t\toutput = self.activation(net_input)\n\t\t\terrors = y - output\n\t\t\t#update weights\n\t\t\tself.w_[1:] += self.eta * X.T.dot(errors)\n\t\t\tself.w_[0] += self.eta * errors.sum()\n\t\t\tcost = (errors**2).sum() / 2.0\n\t\t\tself.cost_.append(cost)\n\t\treturn self", "def ytrain(self,)->list:", "def fit(self, X_train, y_train, X_test, y_test, n_epochs=1):\n with tf.Session() as sess:\n init = tf.global_variables_initializer()\n init.run()\n for epoch in range(n_epochs):\n # Create Batches with size of BATCH_SIZE\n X_train_batches, y_train_batches = generate_random_batches(X_train, y_train, self.batch_size)\n # print(\"-------------------X_train shape: \", X_train.shape)\n # print(\"-------------------y_train shape: \", y_train.shape)\n\n # Iterage through the batches and performn training each time\n for X_batch, y_batch in zip(X_train_batches, y_train_batches):\n # print(\"X_batch shape: \", X_batch.shape)\n # print(\"y_batch shape: \", y_batch.shape)\n # print(X_batch)\n # print(y_batch)\n # Calculate Next Gradient Descent Step\n feed_dict = {self.X_tf: X_batch, self.y_tf: y_batch, self.keep_prob: 0.5}\n summary, _ = sess.run([self.merged_summaries, self.training_op], feed_dict=feed_dict)\n self.writer_train.add_summary(summary, epoch)\n\n # Log Accuracy of Test Data\n feed_dict = {self.X_tf: X_test, self.y_tf: y_test, self.keep_prob: 0.5}\n summary, acc = sess.run([self.merged_summaries, self.accuracy], feed_dict=feed_dict)\n self.writer_test.add_summary(summary, epoch)\n\n # if epoch % 1 == 0:\n acc_train = self.accuracy.eval(feed_dict={self.X_tf: X_train, self.y_tf: y_train, self.keep_prob: 1.0})\n acc_test = self.accuracy.eval(feed_dict={self.X_tf: X_test, self.y_tf: y_test, self.keep_prob: 1.0})\n print(\"Epoch: \", epoch, \"Train accuracy:\", acc_train, \"Test accuracy:\", acc_test)\n\n #Save the final model\n self.saver.save(sess, self.log_dir + '/model')", "def fit(self, X_train, y_train):\n if X_train.shape[0] != y_train.shape[0]:\n raise AssertionError\n\n X_train = np.hstack((np.ones((X_train.shape[0], 1)), X_train))\n rows = X_train.shape[0]\n cols = X_train.shape[1]\n self._weight = np.random.normal(size=cols)\n for epoch in range(self._max_iter):\n data = np.hstack((X_train, y_train.reshape((-1, 1))))\n np.random.shuffle(data)\n X_train = data[:, : -1]\n y_train = data[:, -1].flatten()\n for i in np.arange(0, rows, self._batch_size):\n batch = X_train[i:i + self._batch_size]\n y = y_train[i:i + self._batch_size]\n pred = batch @ self._weight.T\n f_error = pred - y\n gradient = 2 * f_error.dot(batch) + self._alpha * self._l1_ratio * np.sign(\n self._weight) + self._alpha * (1 - self._l1_ratio) * self._weight\n self._weight -= self._lc * gradient / batch.shape[0]\n self._loss.append(self.mse(X_train @ self._weight.T, y_train))", "def _fit_predict(X_train, y_train, X_test):\n raise NotImplementedError()", "def train(self, X_train, Y_train, X_test = None, Y_test = None, epochs = 100, batch_size = 32, learning_rate = 0.005):\n m_train = X_train.shape[1]\n for epoch in range(epochs + 1):\n batch 
= np.arange(0, m_train)\n np.random.shuffle(batch)\n for k in range(m_train // batch_size + 1):\n if k * batch_size < m_train:\n X_mini_batch = X_train[:,batch[k * batch_size:(k + 1) * batch_size]]\n Y_mini_batch = Y_train[:,batch[k * batch_size:(k + 1) * batch_size]]\n self.update_weights(X_mini_batch, Y_mini_batch, learning_rate)\n \n if epoch % 10 == 0: \n # Loss function\n A2 = self.feedforward(X_train)\n cost = (1 / m_train) * np.sum(-np.multiply(Y_train, np.log(A2)) - np.multiply(1 - Y_train, np.log(1 - A2)))\n print(f\"epoch:{epoch}, Cost: {cost}, \", end = '')\n # Accutacy on training data\n if X_test is not None and Y_test is not None:\n A2_test = self.feedforward(X_test)\n class_pred = A2_test.argmax(axis = 0)\n class_actual = Y_test.argmax(axis = 0)\n acc = sum(class_actual == class_pred)\n print(f\"accuracy:{acc}/{X_test.shape[1]}\")", "def train(self, x_data, y_data):\n self.model.fit(np.array(x_data), np.array(y_data),\n batch_size=2,\n epochs=3,\n verbose=1)\n self.model.save_weights(self.model_filename)", "def train_gd(self,f,x,y,learnrate=0.0001,T=1000):\n f = copy.deepcopy(f)\n loss = numpy.zeros(T)\n t = 0\n deltal = -numpy.inf\n while (t<T) and (deltal<1e-7):\n (loss[t],dldw) = self(f,x,y)\n f.w = f.w -learnrate*dldw\n\n if (numpy.remainder(t,100)==0):\n print('Epoch %d: loss=%f' % (t,loss[t]))\n if (t>0):\n deltal = loss[t]-loss[t-1]\n t += 1\n \n return (f,loss)" ]
[ "0.73206455", "0.7135874", "0.7122955", "0.6952598", "0.682414", "0.6773915", "0.6773915", "0.6700229", "0.6648014", "0.6648014", "0.6648014", "0.66138375", "0.65697306", "0.6558614", "0.64612556", "0.6438456", "0.6420302", "0.6420302", "0.6370733", "0.6363991", "0.6362005", "0.6357792", "0.63466406", "0.63098955", "0.6283909", "0.6280449", "0.62759", "0.62682116", "0.6262606", "0.62576383", "0.62418723", "0.6241615", "0.62381065", "0.6227186", "0.62212473", "0.62099427", "0.6206436", "0.6197607", "0.619249", "0.61914974", "0.61914974", "0.61914974", "0.61914974", "0.61914974", "0.61914974", "0.61914974", "0.61914974", "0.61914974", "0.61914974", "0.61895007", "0.61893827", "0.6179277", "0.6177095", "0.61653674", "0.6150506", "0.6132444", "0.6120795", "0.611732", "0.6108555", "0.61065775", "0.6097904", "0.60871816", "0.6085194", "0.6079756", "0.6072928", "0.60679483", "0.60549015", "0.6032596", "0.6030837", "0.60296506", "0.60286695", "0.6025699", "0.6021564", "0.60079867", "0.6005981", "0.6000002", "0.5985766", "0.59762645", "0.5976121", "0.59760606", "0.5973517", "0.59725577", "0.59724677", "0.5971301", "0.59653866", "0.59390587", "0.5936166", "0.59300256", "0.592697", "0.59165853", "0.5913665", "0.5911989", "0.5907354", "0.5901609", "0.5898475", "0.5894368", "0.5879885", "0.5879296", "0.58733755", "0.58565444", "0.5854347" ]
0.0
-1
Stops training (can be called by another thread)
def stop_fit(self):
    self._stop_fit = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def end_training(self):\n self.training = False", "def stop_training_job(TrainingJobName=None):\n pass", "def stop(self):\n self.requested_state = 'Stopped'\n self.ml_interface.stop()", "def stop(self):\n self._state.transit(sitcpy.THREAD_STOPPING)", "def stop(self):\n self._state.transit(sitcpy.THREAD_STOPPING)", "def stop(self):\n\n self.stop_thread = True", "def stop(self) -> None:\n ...", "def stop(self):\n self.running = False\n self.cam.stop()\n self.amplifier.stop()\n pass", "def stop(self):\n self._Thread__stop()", "def stop() -> None:", "def stop(self) -> None:", "def stop(self) -> None:", "def stop():", "def stop():", "def stop():", "def stop():", "def stop(self):\n self.running = False\n self.join()", "def stop(self):\r\n self.running = False", "def stop(self):\r\n self.running = False", "def stop(self):\n self._stop_flag = True", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self._run = False", "def stop(self):\n # close the tf session\n self.sess.close()", "def stop(self):\n\t\tself._run_flag = False\n\t\tself.wait()", "def stop(self):\n self.stop_recognising.set()\n self.thread.join()", "def shutdown_training(self):\n\n self._train_data_set = None\n self._test_data_set = None", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop (self):\n pass", "def stop (self):\n pass", "def stop(self):\r\n pass", "def stop(self):\n self._run = False\n self.IA.stop()", "def stop(self):\n self.__running = False", "def stop(self):\n self.controller.stop()", "def stop(self):\n self._running = False", "def stop(self):\n self._running = False", "def stop(self):\n self._run_flag = False\n self.wait()", "def stop(self):\n self._run_flag = False\n self.wait()", "def stop(self):\n self._run_flag = False\n self.wait()", "def stop(self):\n self._run_flag = False\n self.wait()", "def stop(self):\n self._run_flag = False\n self.wait()", "def stop(self):\n self._run_flag = False\n self.wait()", "def stop(self):\n self.thread.join()", "def stop(self) -> None:\n pass", "def foreceStop(self):\n self.__success = False\n self.stop()", "def stop(self):\r\n self.terminating = True", "def stop(self):\n\n self.keep_running = False", "def stop(self):", "def stop(self):", "def stop(self):\n self.api.stop()", "def stop_thread(self):\n t, e = self.workers[0]\n e = e.set() # put event to set True for stop thread\n del self.workers[0]", "def testTrainingStop(self):\n # The `train()` function raises a RuntimeError\n with self.assertRaises(RuntimeError):\n train(\n self.params,\n RayDMatrix(self.x, self.y),\n callbacks=[_kill_callback(self.die_lock_file)],\n num_boost_round=20,\n ray_params=RayParams(max_actor_restarts=0, num_actors=2))", "def stop(self):\n\t\tpass", "def _stop(self):\n if self._classifier:\n self._classifier.__exit__()\n self._classifier = None", "def stop(self):\n raise NotImplementedError", "def stop(self):\n raise NotImplementedError", "def stop(self) -> None:\n self._running = False", "def stop(self) :\n raise NotImplementedError(\"stop not implemented\")", "def _end_training(self):\n # Reset this variable as it is reused during evaluation phase\n self.is_filtered = False\n 
self.eval_config = {}\n \n #close the tf session\n self.sess_train.close()\n \n #set is_fitted to true to indicate that the model fitting is completed\n self.is_fitted = True", "def stop(self):\r\n self.stopped = True", "def stop(self):\n self.send_stop()\n self.join()", "def stop(self):\n self._stop_event.set()", "def _stop(self):", "def stop(self):\n self.stopped = True", "def stop(self):\n self.stopped = True", "def stop(self):\n self.stopped = True", "def stop(self):\n debug(\"CBA4.__worker_thread.stop()\")\n self.__run = False\n #end stop()", "def stop(self, **kwargs):\n self.turn_off()", "def training_end(self):\n pass", "def stop(self):\n return", "def stop(self) -> None:\n raise NotImplementedError()", "def stop(self) -> None:\n raise NotImplementedError()", "def stop(self):\n self._log.info(\"Stopping\")\n self._running.clear()", "def stop(self):\n raise NotImplementedError()", "def stop(self):\n raise NotImplementedError()", "def stopController(self):\n self.running = False", "def stop(self):\r\n self._stop.set()", "def stop(self):\r\n raise NotImplementedError('method stop() is not implemented')", "def stop(self):\n self._should_run = False", "def stop_threading(self):\n if self.algorithm_thread != None:\n self.running = False\n self.algorithm_thread.join()\n self.algorithm_thread = None\n if self.step_event.is_set():\n self.step_event.clear()", "def stop(self):\n self._context.state = STOPPED", "def Stop(self) :\n\t\t...", "def stop(self):\n print_message_received(\"stop\")\n self.robot.drive_system.stop()", "def terminate(self):\n self._running = False", "def stop(self) -> None:\n raise NotImplementedError(\"Base method not implemented\")", "def stop(self):\n self._stop_event.set()", "def stop(self):\n self._stop_event.set()", "def stop(self):\n self.on_stop()" ]
[ "0.8000698", "0.759568", "0.73393655", "0.72586185", "0.72586185", "0.71302235", "0.7112129", "0.70853776", "0.7046241", "0.70452595", "0.70376647", "0.70376647", "0.70227903", "0.70227903", "0.70227903", "0.70227903", "0.7018452", "0.70008683", "0.70008683", "0.69937384", "0.6986578", "0.6986578", "0.6986578", "0.6986578", "0.6986578", "0.6986434", "0.6982969", "0.6960608", "0.6960573", "0.69586575", "0.69419134", "0.69419134", "0.69419134", "0.69419134", "0.69419134", "0.69419134", "0.69419134", "0.69419134", "0.69419134", "0.69419134", "0.69419134", "0.6939639", "0.6939639", "0.69274056", "0.69113445", "0.69099647", "0.690979", "0.6907185", "0.6907185", "0.690501", "0.690501", "0.690501", "0.690501", "0.690501", "0.690501", "0.6904709", "0.68968636", "0.68968236", "0.6890827", "0.6885443", "0.68826675", "0.68826675", "0.68812466", "0.68802905", "0.68687147", "0.6868241", "0.6862446", "0.68602234", "0.68602234", "0.683121", "0.68159026", "0.68014914", "0.6765009", "0.676297", "0.6757274", "0.67532754", "0.6752545", "0.6746216", "0.6746216", "0.67445487", "0.67429084", "0.67426497", "0.6708166", "0.67060226", "0.67060226", "0.669585", "0.66904", "0.66904", "0.66799796", "0.66776484", "0.6676523", "0.6671257", "0.66652554", "0.6664433", "0.665664", "0.66496354", "0.6639221", "0.6625595", "0.6616147", "0.6616147", "0.661446" ]
0.0
-1
Saves an 'allbatteriesincluded' regressor at a given path (folder).
def save(self, path: str):
    os.makedirs(path, exist_ok=True)
    frozen = encode_indent(self)
    lprint(f"Saving regressor to: {path}")
    with open(join(path, "regressor.json"), "w") as json_file:
        json_file.write(frozen)
    for i, model in enumerate(self.models):
        channel_path = join(path, f"channel{i}")
        os.makedirs(channel_path, exist_ok=True)
        frozen_model = encode_indent(model)
        with open(join(channel_path, "regressor_model.json"), "w") as json_file:
            json_file.write(frozen_model)
        model._save_internals(channel_path)
    return frozen
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, folder):\n self.generator.save_weights('%s/generator.h5'%folder)\n self.critic.save_weights('%s/critic.h5'%folder)", "def save(self, path):\n individual = self.population.fittest_individual()\n order = [int(l) for l in individual.label_order]\n fitness = individual.fitness\n data = {'name': self.ds.name,\n 'num_labels': len(order),\n 'order': order,\n 'fitness': fitness\n }\n with open(path, 'w') as f:\n json.dump(data, f)", "def save(self, path):\n for i, m in enumerate(self.model_save):\n m.save(os.path.join(path, str(i) + \"-\" + m.name))", "def save(self, directory):\n for field in self.save_fields:\n np.save(pjoin(directory, field+'.npy'), self.__dict__[field])", "def save(self, path_to_save):\n for item in self.data_array:\n item.save(path_to_save+item.file_name)", "def save(self, path):\n pass", "def save(self, path):\n pass", "def save(self, path):\n pass", "def save_model_wandb(save_path: str):\n wandb.save(os.path.abspath(save_path))", "def save_pickle(self, path):\n with open(path, 'wb') as f:\n pickle.dump(self, f, protocol=pickle.HIGHEST_PROTOCOL)", "def save(self, export_path: str):", "def save_all(cls, dirpath=\".\"):\n for n, v in cls.__data.items():\n pickle.dump(v, open(cls.dirpath + n + '.p', 'wb'))\n print \"Data saved to: %s\" % dirpath", "def save(self, path):\n print(\"Warning: Default save used\")\n with open(path, 'wb') as f:\n pickle.dump(self, f)", "def save(self, path=''):\n if not self.__isBuilt:\n self._rebuild()\n if not path:\n self.w.save(self.path)\n else:\n if not path.endswith('.shp'):\n path = os.path.splitext(path)[0] + '.shp'\n self.w.save(path)", "def save_band_for_path(self, path, filename):\n with open(filename, 'ab') as file_hander:\n for point in path:\n energies = self.problem.energy_eigenvalues(point[0], point[1])\n np.savetxt(file_hander, energies)", "def save_attributes_for_aggregator(self, paths):\r\n # These functions save the objects we will later access using the aggregator. 
They are saved via the `pickle`\r\n # module in Python, which serializes the data on to the hard-disk.\r\n\r\n with open(f\"{paths.pickle_path}/dataset.pickle\", \"wb\") as f:\r\n pickle.dump(self.dataset, f)\r\n\r\n with open(f\"{paths.pickle_path}/settings.pickle\", \"wb+\") as f:\r\n pickle.dump(self.settings, f)", "def save(self, directory):\n pass # pragma: no cover", "def save_pickle(file, path):\n with open(path, 'wb') as f:\n pickle.dump(file, f)\n file_name = re.findall(r\"/?[^/]+\", path)[-1].strip(\"/\")\n print(f\"Stored {file_name}.\")", "def save(self, path):\n (folder, filename) = os.path.split(path)\n if not filename:\n filename = _clean_filename(self.name)\n path = os.path.join(folder, filename)\n return self.waveform.save(path)", "def save(self,model_path):\n pass\n # filename = \"Models/\"+model_path+\"1.sav\"\n # pickle.dump(self.crf_model, open(filename, 'wb'))", "def save(self, path: str):\n pass", "def save(self, savedir='.', savename='savehyperparams.json'):\n\n with open(os.path.join(savedir, savename), mode='w') as f:\n json.dump(self, f, indent=1, sort_keys=True)", "def save(self, fpath):\n logging.info(\"Saving agent with filepath={}\".format(fpath))\n self.agent.save_weights(fpath, overwrite=True)", "def save(self, dir):\n raise NotImplementedError", "def save(self):\n raise NotImplementedError(\"Saving and updating DerivedBands is not permitted\")", "def save_individual(ind, path):\n with open(path, 'wb') as output:\n pickle.dump(ind, output, pickle.DEFAULT_PROTOCOL)\n output.close()", "def save(self, path):\n pickle.dump(self, open(path, 'wb'))", "def save_agent(self, path):\n # save all parameters needed to reconstruct the agent\n pickle_save(self.save_attrs, path)\n # initialize tensorflow saver\n saver = tf.train.Saver(var_list=self._variables_to_save())\n saver.save(self.sess, path + CHECKPOINT_EXTENSION)", "def save(self, path):\n # create path if not exists\n try:\n os.stat(path)\n except:\n os.mkdir(path)\n # save models\n for key in self.parameter_dict:\n self.models[key].save(os.path.join(path, type(self).__name__ + '_%s.h5' % type(self).key_to_string(key)))\n # save historys\n with open(os.path.join(path, type(self).__name__ + ModelGrid._history_suffix), 'wb') as fp:\n pickle.dump(self.history, fp)\n # save parameter and hyperparameter dict\n with open(os.path.join(path, type(self).__name__ + ModelGrid._parameter_suffix), 'wb') as fp:\n pickle.dump((self.parameter_dict, self.hyperparameter_dict), fp)", "def save_model(clf, save_folder, filename):\n import pickle\n path = save_folder + filename\n with open(path, 'wb') as handle:\n pickle.dump(clf, handle, protocol=pickle.HIGHEST_PROTOCOL)", "def save(self, path: utils.URLPath):\n save_somclassifier_config(self.config, path / \"config.json\")\n self.model.save(str(path / \"model.h5\"))\n io_functions.save_joblib(self.binarizer, path / \"binarizer.joblib\")\n\n io_functions.save_json(self.data_ids[\"validation\"], path / \"ids_validate.json\")\n io_functions.save_json(self.data_ids[\"train\"], path / \"ids_train.json\")", "def saveStuff(stuff, path=None):\n if path == None:\n # TODO take name from something\n output = open('results/i-will-be-overwritten.pkl', 'wb')\n else:\n output = open(path, 'wb')\n\n # Pickle the list using the highest protocol available.\n cPickle.dump(stuff, output, -1)\n output.close()", "def saveStuff(stuff, path=None):\n if path == None:\n # TODO take name from something\n output = open('results/i-will-be-overwritten.pkl', 'wb')\n else:\n output = open(path, 'wb')\n\n # Pickle 
the list using the highest protocol available.\n cPickle.dump(stuff, output, -1)\n output.close()", "def write_regions(pathfolder, key_firms, regions, methodvalues):\n ## Generate namefile\n namefile = generate_namefile(pathfolder, methodvalues)\n\n ## Writting\n db = shelve.open(namefile)\n db['nif'] = key_firms\n db['regions'] = regions\n db['methodvalues'] = methodvalues\n db.close()", "def save_to(self, save_path=\"./\", run_flag='', save_method=\"pickle\"):\n # TODO: Finish the save_method parameters\n time_stamp = self.time_stamp\n time_stamp = self.time_stamp + \"_\" + run_flag\n save_path = os.path.join(save_path, time_stamp)\n\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n if self.feature_importance_pool:\n file_path = os.path.join(save_path, \"feature_importances.pkl\")\n save_file(file_path, self.feature_importance_pool)\n\n if self.feature_importance_hist:\n file_path = os.path.join(save_path, \"feature_importances_hist.png\")\n save_file(file_path, self.feature_importance_hist[0])\n\n if self.area_under_curve_pool:\n file_path = os.path.join(save_path, \"auc_fpr_tpr.pkl\")\n save_file(file_path, self.area_under_curve_pool)\n\n if self.receiver_operating_characteristic_curve:\n file_path = os.path.join(save_path, \"roc_curve.png\")\n save_file(file_path, self.receiver_operating_characteristic_curve[0])\n\n if self.training_report_pool:\n file_path = os.path.join(save_path, \"training_report.pkl\")\n save_file(file_path, self.training_report_pool)\n\n if self.learning_line:\n file_path = os.path.join(save_path, \"learning_curve.png\")\n save_file(file_path, self.learning_line[0])\n\n file_path = os.path.join(save_path, time_stamp + \"_object.pkl\")\n with open(file_path, 'wb') as opfh:\n pickle.dump(self, opfh)", "def save_model(self, folder_name):\n raise NotImplementedError()", "def save(self, path, exclude=None):\n assert self.model, \"Model was not initialized\"\n topology = {k: v for k, v in self.__dict__.items()}\n topology.pop(\"model\")\n if exclude and isinstance(exclude, list):\n for x in exclude:\n topology.pop(x)\n save_model(self.model, topology=topology, filepath=path)", "def save_model(self, file_name):\n with open(file_name, 'wb') as file:\n pickle.dump(self.lin_reg, file)", "def _save_tree_pickle(self, folderpath):\n\n if not os.path.exists(folderpath):\n raise EnvironmentError('Unable to save pickle file to {}, does not exist'.format(folderpath))\n self.tree.manager = None\n\n picklefile = open(os.path.join(folderpath, \"grid.pickle\"), \"wb\")\n data = {}\n for ky, val in self.__dict__.items():\n if ky not in ['data', 'node_data']:\n data[ky] = val\n pickle.dump(data, picklefile, -1)", "def save(self):\n\n if (self._save != '0'):\n p = self._save+self._path[-3:-1]+'_'+str(self._qn)+'.dat'\n np.savetxt(p, self._gf)\n else:\n sys.exit(\"Wrong path to save\")", "def save(self, path: Union[str, pathlib.Path]):\n super().save(path)\n path = pathlib.Path(path)\n elite_path = path / self._ELITE_FNAME\n if self.elite_models:\n warnings.warn(\n \"Future versions of GaussianMLP will save elite models in the same \"\n \"checkpoint file as the model weights.\"\n )\n with open(elite_path, \"wb\") as f:\n pickle.dump(self.elite_models, f)", "def save(self, folder):\n if not path.exists(folder):\n os.makedirs(folder)\n param = {\n \"model\": self.__class__.__name__,\n \"nr_labels\": self.nr_labels,\n \"nr_features\": self.nr_features,\n \"nr_codes\": self.nr_codes,\n \"bias\": self.bias,\n \"pred_kwargs\": self.pred_params.to_dict(),\n }\n param = 
self.append_meta(param)\n with open(\"{}/param.json\".format(folder), \"w\") as f:\n f.write(json.dumps(param, indent=True))\n smat_util.save_matrix(\"{}/W.npz\".format(folder), self.W)\n smat_util.save_matrix(\"{}/C.npz\".format(folder), self.C)", "def export_library(self):\n filename = tkFileDialog.asksaveasfilename(initialdir = self.cwd, title = \"Save glycan library\", filetypes = ((\"db files\",\"*.db\"),(\"all files\",\"*.*\")))\n self.export_glycans(filename, self.user_glycans)", "def save_maps(save_path, *params):\n pass\n # with codecs.open(save_path, \"w\", encoding=\"utf8\") as f:\n # pickle.dump(params, f)", "def save(self, path):\n with tempfile.TemporaryDirectory() as td:\n U.save_state(os.path.join(td, \"model\"))\n arc_name = os.path.join(td, \"packed.zip\")\n with zipfile.ZipFile(arc_name, 'w') as zipf:\n for root, dirs, files in os.walk(td):\n for fname in files:\n file_path = os.path.join(root, fname)\n if file_path != arc_name:\n zipf.write(file_path, os.path.relpath(file_path, td))\n with open(arc_name, \"rb\") as f:\n model_data = f.read()\n with open(path, \"wb\") as f:\n dill.dump((model_data, self._act_params), f)", "def save(self, folder):\n if self.is_predict_only:\n raise Exception(\"Model is predict only! save not supported!\")\n if not path.exists(folder):\n os.makedirs(folder)\n param = {\n \"model\": self.__class__.__name__,\n \"depth\": self.depth,\n \"nr_features\": self.nr_features,\n \"nr_codes\": self.nr_codes,\n \"nr_labels\": self.nr_labels,\n }\n param = self.append_meta(param)\n open(f\"{folder}/param.json\", \"w\", encoding=\"utf-8\").write(json.dumps(param, indent=True))\n for d in range(self.depth):\n local_folder = f\"{folder}/{d}.model\"\n self.model_chain[d].save(local_folder)", "def saveModel(self, save_path):\n if not os.path.exists('/'.join(os.path.split(save_path)[:-1])):\n os.makedirs('/'.join(os.path.split(save_path)[:-1]))\n with open(save_path, 'wb') as fw:\n pickle.dump(self.clf, fw)", "def save_barcodes(self, path):\n pickle.dump(self.barcodes, open(path, 'wb'))", "def save(self, file_path, filename=\"tracer\"):\r\n with open(path.join(file_path, f\"{filename}.pickle\"), \"wb\") as f:\r\n pickle.dump(self, f)", "def write_saver_defs(self):\n assert self.savers_constructed\n full_saver_def = self.full_saver.as_saver_def()\n full_file = self.params.save_dir+self.params.model_name+\"_v\"+self.params.version+\".def\"\n with open(full_file, \"wb\") as f:\n f.write(full_saver_def.SerializeToString())\n self.logger.log_info(\"Full saver def saved in file %s\"%full_file)", "def saveMacro(self):\r\n\t\tCodeSaver().save('Loadfile(\"'+ self.savePath + '\")',self.macroPath)", "def save_new_nirs(self, nirs_file, nirs_path, root_export_dir, id):\n\n copytree(f\"{nirs_path}/{id}\", f\"{root_export_dir}/{id}\")\n old_nirs_f = [fname for fname in os.listdir(f\"{root_export_dir}/{id}\") if \".nirs\" in fname][0]\n os.remove(f\"{root_export_dir}/{id}/{old_nirs_f}\")\n savemat(f\"{root_export_dir}/{id}/{id}.nirs\", nirs_file)", "def file_to_save(self, title='Save as ...', directory=None, filter=\"All files (*.*)\", datafolder=None):\n pass", "def save(self, path):\n for tube in self.inactive_tube_list:\n with open(path + \"{}.tube\".format(tube.id) , 'w+') as f:\n #f.write(\"Tube:{},{}\\n\".format(tube.id, len(tube))) \n for det in tube.detection_list:\n f.write(\"{}, {}, {}, {}, {}, {}, {}\\n\".format(det.frame_number, det.label, det.x1, det.y1, det.x2, det.y2, det.interpolated))", "def save_arch(model, save_folder):\n with open(save_folder + 
'/architecture.txt','w') as a_save:\n model.summary(print_fn=lambda x: a_save.write(x + '\\n'))", "def serialize(self, path):\r\n newModelFitter = self.copy()\r\n with open(path, \"wb\") as fd:\r\n rpickle.dump(newModelFitter, fd)", "def save(self, path):\n save(self.actor_net.state_dict(), path + '_actor.pkl')\n save(self.critic_net.state_dict(), path + '_critic.pkl')", "def write_locations(pathfolder, key_firms, years, locs, methodvalues):\n ## Generate namefile\n namefile = generate_namefile(pathfolder, methodvalues)\n\n ## Writting\n db = shelve.open(namefile)\n db['hashes'] = generate_yearnif_hash(years, key_firms)\n db['nif'] = key_firms\n db['year'] = years\n db['locations'] = locs\n db['methodvalues'] = methodvalues\n db.close()", "def save_sample(self, wad, path, root_path = '', wad_info=None):\n os.makedirs(path, exist_ok=True)\n for level in wad['levels']:\n base_filename=path+wad['wad_name'].split('.')[-2]+'_'+level['name']\n # Path relative to the dataset root that will be stored in the database\n relative_path = base_filename.replace(root_path, '')\n # Adding the features\n for map in level['maps']:\n # Adding the corresponding path as feature for further access\n level['features']['path_{}'.format(map)] = relative_path + '_{}.png'.format(map)\n io.imsave(base_filename + '_{}.png'.format(map), level['maps'][map])\n for wadinfo in wad_info:\n # Adding wad info (author, etc) to the level features.\n if wadinfo not in level['features']: # Computed features have priority over provided features\n level['features'][wadinfo] = wad_info[wadinfo]\n # Completing the features with the level slot\n level['features']['slot'] = level['name']\n # Doing the same for the other features\n level['features']['path_json'] = relative_path + '.json'\n with open(base_filename + '.json', 'w') as jout:\n json.dump(level['features'], jout)\n # Saving the text representation\n with open(base_filename + '.txt', 'wb') as txtout:\n txtout.writelines([bytes(row + [10]) for row in level['text']])\n # Saving the graph\n if 'graph' in level:\n with open(base_filename + '.networkx', 'wb') as graphout:\n nx.write_gpickle(level['graph'], graphout)", "def save(self, directory):\n logging.info(f\"Saving to dir {directory}\")\n self.estimator.save(directory)", "def save_model(self, path):\n pass", "def save(self, path):\r\n if path is None:\r\n return\r\n\r\n logging.info(\"Save model to {}\".format(path))\r\n contained_modules = []\r\n\r\n for idx, name in enumerate(self._modules):\r\n module = self._modules[name]\r\n # __name__ is the class or function name,\r\n # __module__ is the .py name, possibly including the relative\r\n # path if executed from the outside folder.\r\n # logging.info(\"module.__name__: {}\".format(\r\n # type(module).__name__))\r\n # model_path is the saving folder\r\n model_path = os.path.join(\r\n path,\r\n str(idx) + \"_\" + type(module).__name__)\r\n os.makedirs(model_path, exist_ok=True)\r\n # modules are saved here, using the save in modules respectively\r\n module.save(model_path)\r\n # __module__ name of module in which this class was defined\r\n # actually it's the imported module name. 
Here, it's bert\r\n # sometimes you will import the module from folders,\r\n # for instance, you run the __main__ outside sentence_transformers\r\n # folder, in which case the relative import is allowed,\r\n # the __module__ is the relative path, that\r\n # is, the sentence_transformers.models.BERT, equivalent to\r\n # '/sentence_transformers/models/BERT'\r\n # and the BERT is the final \"module name/.py file name\"\r\n # 'sentence_transformers.models.BERT'\r\n # logging.info('type(module).__module__ :{}'.format(\r\n # type(module).__module__))\r\n # If you use __init__ and the classname is the same as the\r\n # .py file. So if you use __init__, and the module when saved\r\n # is imported as the Module_Folder.Classname, then the\r\n # module name will be the\r\n # /sentence_transformers/models/BERT (classname actually)\r\n # in saving, then you can use both the\r\n # __modulefolder__.__module__.__name__\r\n # and the __modulefolder__.__name__ to import the\r\n # filename. So __init__ + save meets both.\r\n # An error may occur when the BERT class is defined in bert.py,\r\n # and you load from __modulefolder__.BERT.__name__\r\n # However,\r\n # If you don't use __init__, then you need to save the path\r\n # as __module__.__name__, and load it use __module__.__name__,\r\n # so loading using __module__.__name__ meets the both\r\n\r\n # here we didn't use __init__\r\n contained_modules.append(\r\n {'idx': idx,\r\n 'name': name,\r\n 'path': os.path.basename(model_path),\r\n 'type': (\r\n type(module).__module__ + '.' + type(module).__name__)})\r\n\r\n # the sequential configuration is saved as the modules.json in\r\n # the out-most folder. The contained_modules dict are saved in\r\n # modules.json. Whilst sequential has no modules to save.\r\n with open(os.path.join(path, 'modules.json'), 'w') as fOut:\r\n json.dump(contained_modules, fOut, indent=2)\r\n\r\n # with open(os.path.join(path, 'config.json'), 'w') as fOut:\r\n # json.dump({'__version__': __version__}, fOut, indent=2)\r", "def SaveAll():\n\tfor file in files:\n\t\tfile.SaveFile()", "def save_pickle(obj, path):\n may_make_dir(osp.dirname(osp.abspath(path)))\n with open(path, 'wb') as f:\n pickle.dump(obj, f, protocol=2)", "def Save(file=CONFIG):\n\tif file in files:\n\t\tfiles[file].SaveFile()", "def save_file(self):\n if self.select_path.text() != \"\":\n filepath = self.select_path.text()\n road_network = self.map_selection.currentText()\n if self.map_selection.currentText() == \"User Defined\":\n road_network = self.map_selection_user_defined.text()\n gen_xml = GenerateXML(filepath, road_network)\n gen_xml.main()\n # remember Road Network for future\n set_metadata(road_network_filepath=road_network)\n else:\n message = \"No export path was selected\"\n iface.messageBar().pushMessage(\"Warning\", message, level=Qgis.Warning)\n QgsMessageLog.logMessage(message, level=Qgis.Warning)", "def save_pickle(companies):\n print(\"Saving companies.pickle...\")\n\n Path(\"output\").mkdir(parents=True, exist_ok=True)\n file_name = 'output/companies.pickle'\n\n companies_dict = {}\n i = 0\n while i < 500:\n company = companies[i]\n companies_dict[company.text] = {\n \"name\": company.text,\n \"url\": company.get_attribute('href'),\n }\n i += 1\n\n with open(file_name, 'wb') as handle:\n pickle.dump(companies_dict, handle)\n\n print('companies.pickle created')", "def save_pickle(obj, path):\n may_make_dir(osp.dirname(path))\n with open(path, 'w') as f:\n pickle.dump(obj, f)", "def save_all(self):\n self.save_client_list_to_file()\n 
self.save_project_list_to_file()", "def save_model(self, dir_path):\n np.savez(\n dir_path + os.path.sep + \"weights.npz\", W1=self.W1, W2=self.W2, W3=self.W3\n )\n np.savez(\n dir_path + os.path.sep + \"biases.npz\", b1=self.b1, b2=self.b2, b3=self.b3\n )", "def savegraph(self, path):\n\n raise NotImplementedError", "def picklesave(obj, path):\n with open(path, 'wb') as file:\n pickle.dump(obj, file)", "def save(self):\n joblib.dump(\n self.classifier, \"data/models/badlymappedfinder/badlymappedfinder.joblib\",\n )", "def save():", "def save(self, path):\n with open(path, 'wb') as f:\n pkl.dump(self, f)", "def save_resources(self, save_directory):\n for name, file_name in self.resource_files_names.items():\n save_path = os.path.join(save_directory, file_name)\n shutil.copyfile(getattr(self, \"_%s\" % name), save_path)", "def save_all(self):\n outdir = tkFileDialog.askdirectory()\n # If they cancelled, there is no outdir set and we didn't save.\n # Return False to indicate \"don't quit\".\n if not outdir:\n return False\n # Get the full output file names\n fullpaths = [os.path.join(outdir, name)\n for name in files.INPUT_NAMES.itervalues()]\n # See if any of those files already exist\n exists = [os.path.exists(x) for x in fullpaths]\n # If a file exists and the user says not to overwrite, return True\n # to indicate we can quit.\n # If they say yes, we overwrite everything - there's currently no\n # mechanism for selective overwriting.\n if True in exists and not tkMessageBox.askyesno('Files exist',\n 'Do you wish to replace the existing files?'):\n return False\n # Write updated gain/voltage files for large cells\n filename = os.path.join(outdir, 'largeCellGains.txt')\n with open(filename, 'w') as file:\n self.detectors[NORTH_LARGE].write_gain_table(file)\n with open(filename, 'a') as file:\n self.detectors[SOUTH_LARGE].write_gain_table(file)\n # ... 
and for small cells\n filename = os.path.join(outdir, 'smallCellGains.txt')\n with open(filename, 'w') as file:\n self.detectors[NORTH_SMALL].write_gain_table(file)\n with open(filename, 'a') as file:\n self.detectors[SOUTH_SMALL].write_gain_table(file)\n self.qt.write(outdir)\n # Write the LeCroy scripts for large cells\n for i in [7005, 7006]:\n printer = lecroytools.Printer(i)\n printer.generate(self.detectors[NORTH_LARGE], outdir)\n for i in [7007, 7008]:\n printer = lecroytools.Printer(i)\n printer.generate(self.detectors[SOUTH_LARGE], outdir)\n # Write script for small cells\n smallcellscript.generate(self.detectors[NORTH_SMALL],\n self.detectors[SOUTH_SMALL],\n outdir)\n self.save_root(os.path.join(outdir, 'tree.root'))\n # Reset the modified flag so we don't prompt the user to save again\n self.image_window.modified = False\n # We saved, it's OK to quit now\n return True", "def save(self, path):\n\n if not isinstance(path, Path):\n path = Path(path)\n\n params = {\n 'model': self.__class__.__name__,\n 'elements': self.elements,\n 'r_cut': self.r_cut,\n 'fitted': self.gp.fitted,\n 'gp': {\n 'kernel': self.gp.kernel.kernel_name,\n 'n_train': self.gp.n_train,\n 'sigma': self.gp.kernel.theta[0],\n 'noise': self.gp.noise,\n 'r0': self.gp.kernel.theta[2]\n },\n 'grid': {\n 'r_min': self.grid_start,\n 'r_max': self.grid_end,\n 'r_num': self.grid_num,\n 'filename': {}\n } if self.grid else {}\n }\n\n gp_filename = \"GP_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npy\".format(\n p=params)\n\n params['gp']['filename'] = gp_filename\n self.gp.save(path / gp_filename)\n\n for k, grid in self.grid.items():\n key = str(k)\n grid_filename = \"GRID_{}_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npz\".format(\n key, p=params)\n params['grid']['filename'][key] = grid_filename\n grid.save(path / grid_filename)\n\n with open(path / \"MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json\".format(p=params), 'w') as fp:\n json.dump(params, fp, indent=4, cls=NpEncoder)\n\n print(\"Saved model with name: MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json\".format(p=params))", "def write_pickle(obj, path):\n with open(path, 'wb') as file:\n pickle.dump(obj, file)", "def save(self, folder_name, **kwargs):\n\n # Create the folder for saving the agent\n if not os.path.isdir(folder_name):\n os.makedirs(folder_name)\n\n # Save DQN and target DQN\n self.DQN.save(folder_name + '/dqn.h5')\n self.target_dqn.save(folder_name + '/target_dqn.h5')\n\n # Save replay buffer\n self.replay_buffer.save(folder_name + '/replay-buffer')\n\n # Save meta\n with open(folder_name + '/meta.json', 'w+') as f:\n f.write(json.dumps({**{'buff_count': self.replay_buffer.count, 'buff_curr': self.replay_buffer.current},\n **kwargs})) # save replay_buffer information and any other information", "def save_items(self, path):\n os.makedirs(path, exist_ok=True)\n np.save(os.path.join(path, 'ids'), self.item_ids)\n np.save(os.path.join(path, 'titles'), self.item_titles)\n np.save(os.path.join(path, 'embeddings'), self.item_vectors)\n np.save(os.path.join(path, 'categories'), self.item_categories)", "def export_patches(self):\n filename = tkFileDialog.asksaveasfilename(initialdir = self.cwd, title = \"Save patches\", filetypes = ((\"inp file\",\"*.inp\"),(\"all files\",\"*.*\")))\n if filename:\n patches = self.myGlycosylator.export_patches(self.linked_glycans)\n with open(filename, 'w') as f:\n f.write('\\n'.join(patches))", "def save(self, fname):\n pass", "def save(self, save_files: Iterable[str]) -> None:\n self.configurator.save_notes = \"\"\n\n 
ex_errs = self.aug.match(\"/augeas//error\")\n try:\n self.aug.save()\n except IOError:\n self._log_save_errors(ex_errs)\n raise\n\n # Force reload if files were modified\n # This is needed to recalculate augeas directive span\n if save_files:\n for sf in save_files:\n self.aug.remove(\"/files/\"+sf)\n self.aug.load()", "def _saveExperiment(self, experiment, path):\n Experiment.save(experiment, path);", "def writeFile(self, name, folder, collected_entry_list=[]):\n file_io = open(os.path.join(folder, \"system_%s.json\" % name), \"w\")\n json.dump(collected_entry_list, file_io, sort_keys=True, indent=2)\n file_io.close()", "def save(self):\n pickle.dump(self, open(self.path, \"wb\"))", "def _resolve_save(self, basename, what):\n relpath = os.path.join(self.dst, basename) + \".pickle\"\n print \"saving '{}'\".format(relpath)\n try:\n with open(relpath, \"wb\") as fp:\n pickle.dump(what, fp, protocol=2)\n except Exception, e:\n raise IOError(\"can not save '{}':\\n{}\".format(relpath, e))", "def save(self, path, name=None):\n if not os.path.exists(path):\n os.makedirs(path)\n name = name or self.name\n path = os.path.join(path, name)\n with open(path, 'wb') as pickle_file:\n pickle.dump(self, pickle_file)", "def save(self, path):\n (folder, filename) = os.path.split(path)\n (name, extension) = os.path.splitext(filename)\n\n if not name:\n raise ValueError, \"name is required\"\n\n path = os.path.join(folder, name + self.extension)\n f = open(path, \"wb\")\n f.write(self.contents)\n f.close()\n\n return path", "def save(self):\n joblib.dump(\n self.classifier, \"data/models/repeatsfinder/repeatsfinder.joblib\",\n )", "def save_fit_results(self, save_path: str = \"./fit_results.json\"):\n assert (\n self._fit_results\n ), \"There are no fit results to be saved, \\\n call fit method first or load the results from the file\"\n assert save_path.endswith(\".json\"), self.JSON_ASSERTION\n with open(save_path, \"w\") as fjson:\n json.dump(self._fit_results, fjson)", "def save_vocabulary(path, vocab):\n print('saving vocabulary..')\n with open(path, 'wb') as handle:\n pickle.dump(vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)\n print('vocabulary was saved successfully!')", "def saveScenarioHandler(self):\n\n file_path = self.saveFileExplorer(caption=\"Enter File Path for Scenario\")\n\n # save all active command files\n active_subsystems = self.scenarioController.getActiveSubsystems()\n for subsystem_controller in active_subsystems:\n\n self.saveHandler(subsystem_controller)\n\n self.scenarioController.writeScenarioFile(file_path)", "def save_battle(battle, battle_name):\n path = './data_reader/data/battles/' + battle_name\n\n with open(path, 'wb') as outfile:\n pickle.dump(battle, outfile, -1)", "def _save(self):\n\t\t\n\t\tdirectory = self.Output_path\n\n\t\t# replace with \n\t\t# file_name = hermes.mk_themis_file_name(themis_obj = self)\n\t\tfile_name = f'Themis_{self.CELL_ID[\"experiment\"]}_u{self.CELL_ID[\"unit\"]}_c{self.CELL_ID[\"cell\"]}_r{self.CELL_ID[\"run\"]}.pkl'\n\n\t\tsave_path = directory / file_name\n\n\t\t# Atomic saving (helpful?)\n\t\ttemp_path = save_path.with_suffix(save_path.suffix + '.tmp')\n\t\t\n\t\tself.SavePath = save_path\n\n\t\t\n\t\twith open(temp_path, 'wb') as f:\n\t\t\tpickle.dump(self, f)\n\n\t\ttemp_path.rename(save_path)\n\n\t\tprint(f'Saved {self.RUN_KEY} as {save_path}')", "def save(self, path):\n\n if not os.path.exists(path):\n os.makedirs(path)\n\n np.save(os.path.join(path, 'V.npy'), self.V.cpu().numpy())\n\n if self.W is not None:\n 
np.save(os.path.join(path, 'W.npy'), self.W.cpu().numpy())\n\n if self.vb is not None:\n np.save(os.path.join(path, 'v_bias.npy'), self.vb.cpu().numpy())\n\n if self.wb is not None:\n np.save(os.path.join(path, 'w_bias.npy'), self.wb.cpu().numpy())\n\n if self.dictionary is not None:\n self.dictionary.save(os.path.join(path, 'dictionary'))", "def save_map(self, path: str):\n self.folium_map.save(path)", "def save(self, path):\n\n if not isinstance(path, Path):\n path = Path(path)\n\n directory, prefix = path.parent, path.stem\n\n params = {\n 'model': self.__class__.__name__,\n 'elements': self.elements,\n 'r_cut': self.r_cut,\n 'fitted': self.gp.fitted,\n 'gp': {\n 'kernel': self.gp.kernel.kernel_name,\n 'n_train': self.gp.n_train,\n 'sigma': self.gp.kernel.theta[0],\n 'theta': self.gp.kernel.theta[1],\n 'noise': self.gp.noise\n },\n 'grid': {}\n }\n\n gp_filename = \"GP_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npy\".format(\n p=params)\n\n params['gp']['filename'] = gp_filename\n self.gp.save(path / gp_filename)\n\n with open(path / 'MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json'.format(p=params), 'w') as fp:\n json.dump(params, fp, indent=4, cls=NpEncoder)\n\n print(\"Saved model with name: MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json\".format(p=params))", "def save(self, path):\n if not is_dry():\n with open(path, 'w') as f:\n json.dump(self.to_dict(), f, indent=4)\n return path" ]
[ "0.56438905", "0.5583958", "0.5536748", "0.5505436", "0.5465258", "0.5457156", "0.5457156", "0.5457156", "0.5444318", "0.54246444", "0.54233265", "0.5392205", "0.53767943", "0.53724295", "0.5370799", "0.5340823", "0.5336824", "0.53367984", "0.5329921", "0.5313599", "0.530012", "0.53000766", "0.5293602", "0.52911687", "0.5284843", "0.52817106", "0.52779394", "0.524482", "0.52440864", "0.5243285", "0.5233446", "0.5208558", "0.5208558", "0.5202535", "0.5187145", "0.517412", "0.51612663", "0.5158378", "0.5154444", "0.51509297", "0.51466596", "0.5145717", "0.5139736", "0.51342237", "0.5130359", "0.5117961", "0.5111167", "0.50986683", "0.50920826", "0.50859433", "0.50724834", "0.5066048", "0.5056467", "0.505252", "0.5037453", "0.50336075", "0.50332594", "0.50174004", "0.4997988", "0.49938938", "0.49931592", "0.49843478", "0.49813512", "0.497863", "0.497542", "0.4963238", "0.49563545", "0.49560398", "0.49484795", "0.49450564", "0.49449155", "0.49435785", "0.49432182", "0.49431723", "0.49415067", "0.49384734", "0.4938042", "0.49299306", "0.4921953", "0.49205446", "0.4915903", "0.49125978", "0.49116236", "0.49045208", "0.49039364", "0.49026918", "0.49013704", "0.4900976", "0.4900239", "0.4894726", "0.48890337", "0.48883927", "0.4884705", "0.48819563", "0.4878465", "0.48757648", "0.4866042", "0.48593804", "0.4856945", "0.48532474" ]
0.59676516
0
Returns an 'allbatteriesincluded' regressor from a given path (folder).
def load(path: str):
    lprint(f"Loading regressor from: {path}")
    with open(join(path, "regressor.json"), "r") as json_file:
        frozen = json_file.read()
    thawed = jsonpickle.decode(frozen)
    thawed.models = []
    for i in range(thawed.num_channels):
        channel_path = join(path, f"channel{i}")
        lprint(f"Loading regressor model for channel {i} from: {path}")
        with open(join(channel_path, "regressor_model.json"), "r") as json_file:
            frozen_model = json_file.read()
        thawed_model = jsonpickle.decode(frozen_model)
        thawed_model._load_internals(channel_path)
        thawed.models.append(thawed_model)
    return thawed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_drivers(dirpath):\n\n return all_drivers", "def get_all(folder, filter_funs = []) :\n\n def apply_funs(x, funs) :\n \"\"\"Applies the filter functions.\"\"\"\n res = True\n for f in funs :\n res = f(x)\n if not res :\n break\n return res\n \n final = {}\n files = listdir(folder)\n print(\"Loading Spectras\")\n for f in files :\n try :\n spectra = Spectra(folder + \"/\" + f)\n print(\".\", end=\"\")\n except:\n continue\n if spectra == None :\n continue\n if not apply_funs(spectra, filter_funs) :\n continue\n pot_spectra = final.get(spectra.database_id, None)\n if not pot_spectra :\n final[spectra.database_id] = [deepcopy(spectra)]\n else :\n pot_spectra.append(deepcopy(spectra))\n return final", "def load_all_from_path(self, path):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\t#111: handle expanded paths\n\t\tpath = os.path.abspath(path)\n\t\t#http://stackoverflow.com/questions/301134/dynamic-module-import-in-python\n\t\tif os.path.abspath(path) == self.shutit_main_dir:\n\t\t\treturn\n\t\tif not os.path.exists(path):\n\t\t\treturn\n\t\tif os.path.exists(path + '/STOPBUILD') and not self.build['ignorestop']:\n\t\t\tself.log('Ignoring directory: ' + path + ' as it has a STOPBUILD file in it. Pass --ignorestop to shutit run to override.',level=logging.DEBUG)\n\t\t\treturn\n\t\tfor sub in glob.glob(os.path.join(path, '*')):\n\t\t\tsubpath = os.path.join(path, sub)\n\t\t\tif os.path.isfile(subpath):\n\t\t\t\tself.load_mod_from_file(subpath)\n\t\t\telif os.path.isdir(subpath):\n\t\t\t\tself.load_all_from_path(subpath)", "def _load_from_directory(path):\n values = []\n\n # Load brand names from all the text files in the provided folder.\n for root, _, files in os.walk(path):\n files = [f for f in files if not f[0] == \".\"]\n for f in files:\n with open(os.path.join(root, f)) as infile:\n for item in infile.readlines():\n values.append(item.strip('\\n'))\n\n return values", "def searchfiles(pattern='C:\\\\RoboDK\\\\Library\\\\*.rdk'):\n import glob\n return glob.glob(pattern)", "def from_folder(self, folder, include=None, exclude=None):\n\n self.folder = folder\n self.include = include\n self.exclude = exclude\n\n filepaths = []\n\n # crawl files in subfolders \n for root, dirs, files in os.walk(self.folder):\n for file in files:\n filepath = os.path.join(root, file)\n \n add_filepath = True\n if self.include is not None:\n add_filepath = any((_inc in filepath\n for _inc in self.include))\n if add_filepath:\n if self.exclude is not None:\n add_filepath = all((_exc not in filepath\n for _exc in self.exclude))\n\n if add_filepath:\n #append the file name to the list of paths\n filepaths.append(filepath)\n\n self._obj['filepath'] = filepaths\n \n return self", "def get_drawings(folder):\n # case insensitive in windows system, so \"dwg\" is ok\n return sorted(Path(folder).glob('**/*.dwg'))", "def get_available_patterns() -> list:\n path_folder = os.path.join(config.ROOT_PATH, config.FOLDER_PATTERNS)\n return [f.replace(\".cells\", \"\") for f in os.listdir(path_folder) if os.path.isfile(os.path.join(path_folder, f)) and f.endswith(\".cells\")]", "def eval_genuine(path):\n out = []\n with open(path, 'r') as fp:\n for line in fp:\n fields = line.rstrip().split()\n ii, tt = fields[:2]\n if tt == 'genuine':\n out.append(ii[2:-4]) # remove 'D_' and '.wav'\n\n return out", "def determine_modbase_models_from_modbase_directory( query , out_directory = 'modbase_models' , root_filename = '' ):\n # defaults for written files\n if not root_filename:\n root_filename = 'modbase_' + 
query\n if not out_directory:\n out_directory = './' # here!\n \n # ta da!\n return [i for i in os.listdir( out_directory ) if root_filename + '_model_' in i and i[-4:] == '.pdb']", "def _get_addpath_adv_all(self):\n return self.__addpath_adv_all", "def load_file_list(path=None, regx='\\.npz'):\n if path == False:\n path = os.getcwd()\n file_list = os.listdir(path)\n return_list = []\n for idx, f in enumerate(file_list):\n if re.search(regx, f):\n return_list.append(f)\n # return_list.sort()\n print('Match file list = %s' % return_list)\n print('Number of files = %d' % len(return_list))\n return return_list", "def list_load_entries(filepath):\n try:\n rewriter = rewriter_factory(filepath)\n dynamic_deps = [dep[6:] for dep in rewriter.dependencies if dep.startswith('@rpath')]\n return {'rpaths': rewriter.rpaths, 'libraries': dynamic_deps}\n except MachoError:\n return {'rpaths': [], 'libraries': []}", "def search_realmlist(PATH):\r\n folder_list = os.listdir(PATH)\r\n\r\n return [\r\n folder\r\n for folder in folder_list\r\n if not re.search(\"[.]\\w+\", folder) and 'SavedVariables' not in folder\r\n ]", "def gen_availablemods(dir_path):\n mod_dir = os.listdir(os.path.join(dir_path, 'modules'))\n mods = [i.replace('.py', '') for i in mod_dir if i.startswith('mod_') and i.endswith('.py')]\n\n live_or_dead_mods = [i for i in mods if not i.startswith('mod_live_') and not i.startswith('mod_dead_')]\n only_live_mods = sorted([i for i in mods if i.startswith('mod_live_')])\n\n available_mods = only_live_mods + live_or_dead_mods\n\n return available_mods", "def _find_all_houses(input_path):\n dir_names = [p for p in listdir(input_path) if isdir(join(input_path, p))]\n return _matching_ints(dir_names, r'^house_(\\d)$')", "def load(self, folder):\n # load the weights from input folder\n self.generator.load_weights('%s/generator.h5'%folder)\n self.critic.load_weights('%s/critic.h5'%folder)", "def get_included_files(self):\n return self._includedfiles", "def Galaxies(name_path):\n\tp = pathlib.Path(name_path)\n\tgalaxies = []\n\tfor f in p.glob('*.fits'):\n\t\thdu = fits.open(f)\n\t\tZ1= hdu[0].data\n\t\tgalaxies.append(Z1)\n \n\treturn galaxies", "def read_eval_folder(path, allow_pickle=True):\n filenames = tf.io.gfile.listdir(path)\n d = {}\n for fn in filenames:\n p = os.path.join(path, fn)\n with tf.io.gfile.GFile(p, 'rb') as f:\n d[fn[:-4]] = np.load(f, allow_pickle=allow_pickle)\n return d", "def __find_eligible_plugins_in_directory(cls, directory_to_search):\n\n plugin_files = [\n x\n for x in os.listdir(directory_to_search)\n if x.endswith(\".py\") and x[0:-3] != \"__init__\"\n ]\n return plugin_files", "def get_first_level_objs(subject_id, task, first_level_dir, regress_rt=False, beta=False):\n\n rt_flag, beta_flag = get_flags(regress_rt, beta)\n files = path.join(first_level_dir, subject_id, task, 'firstlevel*%s_%s*pkl' % (rt_flag, beta_flag))\n return glob(files)", "def grab_files(folder_path, ext='.h5', first4=None):\r\n folder_path=os.path.normpath(folder_path)\r\n if first4 in locals():\r\n # incorporate the excerpt of the filename\r\n dir_List=[f for f in os.scandir(folder_path) if f.is_file()==True and \r\n f.name[-len(ext):]==ext and f.name[:4]==first4] \r\n else:\r\n #do it without the excerpt\r\n dir_List=[f for f in os.scandir(folder_path) if f.is_file()==True and \r\n f.name[-len(ext):]==ext]\r\n \r\n return dir_List", "def GetBotsFromBuildersFile(builders_path):\n builders = ReadBuildersFile(builders_path)\n return GetBotsFromBuilders(builders)", "def 
get_spiders_files(spiders_directory=None):\n if spiders_directory is None:\n spiders_directory = dirname(__file__) + '/spiders/'\n return [file for file in glob.glob(spiders_directory + \"/*.py\")\n if isfile(file)\n and not file.endswith('__init__.py')]", "def load_glob(self, match):\n\t\tpluginlist = []\n\t\tfor m in match.split(','):\n\t\t\tfor path in glob.glob(m):\n\t\t\t\tif not \"__plugin__.py\" in path and not \"__init__.py\" in path:\n\t\t\t\t\tpluginlist.append(self.load(path))\n\t\treturn pluginlist", "def find_package_data(module, path):\n files = []\n exclude = re.compile(\"\\.pyc$|~$\")\n for dirpath, dirnames, filenames in os.walk(os.path.join(module,path)):\n for filename in filenames:\n if not exclude.search(filename):\n files.append(os.path.relpath(os.path.join(dirpath,filename),module))\n return {module:files}", "def _get_staging_area_files(wit_path, plus_root=True):\n\n for root, _, files in os.walk(os.path.join(wit_path, '.wit', 'staging_area'), topdown=False):\n for name in files:\n if plus_root:\n yield os.path.join(os.path.relpath(root, os.path.join(wit_path, '.wit', 'staging_area')), name)\n else:\n yield name", "def example_bigbeds():\n hits = []\n d = data_dir()\n for fn in os.listdir(d):\n fn = os.path.join(d, fn)\n if os.path.splitext(fn)[-1] == '.bigBed':\n hits.append(os.path.abspath(fn))\n return hits", "def include_dirs(self):", "def get_items(path, only=None):\n path = os.path.expanduser(path)\n ps = [os.path.join(path, n)\n for n in os.listdir(path)\n if not n.startswith('.') and len(n) == 4]\n ps = [p for p in ps if os.path.isdir(p)]\n if only is not None:\n ps = [p for p in ps if nmrex.utils.fname(p) in only]\n return ps", "def scanFiles(directory, includes = [\"*\"], excludes = []):\n\treturn scanAll(directory, includes, excludes)[1]", "def find_wavs(directory, pattern='**/*.wav'):\n return glob(os.path.join(directory, pattern), recursive=True)", "def register_benchmarks(directory=None):\n dirs = places_to_look() if directory is None else [directory]\n for directory in dirs:\n with os.scandir(directory) as scan:\n for entry in scan:\n filename = entry.name\n if (\n filename.startswith(\".\")\n or not entry.is_file()\n or not filename.endswith(\".py\")\n ):\n continue\n if (\n filename.startswith(\"benchmark\")\n or filename.endswith(\"benchmark.py\")\n or filename.endswith(\"benchmarks.py\")\n ):\n import_path(f\"{directory}/{filename}\")", "def _collect_bams(self, wildcards, library_name):\n folder_name = get_ngs_library_folder_name(self.parent.sheets, wildcards.library_name)\n for _, path_infix, filename in self.path_gen.run(folder_name, (\"bam\",)):\n yield os.path.join(self.base_path_in, path_infix, filename).format(**wildcards)", "def include(self: _R, *fn_exrps: str) -> _R:\n include_exprs = []\n include_exprs.extend(self.include_exprs)\n for fn_exrp in fn_exrps:\n if \"*\" not in fn_exrp and \".\" not in fn_exrp:\n fn_exrp = f\"{fn_exrp}/*\"\n include_exprs.append(fn_exrp)\n return self._copy(include_exprs=include_exprs, exclude_exprs=self.exclude_exprs)", "def parse_folder(self, path):\n\n for filename in os.listdir(path):\n self.parse_file(os.path.join(path, filename), filename)\n return self.country_dict, self.hre_dict, self.name_dict", "def get_all_bonn_budget_files(fPath, pattern='PlandatenErgebnisplan', end='csv'):\n rlt = []\n for f in os.listdir(fPath):\n if f.startswith(pattern) and f.endswith(end):\n rlt.append(os.path.abspath(os.path.join(fPath, f)))\n return rlt", "def load(path: str) -> Any:\n config = load_configs(path)\n 
config.reduce(config.MUTATIONS)\n config.reduce('_reduce')\n for reduces in config.output.get('_reduce') or []:\n for item in reduces or [None]:\n config.reduce(item)\n\n output = config.output\n for post_process in output.get('_post_process') or []:\n file_info = find(post_process)\n file_info.search(file_info.module)(output)\n return output", "def bus_routes():\n route_list = []\n os.chdir(\"../Data\")\n for file in glob.glob(\"*.csv\"):\n print(file)\n reader = csv.reader(open(file))\n for line in reader:\n route=extract_bus_route(line[3]) #Journey ID field\n if route not in route_list and route!=\"\": #error handling for extract_bus_routes function\n route_list.append(route)\n return route_list", "def iterate_definitions(self, dir_path):\n for filename in glob.iglob(dir_path, recursive=True):\n if os.path.isfile(filename):\n if filename.endswith('__init__.py') or not filename.endswith('.py'):\n continue\n self.account_definitions.append(filename)\n self.generate_sla_metrics()", "def included(path):\n if path.endswith(Env.IGNORED_TEST_DIRS):\n return False\n return path.endswith('.py') or os.path.isdir(path)", "def fromdirectory(directory):\n files = glob.glob(f\"{directory}/*.fits\")\n ret = LightCurve()\n ret.add_files(*(files)) \n return ret", "def concatenate_weather_files(dir_path):\n # import all the files as datasets\n fnames = get_weather_files(dir_path)\n ds_list = []\n for f in fnames:\n with xr.open_dataset(f, engine='netcdf4') as ds:\n ds_list.append(ds)\n ds_main = xr.concat(ds_list, dim='time')\n groups = ds_main.groupby('time')\n return groups", "def addAllergies(self):\n if int(self.pid)%100 < 85: # no allergies for ~ 85%\n exclusion = NO_ALLERGY.sub({\n 'exclusion':\"no known allergies\",\n 'exclusion_id':\"160244002\",\n }).done()\n self.data.append(SDMX.sub({'models':exclusion}, escape=False).done())\n else: # Sprinkle in some sulfa allergies\n al = DRUG_CLASS_ALLERGY.sub({\n 'reaction': \"skin rash\",\n 'reaction_id': \"271807003\",\n 'category': \"drug allergy\",\n 'category_id': \"416098002\",\n 'allergen': \"sulfonamide antibacterial\",\n 'allergen_id': \"N0000175503\",\n 'severity': \"mild\",\n 'severity_id': \"255604002\",\n }).done()\n self.data.append(SDMX.sub({'models':al}, escape=False).done())\n \n if int(self.pid)%2: # and throw in peanut allergies for every other patient\n al = FOOD_ALLERGY.sub({\n 'reaction': \"anaphylaxis\",\n 'reaction_id': \"39579001\",\n 'category': \"food allergy\",\n 'category_id': \"414285001\",\n 'allergen': \"peanut\",\n 'allergen_id': \"QE1QX6B99R\",\n 'severity': \"severe\",\n 'severity_id': \"24484000\",\n }).done()\n self.data.append(SDMX.sub({'models':al}, escape=False).done())", "def get_skins_and_extensions(base_dir):\n ext_paths = []\n for subdir in ['extensions', 'skins']:\n for name in os.listdir(os.path.join(base_dir, subdir)):\n if os.path.isdir(os.path.join(base_dir, subdir, name)):\n ext_paths.append(os.path.join(subdir, name))\n return ext_paths", "def example_bigwigs():\n hits = []\n d = data_dir()\n for fn in os.listdir(d):\n fn = os.path.join(d, fn)\n if os.path.splitext(fn)[-1] == '.bw':\n hits.append(os.path.abspath(fn))\n return hits", "def search_folder(self, folder: str, package: Optional[str]):\n\n if package is None and folder not in sys.path:\n sys.path.append(folder)\n\n for root, dirs, files in os.walk(folder):\n for filename in files:\n if filename.endswith(\".py\"):\n module = filename[:-3]\n sub_folder = root[len(folder) :]\n if sub_folder:\n sub_folder = sub_folder.strip(\"/\").replace(\"/\", 
\".\")\n\n if sub_folder:\n module = sub_folder + \".\" + module\n\n if package:\n module = package + \".\" + module\n\n imported = importlib.import_module(module)\n for _, cls_obj in inspect.getmembers(imported, inspect.isclass):\n if cls_obj.__name__ in self.class_cache:\n continue\n self.class_cache.add(cls_obj.__name__)\n\n if issubclass(cls_obj, Driver) and not inspect.isabstract(cls_obj):\n spec = inspect.getfullargspec(cls_obj.__init__)\n if len(spec.args) == 1:\n self.register(cls_obj)\n else:\n # Can't handle argument in constructor\n log.warning(f\"Invalid driver, __init__ with extra arguments: {module}\")", "def get_manifests(arcroot):\n manifests = []\n for root, dirs, files in os.walk(arcroot):\n if 'manifest.json' in files:\n manifests.append(os.path.join(root, 'manifest.json'))\n \n return manifests", "def load_from_folder(folder):\n refs = []\n for input_file in os.listdir(folder):\n if input_file.endswith(\".ref\"):\n refs.append(Reference(folder, input_file))\n return refs", "def all_possible_beards(paths):\n literal_paths = get_literal_beard_paths(paths)\n\n for path in literal_paths:\n for f in os.listdir(path):\n if is_module(os.path.join(path, f)):\n yield os.path.basename(f)", "def get_all_metrics(dir):\r\n file_lst = os.listdir(dir)\r\n file_lst = list(filter(lambda x: re.findall(r'\\.csv$',x), file_lst))\r\n return file_lst", "def get_backup(serverdir: Path) -> dict[str, List[Path]]:\n\n world = serverdir / 'world'\n sb = serverdir / 'spiffy_backup'\n\n def _read_specification() -> dict[str, List[Path]]:\n with sb.open() as file:\n entries = file.read().split()\n\n specs = {'include': [], 'exclude': []}\n\n for e in entries:\n if e.startswith('-'):\n e_path = serverdir / e[1:]\n if e_path.exists():\n specs['exclude'].append(e_path)\n else:\n if e.startswith('+'): # Seems logical one might do this, so why not?!\n e = e[1:]\n i_path = serverdir / e\n if i_path.exists():\n specs['include'].append(i_path)\n\n return specs\n\n match world.exists(), sb.exists():\n case True, True:\n specification = _read_specification()\n if world not in specification['include']:\n specification['include'].append(world)\n case True, False:\n specification = {'include': [world], 'exclude': []}\n case False, True:\n specification = _read_specification()\n case False, False:\n raise NothingToBackup(serverdir)\n\n return specification", "def glob1(self, dirname, pattern):\n names = self.listdir(dirname)\n if pattern[0] != '.':\n names = filter(lambda x: x[0] != '.',names)\n return fnmatch.filter(names, pattern)", "def include_regexes(self) -> Optional[List[str]]:\n return pulumi.get(self, \"include_regexes\")", "def load(self):\n\t\t# Initialize empty list\n\t\tdata_files = []\n\n\t\t# Append the Drusen files to the list\n\t\tfor single_file in os.listdir(self.data_dir):\n\t\t\tdata_files.append(single_file)\n\t\treturn data_files", "def load_all_dust(dust_out_dir):\n dust_file_names = os.listdir(dust_out_dir)\n\n dust_file_names = [f for f in dust_file_names if not f.startswith('.')]\n\n all_output = []\n\n for dust_file in sorted(dust_file_names):\n filepath = os.path.join(dust_out_dir, dust_file)\n all_output.append(load_dust_output(filepath))\n\n return all_output", "def include_filter(incl_filter, paths):\n hits = set()\n for p in paths:\n if re.search(incl_filter, p):\n hits.add(p)\n\n return hits", "def scan_folder(folder):\n LOGGER.debug(\"Scanning folder: %s\", folder)\n for file in os.listdir(folder):\n if file.endswith(\".csv\"):\n yield os.path.join(folder, file)", "def 
scanDirectories(directory, includes = [\"*\"], excludes = []):\n\treturn scanAll(directory, includes, excludes)[2]", "def get_exlusions(self):\n files = os.listdir(self.exclusions_path)\n for filename in files:\n image = cv2.imread(self.exclusions_path + filename)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (21,21), 0)\n\n if filename.startswith('day'):\n self.gray_refs['day'].append(gray)\n elif filename.startswith('night'):\n self.gray_refs['night'].append(gray)", "def scan_plugin(self):\n pluginpath=_module_path()\n plugins=[]\n for f in os.listdir(pluginpath):\n if os.path.isfile(os.path.join(pluginpath,f)) and os.path.splitext(os.path.join(pluginpath,f))[-1]=='.py' :\n if 'plugin_' in os.path.basename(f):\n logger.debug(\"found plugin : %s\",f)\n plugins.append(f)\n return plugins", "def find(self, path, all=False):\n matches = []\n for app in self.apps:\n app_location = self.storages[app].location\n if app_location not in searched_locations:\n searched_locations.append(app_location)\n match = self.find_in_app(app, path)\n if match:\n if not all:\n return match\n matches.append(match)\n return matches", "def instances_in(directory):\n items = os.listdir(directory)\n instances = [e for e in items if e.endswith('.cnf') or e.endswith('.cnf.gz')]\n instances = ['{}/{}'.format(directory, e) for e in instances]\n return instances", "def scan_fixtures(path):\n results = list()\n for root, dirs, files in os.walk(path):\n relative_path = root.replace(path + \"/\", \"\")\n if relative_path.startswith(\"static\") or relative_path.startswith(\"theme\"):\n continue\n\n for f in files:\n if not f.endswith(\".json\"):\n continue\n\n app_name = os.path.basename(os.path.dirname(relative_path))\n\n results.append((app_name, f, relative_path))\n\n return results", "def find_modules(x):\n return Path(x).rglob('*.py')", "def get_include(path):\n if path not in include_cache:\n full_path = os.path.join(\"templates\", \"includes\", path)\n\n with open(full_path, \"r\") as include_file:\n include_file_data = include_file.read()\n\n include_cache[path] = include_file_data\n\n return include_cache[path]", "def get_gswe_paths(dirpath, extension = '.tif'):\n\n\textension = '*' + extension\n\tpath = os.path.join(dirpath, extension)\n\tfile_path_list = glob.glob(path)\n\tfile_path_list.sort()\n\treturn file_path_list", "def get_bibfiles(folder: str) -> t.List[str]:\n full_pathname = os.path.normpath(os.path.abspath(folder))\n bib_files = []\n for f in os.listdir(full_pathname):\n fullname = os.path.join(full_pathname, f)\n if f.endswith(\".bib\") and os.path.isfile(fullname):\n logging.debug(f'get bibfile \"{f}\" from directory \"{full_pathname}\"')\n bib_files.append(fullname)\n return bib_files", "def recurse(path):\n for dirpath, dirnames, filenames in os.walk(path):\n for filename in filenames:\n if filename.endswith('.robot'):\n filepath = os.path.join(dirpath, filename)\n reformat(filepath)", "def get_maps(path: str, ext: str) -> list:\n result = [\n y for x in os.walk(path) for y in glob(os.path.join(x[0], \"*.{}\".format(ext)))\n ]\n\n return result", "def get_batches(self, check_for_imported=True):\n\n #return [elem for elem in listdir(IN_PATH) if (isdir(\"%s%s\" % (IN_PATH, elem))) and (elem != \"KEEP\")]\n #return [{'name': (elem for elem in listdir(IN_PATH) if elem != \"KEEP\"), 'imported': False}]\n return [{'name': elem, 'imported': self.is_batch_already_imported(elem) if check_for_imported else None} for elem in listdir(IN_PATH) if not elem in INODES_IGNORE]", 
"def Whitelisted(path):\n return os.path.basename(path) == 'OWNERS'", "def getImmediateSubdirectories(dir):", "def expand_path(__file__, path_with_globs):\n return glob.glob(relative_path(__file__, path_with_globs))", "def summarize(path: str) -> dict:\n results = parse_bactopia_directory(path)", "def readFullRelaxFiles(folder_path):\n\n run_arr = []\n Nrun_arr = []\n dod_arr = []\n crate_arr = []\n count=0\n\n # find number of files that starts with run\n # (this is the data file we want to read)\n for file in os.listdir(folder_path):\n if file.startswith(\"relaxrun\"):\n count+=1\n\n # order the data files by run number, so we get descending crates\n Nrun=1\n for i in range(count+5):\n for file in os.listdir(folder_path):\n if file.startswith(\"relaxrun_\"+str(Nrun)+\"-\"):\n run_arr.append(file)\n dod = re.search('dod=(.*).txt', file).group(1)\n crate = re.search('Crate=(.*)_',file).group(1)\n Nrun_arr.append(np.round(int(Nrun),decimals=0))\n dod_arr.append(float(dod))\n crate_arr.append(float(crate))\n Nrun+=1\n print(len(run_arr))\n\n return run_arr, Nrun_arr, dod_arr, crate_arr", "def find_regulation_files(self, stub_base, regulation):\n\n # regulations-parser outputs JSON files in the following directory\n # structure:\n # regulation/\n # [regulation part number]/\n # [notice number]\n # ...\n # notice/\n # [notice number]\n # ...\n # layer/\n # [layer name]/\n # [regulation part number]/\n # [notice number]\n # ...\n # diff/\n # [regulation part number]/\n # [notice number]/\n # [notice number]\n # ...\n #\n\n regulation_files = []\n notice_names = None\n\n # Get the regulation/ JSON and notice numbers\n logger.info(\"getting files for regulation {}...\".format(regulation))\n regulation_base = os.path.join(stub_base, 'regulation', regulation)\n if not os.path.isdir(regulation_base):\n logger.error(\"Can't find regulation JSON for {} at {}\".format(regulation, regulation_base))\n return []\n for dirname, subdirs, files in os.walk(regulation_base):\n notice_names = files\n regulation_files.extend([os.path.join(dirname, f) for f in files])\n\n # Get notice JSON\n logger.info(\"getting notice files for regulation {}...\".format(regulation))\n for dirname, subdirs, files in os.walk(os.path.join(stub_base, 'notice')):\n # Notices did not used to be stored in a regulation-part-number\n # subdirectory. 
Use notice_names, from above, to just grab the\n # ones we want.\n notice_files = [os.path.join(dirname, f) for f in files if f in notice_names]\n regulation_files.extend(notice_files)\n\n # Check to see if we have newer-generated notices that *are*\n # in a regulation-part-number subdirectory.\n if dirname.endswith(regulation):\n notice_files = [os.path.join(dirname, f) for f in files if f in notice_names]\n regulation_files.extend(notice_files)\n \n\n # Get layer JSON\n logger.info(\"getting layer files for regulation {}...\".format(regulation))\n for dirname, subdirs, files in os.walk(os.path.join(stub_base, 'layer')):\n # For layers, dig into each subdirectory of the layer path until\n # we find one with our regulation part number.\n if dirname.endswith(regulation):\n layer_files = [os.path.join(dirname, f) for f in files if f in notice_names]\n regulation_files.extend(layer_files)\n\n # Get diff JSON\n logger.info(\"getting diff files for regulation {}...\".format(regulation))\n for dirname, subdirs, files in os.walk(os.path.join(stub_base, 'diff', regulation)):\n # For diffs, each regulation directory has a notice directory\n # with json files corrosponding to each other notice.\n diff_files = [os.path.join(dirname, f) for f in files]\n regulation_files.extend(diff_files)\n\n return regulation_files", "def from_path(path: str):\n if not os.path.exists(path):\n raise IOError(ctext(f\"'{path}' does not exist!\", error))\n\n load_file = None\n\n for r, d, f, in os.walk(path):\n for file in f:\n if '.smurfs' in file:\n load_file = os.path.join(r, file)\n\n if load_file is None:\n raise IOError(ctext(f\"Can't find any .smurfs file in {path}!\", error))\n\n return pickle.load(open(load_file, 'rb'))", "def walk_dir(path):\r\n\tassets = []\r\n\r\n\tfor file in os.listdir(path):\r\n\t\tif os.path.isdir(path + \"/\" + file):\r\n\t\t\tif not file.startswith(\".\"):\r\n\t\t\t\t# Ignore . 
dirs (e.g .svn)\r\n\t\t\t\tassets.extend(walk_dir(path + \"/\" + file))\r\n\t\telif file.endswith('.blend'):\r\n\t\t\tassets.append(path + \"/\" + file)\r\n\r\n\treturn assets", "def get_scraper_list(scraper_dir):\n scraper_files = [name for name in os.listdir(scraper_dir) if name.endswith('.py')]\n scrapers = []\n for filename in scraper_files:\n scr = filename.replace('.py', '')\n scr_inst = SourceFileLoader(fullname=scr, path='{0}/{1}'.format(scraper_dir, filename)).load_module()\n if hasattr(scr_inst, \"get_deals\"):\n scrapers.append(scr_inst)\n\n return scrapers", "def scandir(path_):\n return os.listdir", "def import_registered_jammers(filename=\"jammerskillz.csv\"):\n\tfrom utils import gf_fieldnames\n\treturn import_jammers(filename, fieldnames=gf_fieldnames())", "def get_files_by_folder(path):\n\n f = []\n for (dirpath, dirnames, filenames) in walk(path):\n f.extend(filenames)\n break\n return f", "def find_extra_include(file_name):\r\n extra_includes = []\r\n with open(file_name) as f:\r\n for m in re.finditer(regex.extra_include, f.read()):\r\n extra_includes.append(m.groups(1))\r\n return extra_includes", "def get_templates(template_folder, search_term=''):\n return [template for template in os.listdir(template_folder)\n if search_term in template]", "def scan(self,path):\n if os.path.exists(path):\n self.path=path\n logger.debug('builder.scan: %s' %self.path)\n self.pages=scan_path(path,self.ext_lst)\n else:\n logger.error('builder.scan: path does not exist : %s' %path)\n return self.pages", "def identificator():\n register = []\n for file in os.listdir(\".\"):\n if file.endswith(\".svg\"):\n register.append(file)\n\n register.sort()\n return register", "def get_l1_seeds(process,path):\n for modname in path.moduleNames():\n mod = getattr(process,modname)\n if mod.type_() == \"HLTL1TSeed\" and mod.L1GlobalInputTag.value()!=\"hltGtStage2ObjectMap\":\n return parse_l1_logical_express(mod.L1SeedsLogicalExpression.value())", "def get_samples_file(foldername, filter=None):\n samples = []\n for file in os.listdir(foldername):\n if filter and file.find(filter) == -1:\n continue\n for sample in sfile(foldername + '/' + file, None).get_samples():\n samples.append(sample)\n return samples", "def get_included_files(space):\n files = space.ec.interpreter.included_files\n arr_list = []\n for f in files:\n arr_list.append(space.newstr(f))\n return space.new_array_from_list(arr_list)", "def get_known_alleles(allele_dir):\n known_alleles = {}\n\n alleles = [f for f in os.listdir(allele_dir) if '.f' in f]\n\n for allele in alleles:\n \n name = mistutils.basename(allele)\n\n path = os.path.join(allele_dir, allele)\n\n known = init_sets(path)\n known_alleles[name] = known\n\n return known_alleles", "def listFeatures() :\n global features\n features = [feature.split(\".\")[0] for feature in os.listdir(os.path.abspath(__file__)[:-11])\n if feature.endswith(\".py\") and feature != \"__init__.py\"]", "def parse_folder(self, path):\n\n for filename in os.listdir(path):\n self.parse_file(os.path.join(path, filename), filename)\n return self.relations", "def get_bands(self, roi_dir, **kwargs):\n dset = self.dataset\n dset.load()\n band_kwargs = dict(emin=dset.emin, emax=dset.emax, minROI=dset.minROI, maxROI=dset.maxROI)\n band_kwargs.update(kwargs)\n radius = band_kwargs['minROI'] # fixed radius now\n bandlist = []\n for band in dset.dmap:\n emin,emax, event_type = band.emin(), band.emax(), band.event_class()&5\n if (emin + 1) < band_kwargs['emin'] or (emax - 1) >band_kwargs['emax']: continue\n #print 
(int(emin), event_class)\n energy= np.sqrt(emin*emax)\n bandlist.append( bands.BandSet(band, self.psfman(event_type,energy), self.exposureman(event_type,energy), \n roi_dir, radius))\n return np.asarray(bandlist)", "def get_files(self, include=[], exclude=[]):\r\n for (basepath, dpaths, fpaths) in os.walk(self.path, topdown=True):\r\n for subpath in dpaths + fpaths:\r\n path = os.path.join(self.chroot_path(basepath), subpath)\r\n if filter_path(path, include, exclude):\r\n yield path", "def filter(self, f, include_directories=False):\n return self._filter(f=f, include_directories=include_directories)", "def ReadRecipesFromDirectory(self, path: str) -> None:\n for file_path in glob.glob(os.path.join(path, '*.json')):\n self.ReadRecipeFromFile(file_path)", "def get_processed_libraries(self, project=None, sub_path=\"inputFastqs\"):\n flowcell_path = self.get_flowcell_path()\n projects = self.get_processed_projects()\n if project is not None:\n logger.debug(\"subsetting projects\")\n projects = [p for p in projects\n if re.search(project, p)]\n logger.debug(\"collecting list of libraries\")\n logger.debug(\"searching in projects {}\".format(projects))\n return [l for p in projects\n for l in os.listdir(os.path.join(flowcell_path, p, sub_path))\n if len(parsing.get_library_id(l))]", "def scanAll(directory, includes = [\"*\"], excludes = []):\n\tfrom os import walk\n\tfrom os.path import join\n\t\n\texcludes = normalizeList(excludes)\n\tincludes = normalizeList(includes)\n\tdirectory = adaptPath(directory)\n\t\n\tdef add(filename, includes, excludes, lst):\n\t\tfrom fnmatch import fnmatchcase\n\t\tfrom os.path import split\n\t\texcluded = None\n\t\tincluded = None\n\t\tadding = False\n\t\t\n\t\tfor exclude in excludes:\n\t\t\tif fnmatchcase (filename, exclude):\n\t\t\t\texcluded = (filename, exclude)\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tif fnmatchcase (split(filename)[1], exclude):\n\t\t\t\t\texcluded = (split(filename)[1], exclude)\n\t\t\t\t\tbreak\n\t\t\n\t\tfor include in includes:\n\t\t\tif fnmatchcase (filename, include):\n\t\t\t\tincluded = (filename, include)\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tif fnmatchcase (split(filename)[1], include):\n\t\t\t\t\tincluded = (split(filename)[1], include)\n\t\t\t\t\tbreak\n\t\t\n\t\tif excluded == None and included != None:\n\t\t\tadding = True\n\t\telif excluded != None and included == None:\n\t\t\tpass\n\t\telif excluded != None and included != None:\n\t\t\ta = fnmatchcase(excluded[1],included[1])\n\t\t\tb = fnmatchcase(included[1],excluded[1])\n\t\t\t\n\t\t\tif a and not b: pass\n\t\t\telif not a and b: adding = True\n\t\t\telif a and b: adding = True\n\t\tif adding:\n\t\t\tlst.append(filename)\n\t\n\tif isString(includes) : includes = [includes]\n\telif type(includes) == type(None): includes = []\n\t\t\n\tif isString(excludes) : excludes = [excludes]\n\telif type(excludes) == type(None): excludes = []\n\t\n\tfiles = []\n\tdirectories = []\n\tfor i in walk(directory):\n\t\tdirpath, dirnames, filenames = i\n\t\tfor dirname in dirnames:\n\t\t\tadd(join(dirpath, dirname), includes, excludes, directories)\n\t\t\t\n\t\tfor filename in filenames:\n\t\t\tadd(join(dirpath, filename), includes, excludes, files)\n\tall_ = directories + files\n\treturn all_, files, directories" ]
[ "0.5379837", "0.5044664", "0.49726188", "0.49551898", "0.4940935", "0.4874986", "0.48747087", "0.47927487", "0.47669888", "0.47309345", "0.46937668", "0.46731636", "0.4670935", "0.46418554", "0.46351552", "0.45951054", "0.45918998", "0.45842183", "0.4548384", "0.45425668", "0.45298713", "0.45234245", "0.45231512", "0.45099726", "0.44971707", "0.44933578", "0.44915095", "0.44828245", "0.44767502", "0.44640693", "0.44566014", "0.44534606", "0.44493192", "0.444686", "0.4444244", "0.44375375", "0.44252074", "0.44094193", "0.44071752", "0.438914", "0.43729833", "0.4361225", "0.43561336", "0.43359935", "0.43324634", "0.43189254", "0.43145153", "0.4313272", "0.4302368", "0.4301753", "0.4301376", "0.4298052", "0.42875397", "0.42862582", "0.42844236", "0.42795283", "0.42778787", "0.427516", "0.42699352", "0.42689753", "0.42656416", "0.42612752", "0.426054", "0.42572346", "0.42526674", "0.4244494", "0.42370886", "0.42365086", "0.42364636", "0.42340675", "0.42331538", "0.42281207", "0.42280197", "0.4227021", "0.4223291", "0.4217878", "0.42108545", "0.42061898", "0.4202922", "0.42023352", "0.42014492", "0.41995353", "0.41989598", "0.41948494", "0.41933218", "0.4187621", "0.41823077", "0.4180305", "0.41796732", "0.4179466", "0.4172523", "0.41700742", "0.4166592", "0.41634452", "0.41633362", "0.4160602", "0.41594458", "0.4158122", "0.41574392", "0.4156768" ]
0.5162948
1
simulation of a single game
def mc_trial(board, player):
    if len(board.get_empty_squares()) > 0:
        gra_w_toku = True
    else:
        gra_w_toku = False
    while gra_w_toku:
        tupka = random.choice(board.get_empty_squares())
        board.move(tupka[0], tupka[1], player)
        status = board.check_win()
        if status == player or status == provided.DRAW:
            gra_w_toku = not gra_w_toku
        player = provided.switch_player(player)
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_simulation(self, state):\n \"*** YOUR CODE HERE ***\"\n player = 0\n visited_states = [(player, state)]\n depth_limited = self.depth != -1\n depth = self.depth\n expand = True\n while not visited_states[-1][1].isWin() and not visited_states[-1][1].isLose():\n if depth_limited and depth == 0: break\n state = self.UCB1(state, player) # Selection & Simulation\n if expand and state not in self.plays: # Expansion\n expand = False\n self.plays[state] = 0\n self.wins[state] = 0\n visited_states.append((player, state))\n player = (player + 1) % state.getNumAgents()\n if not expand and depth_limited and player == 0: depth -= 1\n \n for player, state in visited_states:\n if state in self.plays: # Not simulated nodes\n self.plays[state] += 1\n eval = self.evaluationFunction(visited_states[-1][1])\n if depth_limited:\n if player == 0: self.wins[state] += eval\n if player != 0: self.wins[state] -= eval\n else:\n if player == 0: self.wins[state] += eval\n if player != 0: self.wins[state] += (1 - eval)", "def run_simulation(self, num_games=10):\n for _ in range(num_games):\n self.result.append(self.single_game())", "def test_single_game_works(self):\n sim = ss.Simulation(seed=154)\n game1 = sim.single_game()\n sim = ss.Simulation(seed=79)\n game2 = sim.single_game()\n assert game1 != game2, 'Your method single_game is not working.'", "def simulate(self):\r\n\t\tprint(\"##################################\")\r\n\t\tprint(\"SIMULATING GAME - SpaceInvaders..\")\r\n\t\tprint(\"##################################\")\r\n\t\t\r\n\t\t# Play 3 episodes:\r\n\t\tfor i in range(3):\r\n\t\t\tprint(\"Playing Episode %d\" % i)\r\n\t\t\tstate = self.env.reset()\r\n\t\t\t#self.env.render()\r\n\t\t\tdone = False\r\n\t\t\ttot_reward = 0\r\n\t\t\tstate,_ = stack_frames(self.stack_size,self.stacked_frames, \r\n\t\t\t\t\t\t\t\t\t\tstate, True)\r\n\t\t\t# play until dead.\t\t\t\r\n\t\t\twhile not done:\r\n\t\t\t\t# get the value predicted by the model and perform that action.\r\n\t\t\t\t# keras conv2d expects a 4D input. So add an empty axis. \r\n\t\t\t\tstate = np.expand_dims(state, axis=0)\r\n\t\t\t\t# predict action directly from the saved neural network.\r\n\t\t\t\taction = np.argmax(self.dqn.getModel().predict(state)[0])\r\n\t\t\t\t# perform that action.\r\n\t\t\t\tstate, reward, done, _ = self.env.step(action)\r\n\t\t\t\tself.env.render()\r\n\t\t\t\tstate,_ = stack_frames(self.stack_size,self.stacked_frames, \r\n\t\t\t\t\t\t\t\t\t\tstate, False)\r\n\t\t\t\ttot_reward+=reward\r\n\t\t\tprint(\"Reward: \", tot_reward)\r\n\t\tself.env.close() # to avoid sys.meta_path error\r", "def play_game():\n pass", "def run():\r\n \r\n match = a4_acc.Game() # Instantiate a Game object \r\n setup(match)\r\n\r\n if constants.SHOW_GRAPHICS:\r\n axes= startGraphics(match.board) #step 0\r\n \r\n \r\n for k in range(constants.STEPS):\r\n update(match)\r\n updateGraphics(board, k, caxes)\r\n \r\n ########\r\n # TO DO: \r\n # Simulate game given the intial state for constants.STEPS iterations\r\n \r\n # Example code to call the updateGraphics function; the second argument\r\n # needs to be replaced:\r\n # if constants.SHOW_GRAPHICS:\r\n # updateGraphics(match.board, None, axes) \r\n \r\n # Do not change or add code below here for function run\r\n endNow= raw_input('Press ENTER to continue.')", "def make_simulation(self):\n pass", "def test_run_sim_1():\n rnd = rand.Arrivals(36, 41)\n sim.run_sim(3, 2, 5, 6, 22, rnd)", "def test_runGame(self):\n # this is tested by playing the game. 
No good way to unit test this.\n pass", "def play_game(self):\n TF = self.TF\n # keep updating\n actions = collections.defaultdict(dict)\n for i in range(10):\n for j in range(self.N):\n actions[i][j] = 0\n\n sums = []\n for time in range(self.MAX):\n print(\"begin time epoch: \" + str(time))\n train_state_pool = collections.defaultdict(dict)\n flow_num = 0\n sum_all = 0\n for i in TF.keys():\n for j in TF[i].keys():\n for agent in self.Ns:\n actions[flow_num][agent.id] = random.randint(0, agent.n_actions - 1)\n\n # update states to ss_\n sum_all = self.update_state(flow_num, actions)\n\n flow_num += 1\n\n sums.append(sum_all)\n print('cut-random: ' + str(sum_all))\n if time % 10000 == 0 and time != 0:\n str1 = 'cut-mini-random' + str(time) + '.txt'\n file = open(str1, 'w')\n file.write(str(sums))\n file.close()", "def test_run_sim():\n rnd = rand.Arrivals(31, 40)\n sim.run_sim(2, 1, 3, 4, 24, rnd)", "def run(self, GameState):\n pass", "def main():\n game = TinkerGame()\n game.setup()\n while game.calculate_points() > 0 and not game.game_over:\n game.play()\n game.end()", "def simulate(self):\n if not self.setup:\n self.snakeSetup(False)\n\n s = self.s\n clock = self.clock\n global score\n\n if not gameOver:\n #pygame.time.delay(50)\n #clock.tick(10)\n self.checkMove(s);\n s.simulate();\n self.score = score\n\n else:\n self.gameOver = True;\n self.score = score;\n\n return s.raycast()", "def GAME_LOOP():\n pass", "def simulate(self):\r\n while self.t < self.T:\r\n plays = [(int)(player.play()) for player in self.players] # plays of all players\r\n obs, rews = self.simulate_single_step(plays) # observations of all players\r\n for i in range(self.M):\r\n self.players[i].update(plays[i], obs[i]) # update strategies of all player\r\n if self.players[i].phase == self.players[i].COMMUNICATION: # If communication starts\r\n self.communication_flag = True\r\n reward_one_round = self.reward_function(rews)\r\n self.rewards_record.append(reward_one_round) # list of rewards\r\n self.t += 1\r\n if self.communication_flag:\r\n self.communication()\r\n self.communication_flag = False", "async def game(self):\n pass", "def Gameloop():", "def run_game_logic(self):\n pass", "def all():\n lab = test_loading()\n\n for _ in range(1):\n print('🦅🐀🐙')\n\n test_spawn(lab)\n\n pc = test_spawn_player(lab)\n\n while True:\n pc.store_move(PlayerMove(random.choice(['walk left', 'walk up', 'walk down', 'walk right'])))\n test_turn_ai_and_players(lab)\n if input() == '0':\n break", "def test_single_game_seed_works(self):\n sim = ss.Simulation(seed=23)\n game1 = sim.single_game()\n sim = ss.Simulation(seed=23)\n game2 = sim.single_game()\n assert game1 == game2, 'Your seed in Simulation class is not working.'", "def play_simulation(self):\n self._play(self._move_comp_person)", "def game_play(self):", "def play_game():\n pass", "def simulate(player,environment,n_trials=1000,verbose=False):\n environment.player = player\n rewards = []\n \n for i in range(1,n_trials+1):\n \n if i % (n_trials/5) == 0:\n if verbose:\n print (\"Loading game {}\".format(i))\n try:\n result = environment.play_game()\n rewards.append(result)\n except Exception:\n tb.print_exc(file=sys.stdout)\n \n return rewards", "def setup_game(self):", "def simulate(self):\n while self.character_1.is_alive and self.character_2.is_alive:\n # flip a coin (0,1), if 1 player 1 attacks\n if random.randint(0, 1):\n self.turn(self.character_1, self.character_2)\n else:\n self.turn(self.character_2, self.character_1)\n\n print('_____-----<< -*- >>-----_____')\n 
time.sleep(.5)\n # if a character dies print final stats of winner\n if self.character_1.is_alive:\n print(f'{self.character_1.name} has won!! o.o7\\nfinal stats:')\n print(self.character_1)\n else:\n print(f'{self.character_2.name} has won!! o.o7\\nfinal stats:')\n print(self.character_2)", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline= True ) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit", "def play_one_round(self):\r\n new_solutions = self.breeder.breed(self.solutions)\r\n self.solutions.clear()\r\n self.solutions.extend(new_solutions)\r\n self.mutation_maker.mutate(self.solutions)\r\n self.round += 1\r\n self.simulation_stats.add_stats(self.round, self.solutions)\r\n if self.simulation_viz is SimulationViz.FRONT:\r\n self.report_progress()", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit", "def main():\n g = Game(800, 600)\n g.start()", "def play(self):\n utilities = {\n player: []\n for player\n in self.players\n }\n start_time = time.time()\n prev_print = 0\n for j in range(self.n_games):\n random.shuffle(self.players)\n initial_state = self.Game(\n self.players\n )\n contr = ahorn.Controller(\n initial_state\n )\n final_state = contr.play()\n for player in self.players:\n utilities[player].append(final_state.get_utility(player))\n\n elapsed = time.time()-start_time\n elapsed_since_print = time.time()-prev_print\n if self.verbose and ((elapsed_since_print > self.verbose_seconds) or j == self.n_games-1):\n prev_print = time.time()\n print(\"{}\".format(str(self.Game)))\n print(\n \"Game {} out of {} in {:2.1f}s ({:2.1f}s per game)\".format(\n j+1,\n self.n_games,\n elapsed,\n elapsed/(j+1)\n )\n )\n\n print(\"=\"*25)\n for player in sorted(self.players):\n low, mid, high = Arena.bootstrap(\n utilities[player],\n func=statistics.mean,\n confidence=self.confidence\n )\n print(\"{}\\t|\\t{:2.3f}/{:2.3f}/{:2.3f}\".format(\n str(player),\n low,\n mid,\n high\n ))\n print(\"\")\n result = {\n player: Arena.bootstrap(\n utility,\n func=statistics.mean,\n confidence=self.confidence\n )[1]\n for player, utility\n in utilities.items()\n }\n return result", "def _run_one_game(self):\n sum_reward = 0\n done = False\n state = torch.tensor(self.env.reset(), device=device).view(1, -1)\n losses = list()\n\n while not done:\n\n # Choose action in function of observation and play it\n action = self._select_action(state)\n next_state, reward, done, _ = self.env.step(action.item())\n\n sum_reward += reward\n next_state = torch.tensor(next_state, device=device).view(1, -1)\n reward = torch.tensor([reward], device=device)\n done = torch.tensor([done], device=device)\n \n # Add transition to memory\n self._add_to_memory(state, action, next_state, reward, done)\n\n # Compute loss\n loss = self._optimize_model()\n losses += [loss]\n \n # Prepare next state\n state = next_state\n\n # Wait time_to_sleep second so the user can view the state\n 
sleep(self.time_to_sleep)\n \n\n return sum_reward, mean(losses)", "def simulate(self):\n score = [0 for _ in range(N_PLAYERS)]\n self.sim.play_random_game()\n w = self.sim.winner\n if w in (0,1):\n score[w] += 1\n return np.array(score)", "def main():\n play_game(progression)", "def play_game():\n board = create_board()\n while True:\n for player in [1, 2]:\n random_place(board, player)\n result = evaluate(board)\n if result != 0:\n return result", "def main():\n even_game()", "def play(self): # TODO -- batches of games\n if self.terminal:\n raise Exception(\"This pit has already been played!\")\n\n # Let the models play a number of games\n for duel in range(self.num_duels):\n # Initialize a new game\n state = self.game_setup(self.args)\n current_player = 0\n\n # Store which model corresponds to which player\n # Let the models take turns in who is the starting player\n models = {duel % 2: (self.m1, self.mcst1),\n (duel + 1) % 2: (self.m2, self.mcst2)}\n\n # Play the game\n while not state.is_terminal():\n model, tree = models[current_player]\n # Perform a number of Monte Carlo searches\n for _ in range(self.num_sims):\n tree.search(state, model)\n # Determine an action by sampling from the policy as defined by the tree\n a, _ = tree.action(state, temperature=0)\n # Perform the move\n state.do_move(a)\n current_player = 1 - current_player\n # Add the game result to the win counter (taking player perspective into account)\n if duel % 2 == 0:\n self.wins += state.get_scores()\n else:\n self.wins += np.roll(state.get_scores(), 1)\n self.terminal = True\n\n return self.wins", "def play_game(self):\n # need everyone to pass to move to next phase?\n self.deal_cards()\n self.plant_food()", "def oneGame():\n playOneGame()", "def simulate_memories(simulation_length):\n \n \n pass", "def selfplay():\n agent2 = Agent(0.99, 0.1, 0.003, 42, train_games, 7, eps_dec)\n agent2.load_checkpoint()\n global win_cntr\n global done\n g = Game()\n turn = random.choice([PLAYER, AI])\n done = False\n transitions_agent = []\n transitions_agent2 = []\n while done == False:\n g.printBoard()\n if turn == PLAYER:\n # row = input('{}\\'s turn: '.format('Red'))\n # g.insert(int(row), turn)\n observation = []\n for sublist in g.board:\n for i in sublist:\n observation.append(i)\n observation = np.asarray(observation)\n action = agent2.choose_action(observation)\n if g.check_if_action_valid(action):\n print('{}\\'s turn: %d'.format('Red') % action)\n g.insert(action, PLAYER_PIECE)\n else:\n while g.check_if_action_valid(action) == False:\n agent.store_transition(observation, action, -100, observation, done)\n action = np.random.randint(7)\n print('{}\\'s turn: %d'.format('Red') % action)\n g.insert(action, PLAYER_PIECE)\n observation_ = []\n for sublist in g.board:\n for i in sublist:\n observation_.append(i)\n observation_ = np.asarray(observation_)\n transitions_agent2 += [(observation, action, observation_, done)]\n else:\n observation = []\n for sublist in g.board:\n for i in sublist:\n observation.append(i)\n observation = np.asarray(observation)\n action = agent.choose_action(observation)\n if g.check_if_action_valid(action):\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n else:\n while g.check_if_action_valid(action) == False:\n agent.store_transition(observation, action, -100, observation, done)\n action = np.random.randint(7)\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n observation_ = []\n for sublist in g.board:\n for i in sublist:\n 
observation_.append(i)\n observation_ = np.asarray(observation_)\n transitions_agent += [(observation, action, observation_, done)]\n turn = AI if turn == PLAYER else PLAYER\n if g.getWinner() == Tie:\n reward_agent = 0\n else:\n winner = AI if turn == PLAYER else PLAYER\n if winner == AI:\n win_cntr += 1\n if vertical_win:\n reward_agent = 5\n else:\n reward_agent = 20\n\n else:\n reward_agent = -20\n\n for i in range(len(transitions_agent)):\n agent.store_transition(transitions_agent[i][0], transitions_agent[i][1], reward_agent, transitions_agent[i][2],\n transitions_agent[i][3])\n agent.learn()\n return", "def run():\n game = Game()\n i = 0\n while True:\n print(i, \"\\n\\n\" + str(game))\n i += 1\n actions = game.possible_moves()\n if actions == []:\n return game.score()\n else:\n game_state = replace_none(np.array(game.state))\n action = h_min_max(game_state)[0]\n if action == UP:\n game.up()\n elif action == DOWN:\n game.down()\n elif action== LEFT:\n game.left()\n elif action== RIGHT:\n game.right()\n else:\n print(\"Didn't move\")\n return game", "def main(**kwargs):\n print('Start')\n agent = initAgent(**kwargs)\n kwargs['agent'] = agent\n result = []\n\n def mainsub(*args):\n game = Game(**kwargs)\n game.display(kwargs['noshow'])\n while True:\n # get_input = getch(\"Enter direction (w/a/s/d): \")\n get_input = game.action()\n if get_input in keypad:\n game.move(keypad.index(get_input))\n game.update()\n # elif get_input == \"q\":\n # break\n # else:\n # print(\"\\nInvalid choice.\")\n # continue\n if game.end:\n game.savegame()\n game.display(kwargs['noshow'])\n print(\"Result:\", game.nturn, game.score)\n break\n game.display(kwargs['noshow'])\n result.append((game.score, game.nturn))\n game.agent.replay()\n if kwargs['train']:\n game.agent.save()\n game.reset()\n if kwargs['train']:\n np.save('result.%s' % game.agent.algo, np.array(result))\n\n map(mainsub, range(kwargs['n']))\n print(\"Thanks for playing.\")", "def run(self):\n while True:\n if self.game_over: \n return \n\n self.handle_events() \n if self.paused:\n continue\n\n self.update_generation()\n self.draw_grid()\n\n self.cap_frame_rate()", "def run_game(self):\n n = 1\n while self._run:\n # lock Framerate\n self._clock.tick(self._fps)\n # Process Input\n self._map.drawmap(self._screen)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self._run = False\n if (event.type == pygame.KEYDOWN):\n print(\"KeyDown\")\n if (event.key == pygame.K_SPACE):\n print(\"KeySpace\")\n self.move_to_next_turn()\n\n \"\"\"\n All Test Code\n \"\"\"\n if (self._player_turn == 1 and n == 1):\n self._screen.blit(self._core_deck.draw_deck(), (105, 130))\n for player in self._player_list:\n self._screen.blit(self._core_deck.draw_deck(), (105, 130))\n for player in self._player_list:\n print(player._name)\n player.show_hand()\n print(\"Deck: \\n\")\n self._core_deck.print_deck()\n print(self._player_list[0]._name)\n print(\"Playing a card...\")\n self._core_deck.go_to_graveyard(self._player_list[0].play_card(1))\n print(\"The hand:\")\n self._player_list[0].show_hand()\n print(\"Main Deck:\")\n self._core_deck.print_deck()\n print(\"Graveyard:\")\n self._core_deck.show_graveyard()\n for player in self._player_list:\n print(player._name)\n player.print_units()\n n = 2\n pygame.display.update()", "def play(self):\n p1 = self.player()\n p2 = axelrod.Player()\n p1.reset()\n p1.strategy(p2)\n # Genome contains only valid responses.\n self.assertEqual(p1.genome.count(C) + p1.genome.count(D), len(p1.genome))", "def 
run_single(self):\n self.run_sim_time(1)", "def simulate(state: GameState) -> int:\n moves = list(state.moves)\n #print(\" moves available: \", moves)\n for i in range(len(state.moves)):\n move = random.choice(moves)\n #print(\" move making: \", move)\n move_idx = moves.index(move)\n #print(\" index of move: \", move_idx)\n moves.pop(move_idx)\n #print(\" new moves available: \", moves)\n state = state.traverse(move)\n #print(\" Winner: \", state.util)\n #print(\" New Board: \", state.display)\n return state.util", "def play_game():\n\tstate = Coinche(verbose=True)\n\tbeliefs = [Belief(i, state) for i in range(4)]\n\n\twhile state.get_moves():\n\t\tprint(state)\n\t\tm = ismcts(rootstate=state, itermax=2000, verbose=False, belief=beliefs[state.player_to_move])\n\t\tprint(\"Best Move: \" + str(m) + \"\\n\")\n\t\tstate.do_move(m)\n\n\tfor p in range(state.number_of_players):\n\t\tprint(\"Player \" + str(p), state.get_result(p))", "def run(self):\n \n t = 0\n while t < 10:\n self.reset()\n self.start_simulation()\n while not self.done:\n raw_input(\"Press Enter to continue...\")\n action = self.action_space.sample()\n print(action)\n state, reward, self.done, _ = self.step(action)\n print('Current state:\\n angles: {}'.format(state))\n print('Reward: {}'.format(reward))\n\n self.stop_simulation()\n t += 1", "def run(): \n learning_rate = 0.42\n discount_rate = 0.15\n initial_q_hat = 4\n \n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent, learning_rate, discount_rate, initial_q_hat) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n print \"Failed trials: \"\n print a.get_failed_trials()\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line", "def simulation(game_, num_simulations, setup_0=None, setup_1=None, show_game=False):\n blue_won = 0\n blue_wins_bc_flag = 0\n blue_wins_bc_noMovesLeft = 0\n red_won = 0\n red_wins_bc_flag = 0\n red_wins_bc_noMovesLeft = 0\n rounds_counter_per_game = []\n rounds_counter_win_agent_0 = []\n rounds_counter_win_agent_1 = []\n\n game_times_0 = []\n game_times_1 = []\n types = game_.types_available\n for simu in range(num_simulations): # simulate games\n # reset setup with new setup if none given\n if setup_0 is not None:\n setup_agent_0 = setup_0\n else:\n setup_agent_0 = draw_random_setup(types, 0, game_.game_dim)\n if setup_1 is not None:\n setup_agent_1 = setup_1\n else:\n setup_agent_1 = draw_random_setup(types, 1, game_.game_dim)\n game_.agents[0].setup = setup_agent_0\n game_.agents[1].setup = setup_agent_1\n game_.reset()\n\n agent_output_type_0 = str(game_.agents[0])\n agent_output_type_1 = str(game_.agents[1])\n agent_output_type_0 = re.search('agent.(.+?) object', agent_output_type_0).group(1)\n agent_output_type_1 = re.search('agent.(.+?) 
object', agent_output_type_1).group(1)\n\n game_time_s = timer()\n if (simu+1) % 1 == 0:\n print('{} won: {}, {} won: {}, Game {}/{}'.format(agent_output_type_0,\n red_won,\n agent_output_type_1,\n blue_won, simu,\n num_simulations))\n print('{} won by flag capture: {}, {} won by moves: {}, Game {}/{}'.format(agent_output_type_0,\n red_wins_bc_flag,\n agent_output_type_0,\n red_wins_bc_noMovesLeft,\n simu,\n num_simulations))\n print('{} won by flag capture: {}, {} won by moves: {}, Game {}/{}'.format(agent_output_type_1,\n blue_wins_bc_flag,\n agent_output_type_1,\n blue_wins_bc_noMovesLeft,\n simu,\n num_simulations))\n print(\"Game number: \" + str(simu + 1))\n for step in range(2000):\n if show_game:\n helpers.print_board(game_.board)\n game_reward = game_.run_step()\n if game_reward is not None:\n if game_reward[0] == 1: # count wins\n game_times_0.append(timer() - game_time_s)\n red_won += 1\n red_wins_bc_flag += 1\n rounds_counter_win_agent_0.append(game_.move_count)\n elif game_reward[0] == 2:\n game_times_0.append(timer() - game_time_s)\n red_won += 1\n red_wins_bc_noMovesLeft += 1\n rounds_counter_win_agent_0.append(game_.move_count)\n elif game_reward[0] == -1:\n game_times_1.append(timer() - game_time_s)\n blue_won += 1\n blue_wins_bc_flag += 1\n rounds_counter_win_agent_1.append(game_.move_count)\n else:\n game_times_1.append(timer() - game_time_s)\n blue_won += 1\n blue_wins_bc_noMovesLeft += 1\n rounds_counter_win_agent_1.append(game_.move_count)\n rounds_counter_per_game.append(game_.move_count)\n break\n if show_game:\n helpers.print_board(game_.board)\n file = open(\"{}_vs_{}_with_{}_sims.txt\".format(agent_output_type_0, agent_output_type_1, num_simulations), \"w\")\n file.write(\"Statistics of {} vs. {} with {} games played.\\n\".format(agent_output_type_0, agent_output_type_1, num_simulations))\n file.write(\"Overall computational time of simulation: {} seconds.\\n\".format(sum(game_times_0) + sum(game_times_1)))\n\n file.write(\"\\nAgent {} won {}/{} games (~{}%).\\n\".format(agent_output_type_0, red_won, num_simulations, round(100*red_won/num_simulations, 2)))\n file.write(\"Reasons for winning: {} flag captures, {} wins through killing all enemies\\n\".format(red_wins_bc_flag, red_wins_bc_noMovesLeft))\n\n file.write(\"\\nAgent {} won {}/{} games (~{}%).\\n\".format(agent_output_type_1, blue_won, num_simulations, round(100*blue_won/num_simulations, 2)))\n file.write(\"Reasons for winning: {} flag captures, {} wins through killing all enemies\\n\".format(blue_wins_bc_flag, blue_wins_bc_noMovesLeft))\n\n file.write(\"\\nAverage game duration overall: {} rounds\\n\".format(round(sum(rounds_counter_per_game)/num_simulations), 2))\n file.write(\"Maximum number of rounds played: {} rounds\\n\".format(max(rounds_counter_per_game)))\n file.write(\"Minimum number of rounds played: {} rounds\\n\".format(min(rounds_counter_per_game)))\n\n file.write(\"\\nAverage game duration for {} wins: {} rounds\\n\".format(agent_output_type_0, round(sum(rounds_counter_win_agent_0)/len(rounds_counter_win_agent_0)), 2))\n file.write(\"Maximum number of rounds played: {} rounds\\n\".format(max(rounds_counter_win_agent_0)))\n file.write(\"Minimum number of rounds played: {} rounds\\n\".format(min(rounds_counter_win_agent_0)))\n\n file.write(\"\\nAverage game duration for {} wins: {} rounds\\n\".format(agent_output_type_1, round(sum(rounds_counter_win_agent_1)/len(rounds_counter_win_agent_1)), 2))\n file.write(\"Maximum number of rounds played: {} 
rounds\\n\".format(max(rounds_counter_win_agent_1)))\n file.write(\"Minimum number of rounds played: {} rounds\\n\".format(min(rounds_counter_win_agent_1)))\n\n file.write(\"\\nAverage computational time for {} wins: {} seconds\\n\".format(agent_output_type_1, sum(game_times_1)/len(game_times_1)))\n file.write(\"Maximum computational time: {} seconds\\n\".format(max(game_times_1)))\n file.write(\"Minimum computational time: {} seconds\\n\".format(min(game_times_1)))\n\n file.write(\"\\nAverage computational time for {} wins: {} seconds\\n\".format(agent_output_type_0, sum(game_times_0)/len(game_times_0)))\n file.write(\"Maximum computational time: {} seconds\\n\".format(max(game_times_0)))\n file.write(\"Minimum computational time: {} seconds\\n\".format(min(game_times_0)))\n file.close()\n return", "def game_loop(brains: Tuple[BrainType, BrainType]) -> \\\n Tuple[Dict[str, Any], Game]:\n def get_turn_brain(player: Player) -> BrainType:\n if player == S_DOG:\n return brains[0]\n return brains[1]\n\n def try_brain_move(game: Game, ev: EventType) -> bool:\n \"\"\"\n Picking the brain for current player, tries to update game with the received move\n Returns true if a valid move is performed\n \"\"\"\n # print(\"Try brain move\")\n\n brain = get_turn_brain(game.state.turn)\n ui_change, state_change = perform_brain_move(game, ev, brain)\n\n # print(f\"Tryingmove: UI - {ui_change} | State - {state_change}\")\n\n return ui_change or state_change\n\n game = Game(State(Table(None), S_DOG, []), [])\n\n game_start_time = time.time()\n last_move_time = time.time()\n changed_game = False\n last_turn = game.state.turn\n\n print(\" ------ Starea initiala\")\n game.console_draw()\n\n dog_data: List[float] = []\n rabbit_data: List[float] = []\n dog_nodes: List[int] = []\n rabbit_nodes: List[int] = []\n\n\n while not game.state.isFinal():\n changed_game = False\n for ev in pygame.event.get():\n game.draw(pygame.display.get_surface())\n # print(f\"Event type: {ev}\")\n if ev.type == pygame.QUIT:\n print(\" Intrerupt abrupt -------- \")\n show_stats({'winner': \"Intrerupt / Invalid\",\n 'dog_data': dog_data, \n 'rabbit_data': rabbit_data,\n 'dog_nodes': dog_nodes,\n 'rabbit_nodes': rabbit_nodes}, game)\n pygame.display.quit()\n pygame.quit()\n sys.exit()\n \n if ev.type == pygame.MOUSEMOTION:\n continue\n\n\n changed_game = try_brain_move(game, ev.type)\n \n if changed_game:\n break\n\n\n if not changed_game:\n changed_game = try_brain_move(game, pygame.K_DELETE)\n \n game.draw(pygame.display.get_surface())\n \n if changed_game: \n if game.state.turn == last_turn:\n continue\n # Calcule la schimbarea jucatorului\n last_turn = game.state.turn\n time_move = time.time() - last_move_time\n last_move_time = time.time()\n # Logging la shimbarea jucatorului\n print(f\"{name_player(other_player(last_turn))} time for move: {time_move:.2f} s\")\n if other_player(last_turn) == S_DOG:\n rabbit_data.append(time_move)\n if is_DOG_AI:\n dog_nodes.append(get_computed_nodes())\n print(f\"Numar stari calculate la mutare: {get_computed_nodes()}\")\n else:\n dog_data.append(time_move)\n if is_RAB_AI:\n rabbit_nodes.append(get_computed_nodes())\n print(f\"Numar stari calculate la mutare: {get_computed_nodes()}\")\n \n print(\" --------- \")\n game.console_draw()\n\n print(\" ========================== \")\n print(f\"Total game time: {time.time() - game_start_time:.2f} s\")\n print(f\"Numar mutari Dog: {len(dog_data)}\")\n print(f\"Numar mutari Rabbit: {len(rabbit_data)}\")\n\n print(\"\\n Finished game\")\n winner = S_DOG\n if 
game.state.rabbits_win():\n winner = S_RAB\n return {'winner': name_player(winner),\n 'dog_data': dog_data, \n 'rabbit_data': rabbit_data,\n 'dog_nodes': dog_nodes,\n 'rabbit_nodes': rabbit_nodes}, game", "def main():\n character1 = generate_random_character(\"Dr. Bones\", 100, 60, 15, 5)\n character2 = generate_random_character(\"Mr. Meeseeks\", 100, 60,\n 15, 5)\n battle = BattleSimulator(character1, character2)\n battle.simulate()", "def main():\n ans = random_word()\n run_game(ans, N_TURNS)", "def play_game() -> None:\n board = tuple(tuple(0 for _ in range(i, i + 16))\n for i in range(0, 64, 16))\n state = GameState(board, 1)\n while state.util is None:\n # human move\n print(state.display)\n state = state.traverse(int(input(\"Move: \")))\n if state.util is not None:\n break\n # computer move\n find_best_move(state)\n move = (state.selected if state.selected != -1\n else random.choice(state.moves))\n state = state.traverse(move)\n print(state.display)\n if state.util == 0:\n print(\"Tie Game\")\n else:\n print(f\"Player {state.util} Wins!\")", "def simulate(self):\n self.round += 1", "def test_complete_game1(self):\n m = Game()\n\n plays = [\n ['a:-', 'a:-', 'a:b', 'b:b', 'b:-', 'b:b'],\n ['c:3i', 'c:f', 'c:f', 'c:-', 'd:f', 'd:-', 'd:-'],\n ['a:3i', 'a:2f', 'a:f', 'a:-', 'b:-', 'b:-', 'b:-'],\n ['c:3i', 'c:2f', 'c:-', 'c:f', 'd:-', 'd:b', 'd:b'],\n ['a:5i2rp', 'a:4f', 'a:f', 'a:b-', 'b:-', 'b:b', 'b:k']\n ]\n m.run(plays)\n m.print_stats()\n\n # Did the right team win?\n assert m.teams['a'].win == True\n\n # Did we get 5 turns?\n assert m.turn_count == 5\n\n # There should have been 30 throws in the game\n assert m.get_throw_count() == 30", "def test_movement(self):\n running = True\n\n self.refresh_tile_maps()\n\n self.data = self.board.data\n self.board_objects = self.board.board_objects\n self.weapons = self.board.weapons\n self.rooms = self.board.rooms\n self.players = self.board.players\n self.player_cards = self.board.player_cards\n self.combined_tiles = self.board.combined_tiles\n self.weapon_tokens = self.board.weapon_tokens\n self.player_tokens = self.board.player_tokens\n self.dice = self.board.dice\n\n cont = True\n movements = {'W': [0, -1], 'S': [0, 1], 'A': [-1, 0], 'D': [1, 0]}\n misc_options_one = ['E', 'D']\n misc_options_two = ['E']\n correct = False\n key = ''\n\n # self.move_players_testing()\n # self.remove_players_testing()\n\n while cont:\n for player_char in self.players: \n player_token = self.player_tokens[player_char]\n player_object = self.players[player_char]\n\n out_count = 0\n for p in self.players:\n if self.players[p].out:\n out_count += 1\n\n if out_count == len(self.players):\n cards = self.board.solution.get_solution()\n print('You all lost!')\n print('The solution was:')\n for card in cards:\n sym, a_card = list(card.items())[0]\n print('%s : %s' % (sym, a_card.name))\n input('Press enter to quit')\n cont = False\n break\n\n if not player_object.out:\n if key == 'P':\n cont = False\n break\n \n roll_one, roll_two = self.dice.roll()\n steps = roll_one + roll_two\n player_not_stopped = True\n \n while player_not_stopped and steps > 0 and not correct:\n if key == 'P':\n cont = False\n player_not_stopped = False\n break\n\n\n key_incorrect = True\n while key_incorrect:\n key, option = self.menu_refresh(player_token, player_char, steps)\n\n if key == 'P':\n cont = False\n player_not_stopped = False\n key_incorrect = False\n break\n \n if key =='/':\n cards = self.board.solution.get_solution()\n for card in cards:\n sym, a_card = list(card.items())[0]\n 
print('%s : %s' % (sym, a_card.name))\n input()\n\n elif key == 'H':\n for s, pl in player_object.hand.deck.items():\n print('%s : %s' % (s, pl.name))\n input('Continue?')\n\n elif key == '!':\n player_not_stopped = False\n key_incorrect = False\n player_token.reset_has_entered()\n \n elif key == '£' or (key == '\"' and (option == 1 or option == 3)):\n player_not_stopped = False\n # options order: player_cards, rooms, weapons\n if key == '£':\n options = self.board.get_card_options(False)\n if key == '\"' and (option == 1 or option == 3):\n options = self.board.get_card_options(True)\n \n \"\"\" temp for debugging \"\"\"\n # player_token_list = list(self.player_tokens.items())\n\n # for i, p in enumerate(player_token_list):\n # if p[0] == player_char:\n # p_pos = i\n\n # print(player_token_list[p_pos - 1 % len(player_token_list)][1].player.hand.deck)\n \"\"\" \"\"\"\n\n \"\"\" Also temp for debugging \"\"\"\n # cards = self.board.solution.get_solution()\n # print('cards: ', cards)\n \"\"\" \"\"\"\n\n selection = []\n for option_type in options:\n for i, card_details in enumerate(option_type):\n print('%s : %s - %s' % (i, card_details[1], card_details[3]))\n \n cont_three = True\n while cont_three:\n inp = input('Select from above: ')\n\n try: \n inp = int(inp)\n is_int = True\n except ValueError:\n is_int = False\n\n if is_int and inp < len(option_type) and inp >= 0:\n selection.append(option_type[inp])\n cont_three = False\n else:\n print('incorrect number')\n \n if key == '\"' and (option == 1 or option == 3):\n # Room for suggest doesn't need to be selected, is derrived from current room of the current player\n steps = 0\n key_incorrect = False\n player_not_stopped = False\n player_token_list = list(self.player_tokens.items())\n player_token.reset_has_entered()\n\n for i, p in enumerate(player_token_list):\n if p[0] == player_char:\n p_pos = i\n \n left_player = player_token_list[p_pos - 1 % len(player_token_list)]\n result = player_object.suggest(selection[0], {selection[0][1]: self.player_cards[selection[0][1]]}, {player_token.current_room: self.rooms[player_token.current_room]}, selection[1], left_player, self.board)\n if result != False:\n input('Card: %s, Enter to continue.' 
% result.name)\n else:\n input('Incorrect, Enter to continue.')\n\n elif key == '£':\n correct = player_object.accuse(selection[0][2], selection[1][2], selection[2][2], self.board.solution)\n steps = 0\n if correct:\n print('Yay you won :3')\n cont = False\n\n key = 'P'\n else:\n # print(player_object.out)\n player_object.make_out()\n # print(player_object.out)\n # input()\n print('Oof, you are out')\n \n key_incorrect = False\n player_not_stopped = False\n input()\n else:\n print('wut')\n input()\n\n elif key in movements or key in misc_options_one or key in misc_options_two:\n if option == 0 and key in movements:\n cont_two = True\n switch = True\n while cont_two:\n if switch:\n switch = False\n else:\n key, temp_option = self.menu_refresh(player_token, player_char, steps)\n\n if key == 'P':\n cont_two = False\n cont = False\n steps -= 1\n key_incorrect = False\n elif key in movements:\n off_x, off_y = movements[key] \n cont_two, has_entered = player_token.move_by_direction(off_x, off_y)\n cont_two = not cont_two\n\n if has_entered:\n steps = 1\n else:\n if not cont_two:\n steps -= 1\n key_incorrect = False\n elif option in [1, 2] and key in misc_options_one:\n if key == 'D':\n player_token.reset_has_entered()\n player_token.enter_secret_door()\n steps = 1\n else:\n player_token.exit_door()\n player_token.reset_has_entered()\n steps -= 1\n \n key_incorrect = False\n elif option in [3, 4] and key in misc_options_two:\n player_token.exit_door()\n player_token.reset_has_entered()\n steps -= 1\n key_incorrect = False\n player_token.reset_has_entered()", "def run(self):\n numOfWins = 0;\n for i in range(self.numOfGames):\n gameState = GameState()\n agentIndex = 0 # 0 for First Player (AI), 1 for Second Player (Human)\n while True:\n if agentIndex == 0: \n timed_func = util.TimeoutFunction(self.AIPlayer.getAction, int(self.maxTimeOut))\n try:\n start_time = time.time()\n action = timed_func(gameState, self.gameRules)\n except util.TimeoutFunctionException:\n print(\"ERROR: Player %d timed out on a single move, Max %d Seconds!\" % (agentIndex, self.maxTimeOut))\n return False\n\n if not self.muteOutput:\n print(\"Player 1 (AI): %s\" % action)\n else:\n action = self.HumanAgent.getAction(gameState, self.gameRules)\n if not self.muteOutput:\n print(\"Player 2 (Human): %s\" % action)\n gameState = gameState.generateSuccessor(action)\n if self.gameRules.isGameOver(gameState.boards):\n break\n if not self.muteOutput:\n gameState.printBoards(self.gameRules)\n\n agentIndex = (agentIndex + 1) % 2\n if agentIndex == 0:\n pdb.set_trace()\n print(\"****player 2 wins game %d!!****\" % (i+1))\n else:\n numOfWins += 1\n print(\"****Player 1 wins game %d!!****\" % (i+1))\n\n print(\"\\n****Player 1 wins %d/%d games.**** \\n\" % (numOfWins, self.numOfGames))", "def test_7_replay_4(self):\n self._execute_replay_nr(4)\n\n self.grid.add_pawn(5, 'H')\n self.grid.add_pawn(3, 'B')\n self.grid.add_pawn(2, 'H')\n self.grid.add_pawn(1, 'B')\n self.grid.add_pawn(1, 'H')\n\n # self.grid.print_grid()\n # print(self.minmaxBot_7.choose_move(self.grid))", "def main():\n game_of_life(10, 20)", "def test_simulation(self):\n\t\tprint \"Simulation is being tested\"\n\n\t\tif toggles.DEBUG_FLAG:\n\t\t\tprint \"Debug Flag Set!\"\n\t\t\tprint self.getConfig()\n\n\t\tif toggles.PACKING:\n\t\t\ttoggles.OUTPUT_PATH = toggles.OUTPUT_PATH+toggles.RUN_NAME+'/'\n\t\t\tpackageMaker(toggles.OUTPUT_PATH,self.getConfig())\n\t\tif toggles.IDEAL_GRID:\n\t\t\tself.consensusGrid()\n\n\t\tif toggles.REAL_DATA:\n\t\t\tsampleData = 
self.load_data()\n\t\t\tif toggles.RUN_DATA_STATS:\n\t\t\t\tself.output_data_stats(sampleData)\n\t\t\t\tself.reset_database()\n\t\t\tif toggles.RUN_AVERAGE_COST:\n\t\t\t\tself.sim_average_cost(sampleData)\n\t\t\t\tself.reset_database()\n\t\t\tif toggles.RUN_SINGLE_PAIR:\n\t\t\t\tself.sim_single_pair_cost(sampleData, pending_eddy(self.pick_worker([0], [0])))\n\t\t\t\tself.reset_database()\n\t\telse:\n\t\t\tsampleData = {}\n\t\t\tsyn_load_data()\n\n\t\tif toggles.RUN_ITEM_ROUTING and not (toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING):\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Running: item Routing\"\n\t\t\tself.run_sim(deepcopy(sampleData))\n\t\t\tself.reset_database()\n\n\t\tif PRED_SCORE_COUNT and not (RUN_TASKS_COUNT or RUN_MULTI_ROUTING):\n\t\t\tif DEBUG_FLAG:\n\t\t\t\tprint \"Running: Pred Score count\"\n\t\t\tself.run_sim(sampleData)\n\t\t\tself.reset_database()\n\n\n\n\t\tif toggles.COUNT_TICKETS and not (toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING):\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Running: ticket counting\"\n\t\t\tself.run_sim(deepcopy(sampleData))\n\t\t\tself.reset_database()\n\n\t\tif toggles.SELECTIVITY_GRAPH and not (toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING):\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Running: selectivity amounts over time\"\n\t\t\tself.run_sim(sampleData)\n\t\t\tself.reset_database()\n\n\t\t#____FOR LOOKING AT ACCURACY OF RUNS___#\n\t\tif toggles.TEST_ACCURACY and toggles.REAL_DATA:\n\t\t\tcorrectAnswers = self.get_correct_answers(toggles.INPUT_PATH + toggles.ITEM_TYPE + '_correct_answers.csv')\n\t\t\tpassedItems = self.get_passed_items(correctAnswers)\n\n\n\t\tif toggles.RUN_OPTIMAL_SIM:\n\t\t\tcountingArr=[]\n\t\t\tself.reset_database()\n\t\t\tfor i in range(toggles.NUM_SIM):\n\t\t\t\tprint \"running optimal_sim \" +str(i)\n\t\t\t\tself.num_tasks = self.optimal_sim(sampleData)\n\t\t\t\tcountingArr.append(self.num_tasks)\n\t\t\t\tself.reset_database()\n\t\t\tdest = toggles.OUTPUT_PATH+toggles.RUN_NAME+'_optimal_tasks'\n\t\t\tgeneric_csv_write(dest+'.csv',[countingArr])\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Wrote File: \" + dest+'.csv'\n\n\n\n\t\tif toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING or toggles.RUN_CONSENSUS_COUNT:\n\t\t\tif toggles.RUN_TASKS_COUNT:\n\t\t\t\t#print \"Running: task_count\"\n\t\t\t\t#f = open(toggles.OUTPUT_PATH + toggles.RUN_NAME + '_tasks_count.csv', 'a')\n\t\t\t\t#f1 = open(toggles.OUTPUT_PATH + toggles.RUN_NAME + '_incorrect_count.csv', 'a')\n\n\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\toutputArray = []\n\n\t\t\trunTasksArray = []\n\t\t\tgoodArray, badArray = [], []\n\t\t\tgoodPoints, badPoints = [], []\n\t\t\taccCount = []\n\t\t\tlocArray = [[],[],[],[]]\n\n\t\t\tfor i in range(toggles.NUM_SIM):\n\t\t\t\tprint \"running simulation \" + str(i+1)\n\t\t\t\tself.run_sim(deepcopy(sampleData))\n\t\t\t\trunTasksArray.append(self.num_tasks)\n\n\t\t\t\t#____FOR LOOKING AT ACCURACY OF RUNS___#\n\t\t\t\tif toggles.TEST_ACCURACY and toggles.REAL_DATA:\n\t\t\t\t\tnum_incorrect = self.final_item_mismatch(passedItems)\n\t\t\t\t\taccCount.append(num_incorrect)\n\t\t\t\tif toggles.RUN_CONSENSUS_COUNT or toggles.VOTE_GRID:\n\t\t\t\t\tdonePairs = IP_Pair.objects.filter(Q(num_no__gt=0)|Q(num_yes__gt=0))\n\t\t\t\t\tif toggles.TEST_ACCURACY:\n\t\t\t\t\t\tgoodPairs, badPairs = [], []\n\t\t\t\t\t\tfor pair in donePairs:\n\t\t\t\t\t\t\tval = bool((pair.num_yes-pair.num_no)>0)\n\t\t\t\t\t\t\tif toggles.REAL_DATA:\n\t\t\t\t\t\t\t\tcorrect = ((correctAnswers[(pair.item,pair.predicate)]) == 
val)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tcorrect = (pair.true_answer == val)\n\t\t\t\t\t\t\tif correct:\n\t\t\t\t\t\t\t\tgoodArray.append(pair.num_no+pair.num_yes)\n\t\t\t\t\t\t\t\tgoodPoints.append((pair.num_no,pair.num_yes))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tbadArray.append(pair.num_no+pair.num_yes)\n\t\t\t\t\t\t\t\tbadPoints.append((pair.num_no,pair.num_yes))\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor pair in donePairs:\n\t\t\t\t\t\t\tgoodArray.append(pair.num_no + pair.num_yes)\n\t\t\t\t\t\t\tgoodPoints.append((pair.num_no,pair.num_yes))\n\n\t\t\t\t\t#print \"This is number of incorrect items: \", num_incorrect\n\n\t\t\t\tself.reset_database()\n\n\t\t\tif toggles.RUN_TASKS_COUNT:\n\t\t\t\tgeneric_csv_write(toggles.OUTPUT_PATH+toggles.RUN_NAME+'_tasks_count.csv',[runTasksArray])\n\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\tprint \"Wrote File: \" + toggles.OUTPUT_PATH + toggles.RUN_NAME + '_tasks_count.csv'\n\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\tif len(runTasksArray)>1:\n\t\t\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME + '_tasks_count.png'\n\t\t\t\t\t\ttitle = toggles.RUN_NAME + ' Cost distribution'\n\t\t\t\t\t\thist_gen(runTasksArray, dest, labels = ('Cost','Frequency'), title = title)\n\t\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\t\tprint \"Wrote File: \" + dest\n\t\t\t\t\telif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"only ran one sim, not running hist_gen\"\n\n\t\t\tif toggles.RUN_MULTI_ROUTING:\n\t\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME + '_Eddy_sys_' + str(toggles.EDDY_SYS) + '_multi_routing.png'\n\t\t\t\t\ttitle = toggles.RUN_NAME + ' Average Predicate Routing'\n\t\t\t\t\tquestions = toggles.CHOSEN_PREDS\n\t\t\t\t\tarrayData = []\n\t\t\t\t\tfor i in range(len(questions)):\n\t\t\t\t\t\tarrayData.append([])\n\t\t\t\t\tfor routingL in ROUTING_ARRAY:\n\t\t\t\t\t\tfor i in range(len(questions)):\n\t\t\t\t\t\t\tarrayData[i].append(routingL[i])\n\t\t\t\t\tmrsavefile = open(toggles.OUTPUT_PATH+toggles.RUN_NAME+'_multi_routing.csv','w')\n\t\t\t\t\tmrwriter = csv.writer(mrsavefile)\n\t\t\t\t\tmrwriter.writerow(questions)\n\t\t\t\t\tfor row in arrayData:\n\t\t\t\t\t\tmrwriter.writerow(row)\n\t\t\t\t\tmrsavefile.close()\n\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"Wrote File: \"+toggles.OUTPUT_PATH+toggles.RUN_NAME+'_multi_routing.csv'\n\t\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\t\tstats_bar_graph_gen(arrayData, questions, dest, labels = ('Predicate','# of Items Routed'), title = title)\n\t\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\t\tprint \"Wrote File: \" + toggles.OUTPUT_PATH+toggles.RUN_NAME+'_multi_routing.png'\n\t\t\tif toggles.ACCURACY_COUNT:\n\t\t\t\tdest = toggles.OUTPUT_PATH+toggles.RUN_NAME+'_acc_count'\n\t\t\t\tgeneric_csv_write(dest+'.csv',[accCount])\n\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\thist_gen(accCount, dest+'.png')\n\n\t\t\tif toggles.RUN_CONSENSUS_COUNT:\n\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME+'_consensus_count'\n\t\t\t\tif len(goodArray)>1:\n\t\t\t\t\tif len(badArray) == 0:\n\t\t\t\t\t\tgeneric_csv_write(dest+'.csv',[goodArray])\n\t\t\t\t\t\t#print goodArray\n\t\t\t\t\telse:\n\t\t\t\t\t\tgeneric_csv_write(dest+'.csv',[goodArray,badArray])\n\t\t\t\t\t\t#print goodArray,badArray\n\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"Wrote File: \" + dest + '.csv'\n\t\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\t\ttitle = 'Normalized Distribution of Tasks before Consensus'\n\t\t\t\t\t\tlabels = ('Number of Tasks', 'Frequency')\n\t\t\t\t\t\tif len(badArray) < 2:\n\t\t\t\t\t\t\thist_gen(goodArray, 
dest+'.png',labels=labels,title=title)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tleg = ('Correctly Evaluated IP pairs','Incorrectly Evaluated IP pairs')\n\t\t\t\t\t\t\tmulti_hist_gen([goodArray,badArray],leg,dest+'.png',labels=labels,title=title)\n\t\t\t\telif toggles.DEBUG_FLAG:\n\t\t\t\t\tprint \"only ran one sim, ignoring results\"\n\t\t\tif toggles.VOTE_GRID:\n\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME+'_vote_grid'\n\t\t\t\tif len(goodPoints)>1:\n\t\t\t\t\tif len(badPoints)==0:\n\t\t\t\t\t\tgeneric_csv_write(dest+'.csv',goodPoints)\n\t\t\t\t\telse:\n\t\t\t\t\t\tgeneric_csv_write(dest+'_good.csv',goodPoints)\n\t\t\t\t\t\tgeneric_csv_write(dest+'_bad.csv',badPoints)\n\t\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\t\ttitle = \"Vote Grid Graph\"\n\t\t\t\t\t\tlabels = (\"Number of No Votes\",\"Number of Yes Votes\")\n\t\t\t\t\t\tif len(badPoints)==0:\n\t\t\t\t\t\t\txL,yL=zip(*goodPoints)\n\t\t\t\t\t\t\tline_graph_gen(xL,yL,dest+'.png',title=title,labels=labels,scatter=True,square=True)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tgX,gY = zip(*goodPoints)\n\t\t\t\t\t\t\tbX,bY = zip(*badPoints)\n\t\t\t\t\t\t\tmulti_line_graph_gen((gX,bX),(gY,bY),('Correct','Incorrect'),dest+'_both.png',title=title,labels=labels,scatter=True,square=True)\n\t\t\t\t\t\t\tline_graph_gen(gX,gY,dest+'_good.png',title=title+\" goodPoints\",labels=labels,scatter=True,square=True)\n\t\t\t\t\t\t\tline_graph_gen(bX,bY,dest+'_bad.png',title=title+\" badPoints\",labels=labels,scatter=True,square=True)\n\t\tif toggles.TIME_SIMS:\n\t\t\tself.timeRun(sampleData)\n\n\t\tif toggles.RUN_ABSTRACT_SIM:\n\t\t\tself.abstract_sim(sampleData, toggles.ABSTRACT_VARIABLE, toggles.ABSTRACT_VALUES)", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.001, display=True) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line", "def run(self):\n numOfWins = 0;\n for i in range(self.numOfGames):\n gameState = GameState()\n agentIndex = 0 # 0 for First Player (AI), 1 for Second Player (Human)\n while True:\n if agentIndex == 0: \n timed_func = util.TimeoutFunction(self.AIPlayer.getAction, int(self.maxTimeOut))\n try:\n start_time = time.time()\n action = timed_func(gameState, self.gameRules)\n except util.TimeoutFunctionException:\n print(\"ERROR: Player %d timed out on a single move, Max %d Seconds!\" % (agentIndex, self.maxTimeOut))\n return False\n\n if not self.muteOutput:\n print(\"Player 1 (AI): %s\" % action)\n else:\n action = self.HumanAgent.getAction(gameState, self.gameRules)\n if not self.muteOutput:\n print(\"Player 2 (Human): %s\" % action)\n gameState = gameState.generateSuccessor(action)\n if self.gameRules.isGameOver(gameState.boards):\n break\n if not self.muteOutput:\n gameState.printBoards(self.gameRules)\n\n agentIndex = (agentIndex + 1) % 2\n if agentIndex == 0:\n print(\"****player 2 wins game %d!!****\" % (i+1))\n else:\n numOfWins += 1\n print(\"****Player 1 wins game %d!!****\" % (i+1))\n\n print(\"\\n****Player 1 wins %d/%d games.**** \\n\" % (numOfWins, 
self.numOfGames))", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.00000001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line", "def simulation(nepisodes):\n # Initialize robots\n # print('I am inside the simulation')\n agents = [] # List containing all robots\n a1 = Agent(start = [0, 0], end = [grid_size-1, grid_size-1], nr = 1) # Create agent 1\n a2 = Agent(start = [0, grid_size-1], end = [grid_size-1, 0], nr = 2) # Create agent 2\n a3 = Agent(start = [grid_size-1, 0], end = [0, grid_size-1], nr = 3) # Create agent 3\n a4 = Agent(start = [grid_size-1, grid_size-1], end = [0, 0], nr = 4) # Create agent 4\n agents.append(a1)\n agents.append(a2)\n agents.append(a3)\n agents.append(a4)\n\n # for agent in agents:\n # agent.load_target('target_weights_{}.h5'.format(agent.nr))\n # agent.load_policy('policy_weights_{}.h5'.format(agent.nr))\n # print('loaded')\n\n steps_list = [[] for i in range(len(agents))]\n reward_list = [[] for i in range(len(agents))]\n cumulative_rewards = [[] for i in range(len(agents))]\n collisions_list = [[] for i in range(len(agents))]\n\n t = 0 # Set time to zero\n for i in range(nepisodes):\n t = episode(agents, t, i+1) # Run one episode\n\n print('End of episode ', i+1)\n agent_index = 0\n for agent in agents:\n steps_list[agent_index].append(agent.steps)\n reward_list[agent_index].append(agent.reward)\n collisions_list[agent_index].append(agent.collisions)\n if i == 0:\n cumulative_rewards[agent_index].append(agent.reward)\n else:\n cumulative_rewards[agent_index].append(agent.reward + cumulative_rewards[agent_index][i-1])\n agent_index += 1\n\n if i % 1000 == 0:\n with open('reward_4_agents_{}'.format(i),'wb') as f:\n pickle.dump(reward_list,f)\n\n with open('steps_4_agents_{}'.format(i), 'wb') as f:\n pickle.dump(steps_list, f)\n\n with open('cols_4_agents_{}'.format(i), 'wb') as f:\n pickle.dump(collisions_list, f)\n\n\n return steps_list, reward_list, collisions_list, cumulative_rewards", "def start_game(self):\n\n\t\tpass", "def step(self):\n self.game.step()", "def generate(self):\n for i in range(4):\n random_first = randomize_first_box()\n self.randomize(random_first)\n for i in range(9):\n random_pos = randomize_position()\n self.randomize(random_pos)\n self.board.solve()", "def GAMEOVER_LOOP():\n pass", "def start(self):\n with self.players['w'], self.players['b']:\n\n game = 0\n\n while game < self.num_games:\n\n # Print info.\n print \"Game %d - %s [%s] (White) VS: %s [%s] (Black)\" % (game + 1,\n self.players['w'].name,\n type(self.players['w']).__name__,\n self.players['b'].name,\n type(self.players['b']).__name__)\n # Reset board\n self.board.reset()\n\n # Signal to players that a new game is being played.\n [p.new_game() for p in self.players.itervalues()]\n\n curr_player_idx = 'w'\n\n game_pgn = chess.pgn.Game()\n game_pgn.headers[\"White\"] = self.players['w'].name\n game_pgn.headers[\"Black\"] = self.players['b'].name\n game_pgn.headers[\"Date\"] = 
time.strftime(\"%Y.%m.%d\")\n game_pgn.headers[\"Event\"] = \"Test\"\n game_pgn.headers[\"Round\"] = game\n game_pgn.headers[\"Site\"] = \"My PC\"\n\n _, time_taken = self.play(curr_player_idx, game_pgn=game_pgn)\n\n result = self.board.result(claim_draw=True)\n if result == '1-0':\n winner = self.players['w']\n elif result == '0-1':\n winner = self.players['b']\n else:\n winner = None\n self.data['draws'] += 1\n print \"Draw.\" \n\n if winner is not None:\n self.data['wins'][winner.name] += 1\n print \"%s wins.\" % winner.name\n\n for color, p in self.players.iteritems():\n print \"Player %s took %f seconds in total\" % (p.name, time_taken[color])\n p.time_taken = 0\n\n game_pgn = game_pgn.root()\n game_pgn.headers[\"Result\"] = result\n with open(resource_filename('guerilla', 'data/played_games/') + self.players['w'].name + '_' +\n self.players['b'].name + '_' + str(game) + '.pgn', 'w') as pgn:\n try:\n pgn.write(str(game_pgn))\n except AttributeError as e:\n print \"Error writing pgn file: %s\" % (e)\n\n self.swap_colours()\n game += 1", "def simulate(self):\n self._t = self._t + 1\n if self._t == self._cycle:\n # End of a season, start of the next one. Year is also cyclic that is WINTER -> SPRING.\n self._t = 0\n self._season = self._season.next()\n\n # When the ammount of newly produced food in a cell is over and the cell can seed we\n # randomly choose another spot where some random ammount of newly produced food should\n # be stored.\n for i in range(self._height):\n for j in range(self._width):\n if self._env[i][j].get_newly() == 0 and not self._seeded[i][j]:\n # if the cell become empty just now seed in once in a randomn cell on the grid.\n self._seeded[i][j] = True\n cap = self._height + self._width\n while cap > 0:\n seedi = random.randint(0, self._height - 1)\n seedj = random.randint(0, self._width - 1)\n\n production_cap = self._food_per_season[self._season.value]\n\n production_cap -= self._env[seedi][seedj].get_newly()\n\n if production_cap > 0:\n seed_amount = random.randint(1, production_cap)\n self._env[seedi][seedj].produce(seed_amount)\n self._seeded[seedi][seedj] = False\n break\n\n cap = cap - 1", "def main():\n game = Game(TIMES, HARDNESS)\n game.start()\n game.print_score()", "def simulate(seconds):\n\n #Grab the start time\n start_time = dt.datetime.now()\n\n # fill list with the start\n times_on_the_second = [start_time + dt.timedelta(seconds=x) for x in range(seconds + 1)]\n\n #end_time = start_time + dt.timedelta(seconds=seconds)\n\n end_time = times_on_the_second[-1]\n epochs = 0\n\n\n\n print(f\"Simulation started at {start_time}\")\n\n while dt.datetime.now() < end_time:\n\n while dt.datetime.now() < times_on_the_second[epochs]:\n pass\n\n for asteroid in Controller.currentAsteroids:\n asteroid.move()\n print(asteroid, F\"time: {dt.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]}\")\n epochs += 1\n\n\n\n # time.sleep(1)", "def test_solve_one_player_1(self):\n self.rush_hour_data = rush_hour_data_1\n self.state_data = state_data_1\n self.execute_minimax_single_player()", "def mc_simul_all_hands():\r\n\tglobal hands, H, G, B, P, rnd, equity_arr, equity_all, equity_win, equity_tie, df, equity_mc_cvg\r\n\r\n\thands = np.array(map(hand_str_to_no, all_preflop_hands), dtype=np.int32)\r\n\tH = hands.shape[0]\r\n\tG = int(2e5)\t\t\t# 1e6\t\t2e5\r\n\tB = int(500)\t\t\t# 300\t\t500\r\n\tnb_opp = 9\r\n\tequity_arr = np.zeros([B, H, nb_opp, 2], dtype=np.float32)\r\n\r\n\tprint '\\n---------------- Monte Carlo simulation start'\r\n\tprint 'nb of hands = 
{}'.format(H)\r\n\tprint 'nb of random games per block = {}'.format(G)\r\n\tprint 'nb of blocks = {}\\n'.format(B)\r\n\r\n\r\n\tt_init = timer()\r\n\tfor b in xrange(B):\r\n\t\tprint 'block #{}'.format(b)\r\n\t\tt0 = timer()\r\n\r\n\t\t# random games generation\r\n\t\tC = 50-2\r\n\t\tP = 2*nb_opp+5\r\n\t\tN = int(2*P*G)\r\n\t\tseed = 0\t# None\r\n\t\trnd = rnd_games(G, P, C, N, seed)\r\n\t\t# rnd = rnd_games_slow(G, P, C)\r\n\r\n\t\tt1 = timer()\r\n\t\tprint '\\tRNG time = \\t{:.6f} s'.format(t1-t0)\r\n\r\n\t\t# hand ranking\r\n\t\tequity_arr[b, :, :, :] = mc_block_all_fast(hands, nb_opp, rnd, keys.CARD_FLUSH_KEY,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tkeys.CARD_FACE_KEY,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tkeys.CARD_SUIT,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tkeys.SUIT_MASK,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tkeys.SUIT_BIT_SHIFT,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tEvalSeven.flush_rank,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tEvalSeven.face_rank,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tEvalSeven.flush_suit)\r\n\r\n\t\tt2 = timer()\r\n\t\tprint '\\tequity time = \\t{:.6f} s'.format(t2-t1)\r\n\t\tprint '\\tblock time = \\t{:.6f} s'.format(t2-t0)\r\n\r\n\tprint '\\n---------------- Monte Carlo simulation end'\r\n\tprint 'full run time = {:.6f} s'.format(t2-t_init)\r\n\tpickle(os.path.join('Tables', 'equity_array.pk'), equity_arr)\r\n\r\n\r\n\tequity_all = np.mean(equity_arr, axis=0)\r\n\tequity_win = equity_all[:, :, 0]\r\n\tequity_tie = equity_all[:, :, 1]\r\n\tdf = pd.DataFrame(data=np.concatenate((equity_win, equity_tie), axis=1),\r\n\t\t\t\t\t\tindex=all_preflop_hands,\r\n\t\t\t\t\t\tcolumns=[str(i)+'_win' for i in range(1, nb_opp+1)]+[str(i)+'_tie' for i in range(1, nb_opp+1)])\r\n\tdf.to_pickle(os.path.join('Tables', 'df_equity_montecarlo.pd'))\r\n\tdf.to_csv(os.path.join('Tables', 'df_equity_montecarlo.csv'))\r\n\r\n\tequity_mc_cvg = np.zeros_like(equity_arr)\r\n\tfor k in xrange(B):\r\n\t\tequity_mc_cvg[k, :, :, :] = np.mean(equity_arr[:k+1, :, :, :], axis=0)\r\n\tpickle(os.path.join('Tables', 'equity_mc_cvg.pk'), equity_mc_cvg)\r\n\r\n\t# df_equity_montecarlo = pd.read_pickle(os.path.join('Tables', 'df_equity_montecarlo.pd'))\r\n\t# equity_arr_1 = unpickle(os.path.join('Tables', 'test.pk'))\r\n\t# equity_mc_cvg_1 = unpickle(os.path.join('Tables', 'equity_mc_cvg.pk'))\r", "def move_simulation(self):\n import simulation\n\n dt = 1e-3 # Pas de temps en seconde.\n x, y = [], []\n state = simulation.State() # On positione la voiture a l'origine\n for i, t in enumerate(np.arange(0, self.portion_duration*self.nbr_portions, dt)):\n state.update(*self(t), dt=dt)\n if not i % 1000:\n x.append(state.x)\n y.append(state.y)\n\n # self.score = x[-1]**2 + y[-1]**2 # Bidon et mal fait, c'est juste pour le test.\n # self.score = y[-1]-abs(x[-1])\n # self.score = 1 / ( (self.arriveeX*self.nbr_portions/10.0-x[-1])**2 + (self.arriveeY*self.nbr_portions/10.0-y[-1])**2 ) # Tout droit jusqu'au point choisi\n self.score = 1 / ( (self.arriveeX*self.nbr_portions*4.0/20-x[-1])**2 +\n (self.arriveeY*self.nbr_portions*4.0/20-y[-1])**2 ) # Le point choisi dépend du point standard (0.1) et de nbr_portions\n\n return x, y", "def simulate(self, num_games):\r\n # self.runs = num_games #Initializes a tracker for the number of runs\r\n for _ in range(num_games):\r\n self.results.append(self._simulate_once())\r\n return self.results", "def game_tick_run(self):\n pass", "def start_game(self):\n GameManager.time = 0\n print(\"Game started\")\n time_spent = 0\n start_time = tm.process_time()\n winning_path = self.__agent.apply_strategy(self.__graph, self.__board)\n execution_time = 
tm.process_time() - start_time\n for location in winning_path:\n x = location[0]\n y = location[1]\n time_to_wait = self.__board[x][y]\n while time_spent != time_to_wait:\n GameManager.time += 1\n time_spent += 1\n self.__agent.current_location = location\n GameManager.time += 1\n time_spent = 0\n self.__print_grid(winning_path)\n print(f\"Total time required: {GameManager.time}\")\n return GameManager.time, execution_time*1000", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create learning agent\n # a = e.create_agent(RandomAgent) # create random agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.01)\n # reduce update_delay to speed up simulation\n sys.stdout = open(\"./output.txt\", \"w\")\n tic = time()\n sim.run(n_trials=100) # press Esc or close pygame window to quit\n toc = time()\n sys.stdout = sys.__stdout__\n\n print \"Totoal time used: {}.\".format(toc - tic)\n parse(\"./output.txt\")", "def _simulate(self, action=None):\n for k in range(int(self.SIMULATION_FREQUENCY // self.config[\"policy_frequency\"])):\n if action is not None and \\\n self.time % int(self.SIMULATION_FREQUENCY // self.config[\"policy_frequency\"]) == 0:\n # Forward action to the spacecraft\n self.spacecraft.act(self.ACTIONS[action])\n\n self.space.act()\n self.space.step(1 / self.SIMULATION_FREQUENCY)\n self.time += 1\n\n # Automatically render intermediate simulation steps if a viewer has been launched\n # Ignored if the rendering is done offscreen\n self._automatic_rendering()\n\n # Stop at terminal states\n if self.done or self._is_terminal():\n break\n self.enable_auto_render = False", "def play():\n global done\n done = False\n g = Game()\n turn = random.choice([PLAYER, AI])\n transitions_agent = []\n agent.epsilon = agent.eps_min\n while done == False:\n g.printBoard()\n if turn == PLAYER:\n row = input('{}\\'s turn:'.format('Red'))\n g.insert(int(row), PLAYER_PIECE)\n else:\n observation = []\n for sublist in g.board:\n for i in sublist:\n observation.append(i)\n observation = np.asarray(observation)\n action = agent.choose_action(observation)\n if g.check_if_action_valid(action):\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n else:\n while g.check_if_action_valid(action) == False:\n agent.store_transition(observation, action, -100, observation, done)\n action = action = np.random.randint(7)\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n observation_ = []\n for sublist in g.board:\n for i in sublist:\n observation_.append(i)\n observation_ = np.asarray(observation_)\n transitions_agent += [(observation, action, observation_, done)]\n turn = AI if turn == PLAYER else PLAYER\n winner = AI if turn == PLAYER else PLAYER\n if winner == AI:\n reward = 20\n else:\n reward = -20\n for i in range(len(transitions_agent)):\n agent.store_transition(transitions_agent[i][0], transitions_agent[i][1], reward, transitions_agent[i][2],\n transitions_agent[i][3])\n agent.learn()\n return", "def run_tests():\n \n test_constructor_positive()\n test_constructor_negative()\n test_game_move_positive()\n test_game_move_negative()\n test_game_move_edge()\n print(\"Congratulations ! 
You passed all the game test cases.\")", "def test_play_game_hard(self):\r\n wins = [0,0,0]\r\n\r\n for i in range(1,10):\r\n a_player_1_id = 1\r\n a_player_2_id = 2\r\n a_players = [RandomPlayer(a_player_1_id), RandomPlayer(a_player_2_id)]\r\n a_x_dist = i\r\n a_y_dist = i\r\n a_num_to_win = 3\r\n a_game = Game(a_players,a_x_dist,a_y_dist,a_num_to_win)\r\n a_game.play_game()\r\n\r\n wins[a_game.winner] += 1\r\n\r\n print(wins)", "def simulate_front_simulation(self):\r\n factory = SimulationFactory()\r\n simulation = factory.create_front_simulation()\r\n simulation.play()", "def UCTPlayGame():\n #start_time = time.time()\n state = GameState()\n m = UCT(rootstate=state, itermax=750, verbose=False) # play with values for itermax and verbose = True\n print str(m[0][0])+\" \"+str(m[0][1])+\" \"+str(m[1][0])+\" \"+str(m[1][1])\n state.DoMove(m)\n #print state #for user vs bot\n #print(\"--- %s seconds ---\" % (time.time() - start_time))\n \"\"\"if state.GetResult(state.whosemove) == 1.0:\n print \"Player \" + str(1 - state.whosemove) + \" wins!\"\n elif state.GetResult(state.whosemove) == 0.0:\n print \"Player \" + str(state.whosemove) + \" wins!\"\n else:\n print \"Nobody wins!\"\"\"", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0000001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n print 'alpha, gamma:', a.alpha, a.gamma\n print 'penalties:', a.total_penalties\n print 'total rewards:', a.total_rewards", "def main():\n g = CommanderGame([4, 3], 3)\n g.fit_army_orders()\n g.fit_game_matrix()\n g.show_submatrixes([\n [[4, 0, 0]],\n [[2, 1, 0], [1, 1, 1]],\n ])\n print(f'Full game matrix: \\n{g.game_matrix_}')", "def start_simulation(self):\n\n\t\tif self.objects==10:#Adding colors for planet\n\t\t\tself.col_planet()\n\t\t\t\n\t\tfor step in range(self.steps-1):#iterator=all simulation steps\n\t\t\tvis.rate(600)#frames per sec\n\t\t\ti=0\n\t\t\tprint self.dt\n\t\t\tos.system('clear')\n\t\t\tprint \"==========================\\n\", \"Date: \",datetime(1930,12,24)+timedelta(seconds=step*self.dt) ,\"\\n==========================\"\n\t\t\tprint \"Steps: \",self.steps,\"Objects: \", self.objects, \"\\ndt: \",round(float(self.times[self.objects+1]/86400),5),\"days\\n==========================\"\n\t\t\tfor planet in self.ob:# iterator = all planets and sun\n\t\t\t\tx,y,z = (self.positions[i][step+1][0], self.positions[i][step+1][1], self.positions[i][step+1][2])\n\t\t\t\tplanet.pos = (x,y,z)#updating positions\n\t\t\t\tr = ((self.positions[0][step+1][0]-x)**2 + (self.positions[0][step+1][1]-y)**2 + (self.positions[0][step+1][2]-z)**2)**0.5#lenght from sun\n\t\t\t\tprint self.names[i], \"=\", r,\"AU\"\n\t\t\t\ti += 1\n\t\t\t\tself.p2.pos = (self.positions[0][step+1][0], self.positions[0][step+1][1], self.positions[0][step+1][2])#moving sun center\n\t\t\t\tself.p.pos = (self.center[0][step+1][0], self.center[0][step+1][1], self.center[0][step+1][2])#moving solar system mass 
center\n\t\t\t\tself.sun.pos = (self.positions[0][step+1][0], self.positions[0][step+1][1], self.positions[0][step+1][2])#moving sun \n\t\t\tprint \"==========================\\nBarycenter =\", round(((self.positions[0][step+1][0]-self.center[0][step+1][0])**2 + (self.positions[0][step+1][1]-self.center[0][step+1][1])**2 + (self.positions[0][step+1][2]-self.center[0][step+1][2])**2)**0.5,8),\"AU\"", "def simulPlay(self, info):\n\n if not self.setup:\n self.snakeSetup(True)\n\n screen = self.screen\n s = self.s\n clock = self.clock\n global score\n if not gameOver:\n pygame.time.delay(50)\n clock.tick(175)\n self.checkMove(s);\n self.draw(screen, s,info)\n self.score = score\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n return\n\n else:\n self.gameOver = True;\n self.score = score;\n\n return s.raycast()", "def main(self) -> None:\n if LOG[\"Experiment\"]:\n print(\"[Experiment] Advancing to 1st Generation\")\n\n # Mandatory first generation advancement\n self.neat.advance_generation()\n\n # Metrics are initialized\n max_fitness = self.neat.get_best_fitness()\n max_fitnesses = [max_fitness]\n\n if LOG[\"Experiment\"]:\n print(\"[Experiment] Entering Main Loop\")\n\n # The main loop is entered\n stop = 0.0\n while max_fitness <= TARGET_SCORE:\n # Metrics of the last generation are checked and shared\n if LOG[\"Experiment\"]:\n print(\"\\n[Experiment] Generation = \" + str(self.neat.get_generation()))\n print(\"[Experiment] Maximum Fitness of the Generation = \" + str(max_fitness))\n print(\"[Experiment] Compared the Previous Recorded Maximum = \" + str(stop))\n print(\"[Experiment] Maximum Innovation of the Generation = \" + str(self.neat.get_maximum_innovation()))\n print(\"[Experiment] Amount of Species = \", len(self.neat.get_shared_fitness_sums()))\n print(\"[Experiment] Total Shared Fitness = \", self.neat.get_total_shared_fitness(), \"\\n\")\n\n # If an improvement is found, the game may be simulated\n if max_fitness > stop:\n stop = max_fitness\n if LOG[\"FrequentSimulations\"] and input(\"[Experiment] Show Simulation? (y/n)\\n\") == \"y\":\n n = self.neat.get_population()[-1]\n self.snake_game.show(Snake(11, Experiment.ExperimentAI(n)), self.last_used_seed,\n \"Generation = \" + str(self.neat.get_generation()),\n fps=max(4, int(max_fitness / 4)))\n\n # Generation advancement\n self.neat.advance_generation()\n max_fitness = self.neat.get_best_fitness()\n max_fitnesses.append(max_fitness)\n\n # If the target was passed, metrics are consulted\n if LOG[\"Experiment\"]:\n print(\"\\n[Experiment] Generation = \" + str(self.neat.get_generation()))\n print(\"[Experiment] Maximum Fitness of the Generation = \" + str(max_fitness))\n print(\"[Experiment] Compared to a 'stop' value of = \" + str(stop))\n print(\"[Experiment] Maximum Innovation of the Generation = \" + str(self.neat.get_maximum_innovation()))\n print(\"[Experiment] Shared fitness sums = \", self.neat.get_shared_fitness_sums())\n print(\"[Experiment] Total shared fitness = \", self.neat.get_total_shared_fitness(), \"\\n\")\n\n # Metrics are updated again\n max_fitness = self.neat.get_best_fitness()\n max_fitnesses.append(max_fitness)\n\n # A simulation of the result can be shown if the user wants to\n sim = input(\"[Experiment] Show Simulation? 
(y/n)\\n\")\n while sim == \"y\":\n n = self.neat.get_population()[-1]\n self.snake_game.show(Snake(11, Experiment.ExperimentAI(n)), self.last_used_seed,\n \"Generation = \" + str(self.neat.get_generation()),\n fps=max(4, int(max_fitness / 4)))\n sim = input(\"[Experiment] Show Simulation? (y/n)\\n\")\n\n # The resulting network may be printed\n if SHOW_RESULT:\n print(\"The best network generated is specified as:\\n\", str(self.neat.get_best_network_details()))\n\n # The resulting network may be saved\n if SAVE_RESULT:\n if LOG[\"Experiment\"]:\n print(\"[Experiment] Saving Resulting Network\")\n\n # Previous saves are removed\n dm.clear_dir(networks_saving_directory)\n\n # A .txt is generated\n with open(networks_saving_directory+\"/best_network.txt\", \"w\") as text_file:\n text_file.write(str(self.neat.get_best_network_details()))\n\n if LOG[\"Experiment\"]:\n print(\"[Experiment] Resulting Network Saved\")\n\n # A plot of fitnesses may be created\n if PLOT:\n if LOG[\"Experiment\"]:\n print(\"[Experiment] Generating Fitness Plot\")\n\n # The plot is generated in matplotlib\n _, ax = plt.subplots()\n\n ax.plot(range(1, len(max_fitnesses)+1), max_fitnesses)\n ax.set_xlim([0, len(max_fitnesses)+2])\n ax.set_ylim([max(min(min(max_fitnesses), TARGET_SCORE - 100), 0), TARGET_SCORE+5])\n\n plt.title(\"Generational fitness for board size \" + str(BOARD_SIZE) +\n \" using seed \" + str(SEED))\n plt.xlabel(\"Generation\")\n plt.ylabel(\"Fitness\")\n ax.grid(True)\n\n # The plot may be saved to memory\n if SAVE_PLOTS:\n if LOG[\"Experiment\"]:\n print(\"[Experiment] Saving Fitness Plot\")\n\n # Previous saves are removed\n dm.clear_dir(plots_saving_directory)\n\n name = plots_saving_directory + \"/plot_board\" + str(BOARD_SIZE)\n name += \".png\"\n\n # A new .png is saved\n plt.savefig(name, bbox_inches='tight')\n\n if LOG[\"Experiment\"]:\n print(\"[Experiment] Fitness Plot Saved\")\n # Otherwise the plot is displayed\n else:\n if LOG[\"Experiment\"]:\n print(\"[Experiment] Showing Fitness Plot\")\n\n plt.show()\n\n plt.close()\n\n if LOG[\"Experiment\"]:\n print(\"[Experiment] Quitting Experiment\")\n\n # The experiment ends\n self.snake_game.quit()", "def new_game(self):\n self.cells = [] # Array of cells\n self.frame_count = 0\n self.database = []\n self.timer = [Consts[\"MAX_TIME\"], Consts[\"MAX_TIME\"]]\n self.result = None\n # Define the players first\n self.cells.append(Cell(0, [Consts[\"WORLD_X\"] / 4, Consts[\"WORLD_Y\"] / 2], [0, 0], Consts[\"DEFAULT_RADIUS\"]))\n self.cells.append(Cell(1, [Consts[\"WORLD_X\"] / 4 * 3, Consts[\"WORLD_Y\"] / 2], [0, 0], Consts[\"DEFAULT_RADIUS\"]))\n # Generate a bunch of random cells\n for i in range(Consts[\"CELLS_COUNT\"]):\n if i < 4:\n rad = 1.5 + (random.random() * 1.5) # Small cells\n elif i < 10:\n rad = 10 + (random.random() * 4) # Big cells\n else:\n rad = 2 + (random.random() * 9) # Everything else\n x = Consts[\"WORLD_X\"] * random.random()\n y = Consts[\"WORLD_Y\"] * random.random()\n cell = Cell(i + 2, [x, y], [(random.random() - 0.5) * 2, (random.random() - 0.5) * 2], rad)\n safe_dist = Consts[\"SAFE_DIST\"] + rad\n while min(map(cell.distance_from, self.cells[:2])) < safe_dist:\n cell.pos = [\n Consts[\"WORLD_X\"] * random.random(),\n Consts[\"WORLD_Y\"] * random.random()\n ]\n self.cells.append(cell)", "def test_simulation(self):\r\n positions = [1, 10, 100,1000]\r\n num_trials = 10000\r\n result = Investment(positions, num_trials)\r\n result = result.simulation(positions, num_trials) \r\n for pos in positions:\r\n 
self.assertEqual(len(result[pos]), num_trials)\r\n self.assertTrue(result[pos].all() == 1 or result[pos].all() == -1)", "def __run_game(game):\n\n while not game['lost']:\n\n game['move'], game['moveScores'] = my_algorithm(game)\n\n check_game_lost(game)\n\n # I changed the order of this loop to record information\n # about the algorithm before the board is moved so\n # in the move log csv the initial board will show scores and\n # a planned move instead of having the scores off by 1\n record_move(game)\n\n move(game)\n\n return game", "def play_game(self):\r\n\r\n print('Welcome to a game of Concentration!!')\r\n if self.who_goes_first():\r\n self.user_turn()\r\n else:\r\n self.computer_turn()\r\n\r\n while True:\r\n if self.match:\r\n self.user_turn()\r\n else:\r\n self.computer_turn()\r\n self.check_game_end()", "def startGame():\n\n\tprint(\"\\nOK! Let's play!\")\n\tprint(\"--------------------------------------------------------------------------------------\")\n\tprint(\"Note:\")\n\tprint(\"\\tNow you must be kept in your mind a random integer from specific range and I must be guessing that number!\")\n\tprint(\"\\tIf you answer honestly all of my questions I certainly will guess that number!\")\n\tprint(\"--------------------------------------------------------------------------------------\\n\")\n\tgameLogic()", "def test_simple():\n game = Game(3, [0, 0], -1, 5, -5, 10, 1, [[0, 1]], [0.0])\n\n print(f\"Check the baby exists\\n{game.baby}\")\n\n print(\"\\nCheck the berry exists\")\n for berry in game.get_berries():\n print(berry)\n\n print(f\"\\nHere is the board\\n{game.get_board()}\")\n\n print(\"First let's perform an illegal move Northwards\")\n board, reward, done = game.step(\"N\")\n print(f\"Here is the board\\n{game.get_board()}\")\n print(f\"And the reward experienced: {reward}\")\n print(f\"And whether the game is over: {done}\")\n\n print(\"\\nNow let's perform a legal move which does NOT eat the berry\")\n board, reward, done = game.step(\"E\")\n print(f\"Here is the board\\n{game.get_board()}\")\n print(f\"And the reward experienced: {reward}\")\n print(f\"And whether the game is over: {done}\")\n\n print(\"\\nNow we will move back to the original place and then eat the berry\")\n board, reward, done = game.step(\"W\")\n print(f\"Here is the board\\n{game.get_board()}\")\n print(f\"And the reward experienced: {reward}\")\n print(f\"And whether the game is over: {done}\")\n\n print(\"\\nNow let's perform a legal move which does NOT eat the berry\")\n board, reward, done = game.step(\"S\")\n print(f\"Here is the board\\n{game.get_board()}\")\n print(f\"And the reward experienced: {reward}\")\n print(f\"And whether the game is over: {done}\")", "def start_game(table_index):\n initiate_game(table_index)\n\n uId = uIds[table_index]\n current_map = maps[table_index]\n current_alg = applied_algs[table_index].func\n game_over = False\n previousmoves = []\n\n while True:\n try:\n if not game_over:\n # print('Move scores')\n # print(evaluate.evaluate(current_map, 0))\n # print(evaluate.evaluate(current_map, 1))\n # print(evaluate.evaluate(current_map, 2))\n # print(evaluate.evaluate(current_map, 3))\n\n move = current_alg(current_map)\n\n print(previousmoves)\n # check for bug\n # first case\n if len(previousmoves) == 0:\n previousmoves.append(move)\n # same movement\n elif previousmoves[-1] != move:\n previousmoves = []\n elif previousmoves[-1] == move:\n previousmoves.append(move)\n # 10 same movement\n\n\n if len(previousmoves) >= 4 and move == previousmoves[-1]:\n 
wrongdirection = previousmoves[-1]\n previousmoves = []\n moves = [\"w\", \"a\", \"s\", \"d\"]\n moves.remove(wrongdirection)\n\n move = random.choice(moves)\n\n print(move)\n\n request = requests.post(url=base_URL + \"/api/play_the_game\",\n json={'direction': move,\n 'uId': uId})\n current_map = request.json()['board']\n\n print(request.json())\n\n # TODO: Type checking, error handling (HTTP response?)\n game_over = request.json()[\"game_over\"]\n\n else:\n c_score = request.json()[\"c_score\"]\n SESSION_NAME = TEAM_NAME + \"_\" + applied_algs[table_index].label\n\n with open('score_data.txt', 'a') as f:\n f.write(datetime.datetime.now().strftime('%H:%M:%S') + \" \" + SESSION_NAME + \" \" + '%d' % c_score + \"\\n\")\n\n print()\n print(f\"Game Over. Your score is: {c_score}\")\n print()\n\n initiate_game(table_index)\n game_over = False\n uId = uIds[table_index]\n current_map = maps[table_index]\n except:\n print(\"Error\")" ]
[ "0.76476705", "0.7423586", "0.7248727", "0.69607615", "0.6947179", "0.6916251", "0.69077504", "0.6889119", "0.68533415", "0.6840715", "0.68284386", "0.6805519", "0.6801347", "0.6782698", "0.67796993", "0.6775046", "0.66829413", "0.6681817", "0.6652152", "0.66504765", "0.66499096", "0.6593548", "0.6590692", "0.6569411", "0.65645325", "0.6557406", "0.6554239", "0.6553756", "0.6553013", "0.65442824", "0.65388244", "0.65281445", "0.65251946", "0.65219504", "0.65090257", "0.6502473", "0.6494668", "0.6494301", "0.64851004", "0.64823127", "0.64813066", "0.64794487", "0.64733255", "0.6472878", "0.6460956", "0.64601374", "0.64594847", "0.64453614", "0.6442754", "0.64426863", "0.6439449", "0.64392334", "0.6439172", "0.64374816", "0.64371085", "0.6433309", "0.6432994", "0.6412673", "0.64030117", "0.6394819", "0.6384693", "0.6378357", "0.63692373", "0.635646", "0.6355889", "0.6342042", "0.63412005", "0.633092", "0.6319434", "0.63194007", "0.6312146", "0.6310887", "0.63017166", "0.62977415", "0.6294148", "0.628433", "0.6281555", "0.6274733", "0.6265176", "0.6252327", "0.62500453", "0.62380683", "0.6236975", "0.62328905", "0.623289", "0.62306976", "0.62253046", "0.6224251", "0.62196016", "0.6219508", "0.6210494", "0.6204912", "0.62037265", "0.62015355", "0.6201145", "0.61939687", "0.61927396", "0.61925673", "0.61918783", "0.6185506", "0.6181073" ]
0.0
-1
return nothing but updating a score
def mc_update_scores(scores, board, player):
    status = board.check_win()
    if status == provided.DRAW:
        pass
    if status == player:
        for row in range(board.get_dim()):
            for col in range(board.get_dim()):
                znak = board.square(row, col)
                helper(True, znak, player, scores, row, col)
    if status == provided.switch_player(player):
        for row in range(board.get_dim()):
            for col in range(board.get_dim()):
                znak = board.square(row, col)
                helper(False, znak, player, scores, row, col)
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_score():\n pass", "def updateScore(score):\n return score + 1", "def update_score(self, score: int) -> int:\n self.score += score\n return self.score", "def score(self):", "def update_score(self):\n self.score = TurboMQ.calculate_fitness(self.result, self.graph)", "def increase_score(self):\n self.score += 1", "def update_score(score, role):\n if role == 'winner':\n score = score + 1\n if role == 'loser':\n score = score - 1\n return score", "def updateScore(self, score):\n self.__score += score", "def score_update(scoreboard, compare):\r\n if compare == 'Victory':\r\n scoreboard['W'] += 1\r\n elif compare == 'Defeat':\r\n scoreboard['L'] += 1\r\n elif compare == 'Tie':\r\n scoreboard['T'] += 1", "def update_score(self, data):\r\n queuekey = data['queuekey']\r\n score_msg = data['xqueue_body']\r\n self.lcp.update_score(score_msg, queuekey)\r\n self.set_state_from_lcp()\r\n self.publish_grade()\r\n\r\n return dict() # No AJAX return is needed\r", "def update(self, game):\n super().update(game)\n self.nn_def.set_score(self.score)", "def scoring(self):\n pass", "def r_point(self):\n self.r_score += 1\n self.update_scoreboard()", "def disp_score():", "def vanilaScore(self,attended,state,W):", "def score(self):\n return None", "def update_scores(self):\n self.score[0] = (-1)*sum(self.board[self.board == -1])\n self.score[1] = sum(self.board[self.board == 1])\n #self.score[i] = sum(1 for j in range(len(stones_on_board)) if stones_on_board[j] == i)", "def get_score(self):\n return self.score", "def update_score(self, engine, *args):\n #pdb.set_trace()\n self.score_label.text = \"Gold: {}/{}\".format(str(engine.score),\n str(engine.win_score))", "def update_score(self, board):\n self._score += 1", "def adjust_score(self):\n self.score += game.temporary_score", "def update_score(self, concept: _Concept, result: _Result) -> None:\n\n score = self.make_score(concept, result)\n if score is None:\n pass\n else:\n self._vector.append(score)", "def l_point(self):\n self.l_score += 1\n self.update_scoreboard()", "def getScore(data):\n return score", "def update_score(self, match, i):\n self.match_views.update_score(match)\n score_p1 = input(f\"Veuillez rentrer le score de \"\n f\"{match[0][0]['first_name']} \"\n f\"{match[0][0]['last_name']} (1/0.5/0) \")\n score_p2 = input(f\"Veuillez rentrer le score de \"\n f\"{match[1][0]['first_name']} \"\n f\"{match[1][0]['last_name']} (1/0.5/0) \")\n self.validate_score(score_p1, score_p2, match, i)\n new_score = float(score_p1)\n new_score2 = float(score_p2)\n return new_score, new_score2", "def update_score(best_score: int, new_score: int) -> int:\n if new_score > best_score:\n return new_score\n else:\n return best_score", "def get_score(self):\r\n return None", "def r_point(self):\n self.r_score += 1\n self.update()", "def update_scores(self, score):\n self.result_list.append(score)\n\n if self.best_score == 0 and self.worst_score == 0:\n self.best_score = score\n self.worst_score = score\n\n if score < self.best_score:\n self.best_score = score\n\n if score > self.worst_score:\n self.worst_score = score", "def update_score(self):\n td = self.created - datetime.datetime(1970, 1, 1)\n epoch_seconds = td.days * 86400 + td.seconds + (float(td.microseconds) / 1000000)\n order = math.log(max(abs(self.points), 1), 10)\n sign = 1 if self.points > 0 else -1 if self.points < 0 else 0\n seconds = epoch_seconds - 1134028003\n self.score = round(order + sign * seconds / 45000, 7)", "def score(self):\n raise NotImplementedError()", "def f1_score(self):", "def 
fix_score(self,req):\n if self.kind in (\"album\",\"artist\"):\n self.update_score()\n req.message=\"score reset from child scores\"\n elif self.kind==\"track\":\n self.score=0\n for i in self.Play.list(page=self.uid):\n self.score+=i.times\n self.flush()\n req.message=\"score reset from plays table\"\n else:\n req.error= \"not a track, album, or artist\"\n return self.view(req)", "def update_score(self, blanks=0, letters=0, words=0):\n points = 0\n points += letters * LETTER_POINT\n points += words * WORD_POINT\n points += blanks * BLANK_POINT\n self.score += points", "def update(self):\n self.clear()\n self.score += 1\n self.write(f\"Score : {self.score}\",\n align=\"center\", font=(\"Arial Black\", 20))", "def updateScore(self, player: int) -> None:\n\n if player == 1:\n self._score[0] += 1\n elif player == 2:\n self._score[1] += 1\n\n # logging\n logger.info(\"Player {winner} has scored a goal. Score: {score}\", winner=player, score=str(self._score))", "def score(self,*val):\n if len(val):\n self._score = val[0]\n self.evaluated = 1\n else: self.evaluate()\n return self._score", "def l_point(self):\n self.l_score += 1\n self.update()", "def _update_score(self) -> None:\n\n # setting new score by iterating over players\n self.score_play[self.n_play_turns, ] = [\n self._score_table[(\n self.contract.level,\n self.contract.suit,\n self.tricks[i],\n self.contract.player_vulnerability[i],\n int(self.contract.double + self.contract.redouble)\n )]\n for i in range(NUM_PLAYERS)\n ]", "def increase_score(self):\n\n old_score = self.get_score()\n new_score = old_score + 1\n sql = \"UPDATE Users SET score = ? WHERE username = ?\"\n self.conn.execute(sql, (new_score, self.username))\n self.conn.commit()", "def set_score(self, a, b, score):\n ### FILL IN ###", "def update_score(self, score_msg, oldcmap, queuekey):\r\n (valid_score_msg, correct, points, msg) = self._parse_score_msg(score_msg)\r\n\r\n _ = self.capa_system.i18n.ugettext\r\n\r\n dog_stats_api.increment(xqueue_interface.XQUEUE_METRIC_NAME, tags=[\r\n 'action:update_score',\r\n 'correct:{}'.format(correct)\r\n ])\r\n\r\n dog_stats_api.histogram(xqueue_interface.XQUEUE_METRIC_NAME + '.update_score.points_earned', points)\r\n\r\n if not valid_score_msg:\r\n # Translators: 'grader' refers to the edX automatic code grader.\r\n error_msg = _('Invalid grader reply. Please contact the course staff.')\r\n oldcmap.set(self.answer_id, msg=error_msg)\r\n return oldcmap\r\n\r\n correctness = 'correct' if correct else 'incorrect'\r\n\r\n # TODO: Find out how this is used elsewhere, if any\r\n self.context['correct'] = correctness\r\n\r\n # Replace 'oldcmap' with new grading results if queuekey matches. 
If queuekey\r\n # does not match, we keep waiting for the score_msg whose key actually\r\n # matches\r\n if oldcmap.is_right_queuekey(self.answer_id, queuekey):\r\n # Sanity check on returned points\r\n if points < 0:\r\n points = 0\r\n # Queuestate is consumed\r\n oldcmap.set(\r\n self.answer_id, npoints=points, correctness=correctness,\r\n msg=msg.replace('&nbsp;', '&#160;'), queuestate=None)\r\n else:\r\n log.debug(\r\n 'CodeResponse: queuekey %s does not match for answer_id=%s.',\r\n queuekey,\r\n self.answer_id\r\n )\r\n\r\n return oldcmap", "def update_score(self, mark):\n if mark == 'X':\n self.model.game_score[self.model.player_1] += 1\n else:\n self.model.game_score[self.model.player_2] += 1", "def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n if not self.score and self.id:\n self._compute_score()", "def set_score(self, change):\n self._score = self._score + change", "def commit_score(self):\n\n # Update the player's total score and total roll count\n self._total_score += self._current_score", "def _update(self, commit=False):\n votes = Vote.objects.filter(\n content_type = self.get_content_type(),\n object_id = self.instance.pk,\n key = self.field.key,\n )\n obj_score = sum([v.score for v in votes])\n obj_votes = len(votes)\n\n score, created = Score.objects.get_or_create(\n content_type = self.get_content_type(),\n object_id = self.instance.pk,\n key = self.field.key,\n defaults = dict(\n score = obj_score,\n votes = obj_votes,\n )\n )\n if not created:\n score.score = obj_score\n score.votes = obj_votes\n score.save()\n self.score = obj_score\n self.votes = obj_votes\n if commit:\n self.instance.save()", "def score(self, X, y=...):\n ...", "def test_update_score_multiple(self):\r\n self.update_score_multiple()\r\n score = self.openendedmodule.latest_score()\r\n self.assertEquals(score, 1)", "def _adjust_score(self, my_choice, their_choice):\n self._score += p.params['score_matrix'][my_choice][their_choice]\n self._score -= p.params['loss_per_tick']", "def update_g_score(self, value):\n self.g_score = value", "def _calculate_score(self):\n mul = self._check_board()\n if mul > 0:\n inc = 100 * mul + ((mul - 1) * 25)\n self.score += inc", "def _update_scoreboard(self, result):\n if result == Moveresult.KEY:\n self.current_turn.keys_collected += 1\n elif result == Moveresult.EXIT:\n self.current_turn.successful_exits += 1", "def set_score(self, points):\n self.score += points", "def update_score(self, data, system):\r\n queuekey = data['queuekey']\r\n score_msg = data['xqueue_body']\r\n # TODO: Remove need for cmap\r\n self._update_score(score_msg, queuekey, system)\r\n\r\n return dict() # No AJAX return is needed\r", "def update_turn_score(self, score):\n\n # Increment the attribute by the passed value\n self._current_score += score", "def score_minus_one():\r\n # TODO: Avoid duplicated code with score_plus_one; have\r\n # both call a single add_to_score function.\r\n postid = request.values.get(\"postid\")\r\n con = get_db()\r\n con.execute(\"\"\"\r\n UPDATE posts SET score=score-1 WHERE postid=?;\r\n \"\"\",\r\n (postid,))\r\n con.commit()\r\n con.close()\r\n return redirect(url_for(\"display_top\"))", "def update_activity_points(self, user_id,score):\n \n print(\"score :\"+str(score))\n\n if score<0:\n self.execute(TABELLE['activity_points']['update']['loose'],(score,user_id,))\n else:\n self.execute(TABELLE['activity_points']['update']['win'],(score, user_id,))", "def score(self, model, context):\n pass", "def add_score(self, score):\n self._score += score", 
"def score_fn(self):\n raise NotImplementedError()", "def score(self, score):\n\n self._score = score", "def score(self, score):\n\n self._score = score", "def score(self, score):\n\n self._score = score", "def add_score(score):\n global SCORE\n SCORE = SCORE + score\n # update the display\n mvaddstr(1, 2, \"Score:\", color_pair(HEADING_COLOUR) | A_BOLD)\n mvaddstr(1, 9, \"%d\" % SCORE, color_pair(TEXT_COLOUR) | A_BOLD)", "def change_score(self, change: float=1):\n self._score += change", "def change_score(self, change: float = 1):\n self._score += change", "def __init__(self):\r\n self.score = 0", "def get_score(self, a, b):\n ### FILL IN ###", "def get_score(self):\n for response in self.response_list:\n self.score += response.get_score", "def _tally(self, score):\n self._score[self._turn] += score", "def increase_score(self, increase):\n if increase > 0:\n self.__score += increase", "def update_score(self, score_point: int):\r\n self._score_point = score_point\r\n self._update_score() # change the visual display of points for the player\r", "def update_score(self, node, addToScore):\r\n current_score = 0\r\n score_string = self.parser.getAttribute(node, 'gravityScore')\r\n if score_string:\r\n current_score = int(score_string)\r\n\r\n new_score = current_score + addToScore\r\n self.parser.setAttribute(node, \"gravityScore\", str(new_score))", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def update_score(self, node, addToScore):\n current_score = 0\n score_string = self.parser.getAttribute(node, 'gravityScore')\n if score_string:\n current_score = int(score_string)\n\n new_score = current_score + addToScore\n self.parser.setAttribute(node, \"gravityScore\", str(new_score))", "def reset_score(self):\n self._score = p.params['initial_score']", "def update_score_from_cmd(self, new_score, prev_score):\r\n if new_score is None:\r\n return # No change\r\n \r\n player = new_score[0]\r\n score = new_score[1]\r\n player.set_score(score)", "def enter_game_scores():\n pass", "def patch_user_score(user_id):\n user = User.query.get(user_id)\n\n if user is None:\n abort(422)\n\n try:\n\n old_score = user.score\n score = request.json.get(\"score\")\n\n if score is not None:\n user.score += int(score)\n\n user.update()\n\n except AttributeError:\n abort(400)\n\n response = jsonify(\n {\n \"success\": True,\n \"updated_user_id\": user_id,\n \"old_score\": old_score,\n \"new_score\": user.score,\n }\n )\n\n return response", "def updateScore(self,ability,amount):\n abilities = {'str':'strength','dex':'dexterity',\n 'con':'constitution','int':'intelligence',\n 'wis':'wisdom','cha':'charisma',\n 'hp':'hit points'}\n if ability == 'str':\n self.str += amount\n print \"You added {0} point(s) to the {1} stat.\".format(amount,abilities[ability])\n elif ability == 'dex':\n self.dex += amount\n print \"You added {0} point(s) to the {1} stat.\".format(amount,abilities[ability])\n elif ability == 'con':\n self.con += amount\n print \"You added {0} point(s) to the {1} stat.\".format(amount,abilities[ability])\n elif ability == 'int':\n self.int += amount\n print \"You added {0} point(s) to the {1} stat.\".format(amount,abilities[ability])\n elif ability == 'wis':\n self.wis += amount\n print \"You added {0} point(s) to the {1} stat.\".format(amount,abilities[ability])\n elif ability == 'cha':\n self.cha += amount\n print \"You added {0} point(s) to the {1} stat.\".format(amount,abilities[ability])\n elif ability == 'hp':\n 
self.hp += amount\n print \"You added {0} point(s) to the {1} stat.\".format(amount,abilities[ability])\n else:\n print \"Please use 'str','dex','con','int','wis', or 'cha' as input.\"", "def updateScore(self, node, addToScore):\n currentScore = 0\n scoreString = node.attrib.get('gravityScore')\n if scoreString:\n currentScore = int(scoreString)\n \n newScore = currentScore + addToScore\n node.set(\"gravityScore\", str(newScore))", "def mc_update_scores(scores, board, player):\n dim = board.get_dim()\n winner = board.check_win()\n other_player = provided.switch_player(player)\n \n if winner == provided.DRAW:\n ratio = {player: 0, other_player: 0, 1: 0}\n elif winner == player:\n ratio = {player: 0 + SCORE_CURRENT, other_player: 0 - SCORE_OTHER, provided.EMPTY: 0}\n elif winner == other_player:\n ratio = {player: 0 - SCORE_CURRENT, other_player: 0 + SCORE_OTHER, provided.EMPTY: 0}\t\n \n for valx in range(dim):\n for valy in range(dim): \n scores[valx][valy] += ratio[board.square(valx, valy)] \n return scores", "def score_plus_one():\r\n # Retrieve the query parameter postid\r\n postid = request.values.get(\"postid\")\r\n con = get_db()\r\n con.execute(\"\"\"\r\n UPDATE posts SET score=score+1 WHERE postid=?;\r\n \"\"\",\r\n (postid,))\r\n con.commit()\r\n con.close()\r\n return redirect(url_for(\"display_top\"))", "def update_scoreboard(self):\n self.clear()\n self.goto(-(WIDTH//6), (HEIGHT//2-30))\n self.write(self.l_score, align = 'center', font = ('Courier', 20, 'normal'))\n self.goto((WIDTH//6), (HEIGHT//2-30))\n self.write(self.r_score, align = 'center', font = ('Courier', 20, 'normal'))", "def add_to_score(self, to_add):\n self.score += to_add", "def scoring(self):\n return -100 if self.loss_condition() else 0", "def _score_has_changed(self):\n print('The score for {} has changed'.format(self.team))\n self.relay_controller.activate_solenoid()", "def increase(self, points):\n self.score += points", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def update_score(wrd):\r\n\r\n if wrd not in correct_words.get(0, 'end'):\r\n correct_words.insert(tk.END, wrd)\r\n current = int(score['text'][7:])\r\n new = len(wrd) ** 2\r\n score.configure(text=f\"Score: {current + new}\")\r\n word_display['fg'] = 'green'\r\n else:\r\n word_display['fg'] = 'orange'", "def score(self, X, y):\n ...", "def update_score(self):\n isCrossed = np.any([self.bird.x == (pipe[0] + self.args.pipe_width//2) for pipe in self.env.pipes])\n \n if isCrossed:\n # update the score\n self.score += 1\n # display the new score\n if not self.muteDisplay:\n display_info(self.score, self.highscore, text_handle=self.text_score) \n \n return isCrossed" ]
[ "0.88064134", "0.8144664", "0.77495396", "0.76195884", "0.7563196", "0.74926865", "0.7427741", "0.73870414", "0.7202365", "0.7133871", "0.710654", "0.7085516", "0.70635676", "0.706069", "0.7026134", "0.7020511", "0.70177984", "0.7006921", "0.70053643", "0.6973539", "0.6971714", "0.6970598", "0.69332707", "0.69272965", "0.69267267", "0.69174206", "0.6907406", "0.6886231", "0.6883637", "0.6872622", "0.6848009", "0.68184245", "0.681506", "0.6776789", "0.6765204", "0.6764481", "0.6746868", "0.6742517", "0.67098844", "0.6702237", "0.6699947", "0.6654376", "0.6641059", "0.6635106", "0.66282797", "0.6620385", "0.6603393", "0.66012895", "0.6600429", "0.65999675", "0.65917903", "0.6580032", "0.65773445", "0.6577074", "0.6555604", "0.655079", "0.6542014", "0.65289015", "0.65156394", "0.6503843", "0.64979124", "0.6472847", "0.6472847", "0.6472847", "0.64717746", "0.64697564", "0.6464968", "0.6461404", "0.6458701", "0.64582646", "0.64525", "0.64464414", "0.6445313", "0.64375734", "0.6432673", "0.6432673", "0.6432673", "0.6429818", "0.64144546", "0.6405388", "0.6405321", "0.639378", "0.6390645", "0.639054", "0.63893795", "0.6383756", "0.63830674", "0.63829", "0.63788", "0.63717", "0.63706493", "0.6366984", "0.6366984", "0.6366984", "0.6366984", "0.6366984", "0.6366984", "0.63622904", "0.6360338", "0.63563365" ]
0.63917595
82
return a move for the machine player in the form of a (row, column) tuple.
def mc_move(board, player, trials):
    grid_of_scores = [[0 for dummy_i in range(board.get_dim())] for dummy_j in range(board.get_dim())]
    test_board = board.clone()
    for dummy_i in range(trials):
        mc_trial(test_board, player)
        mc_update_scores(grid_of_scores, test_board, player)
        test_board = board.clone()
    best_move = get_best_move(board, grid_of_scores)
    return best_move
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_move(board, player):\r\n row, col = 0, 0\r\n return row, col", "def get_move(board, player):\n row, col = 0, 0\n return row, col", "def get_ai_move(board, player):\r\n row, col = 0, 0\r\n return row, col", "def get_ai_move(board, player):\n row, col = 0, 0\n return row, col", "def getMove(self, board):\n pass", "def move(self, row, col, player):", "def get_move(self, board):\n while True:\n col = random.randint(0, board.width)\n row = board.try_move(col)\n\n if row >= 0:\n break\n\n return row, col", "def make_move(board: Connect4Board) -> \"(row, col)\":\r\n\r\n while True:\r\n\r\n try:\r\n\r\n print('\\nPlease Specify your move. Enter the number column of a cell on the board.')\r\n print('-'*85)\r\n \r\n col = Connect4GameUI.move_col(board)\r\n row = Connect4GameUI._get_valid_row(board, col)\r\n print(row,col)\r\n return row, col\r\n\r\n break\r\n\r\n except:\r\n print('\\nInvalid move!!!')\r\n print('Please try it again.')", "def make_move(board, picked_column, player):\n row = find_first_free_cell(board, picked_column)\n board[row][picked_column] = player\n return board, row", "def update(self, move):\n\n if not 0 <= move < 7:\n raise InvalidMove\n\n placed = False\n x = None\n y = None\n\n for row in reversed(xrange(self._rows)):\n if not self._board[row][move]:\n self._board[row][move] = self.current_player\n placed = True\n x = move\n y = row\n break\n\n if not placed:\n raise InvalidMove\n\n return (x, y)", "def _get_player_move(self) -> Tile:\n if not self.game_state:\n raise RuntimeError(\"Cannot call get_player_move when the game has not started!\")\n current_player = next(player for player in self.player_list if player.name == self.current_turn.name)\n if current_player is None:\n raise RuntimeError(\"Attempted to get player move from a player who does not exist!\")\n return current_player.move()", "def get_ai_move(board):\n return Connect4MiniMax.get_move(board)", "def get_move(moves):\n pass", "def get_move(self):\n if self._difficulty == 0:\n return self._get_easy_move()\n else:\n # Different stategies/difficulties can be attached here\n return", "def move_wrapper(board, player, trials):\r\n move = mm_move(board, player)\r\n assert move[1] != (-1, -1), \"returned illegal move (-1, -1)\"\r\n return move[1]", "def move_wrapper(board, player, trials):\r\n move = mm_move(board, player)\r\n assert move[1] != (-1, -1), \"returned illegal move (-1, -1)\"\r\n return move[1]", "def get_move(self, board, possible_moves, player_1_or_2):\n\n # Given a Tic-Tac-Toe 3x3 board position where 1 => current player's square,\n # -1 => opponent's square, 0 => blank square,\n # this will return the current player's best move [as the x and y indexes into \n # the board array.]\n # The second input parameter, player_1_or_2, is 1 or -1 to indicate which player's\n # move it is. \n \n print('RL ~ Current player 1 or 2 (= -1):', player_1_or_2)\n \n print('RL ~ Current board: ')\n print(board)\n \n print('RL ~ possible_moves:', possible_moves)\n\n next_move = () \n\n # This will be the best move i.e. 
the move with the current\n # value of highest winning probability except when it is making exploratory\n # (as opposed to greedy) moves.\n\n next_move = self.board_position_states.get_next_move(board, possible_moves, self.current_player)\n\n next_move_location_tuple = possible_moves[next_move]\n board[next_move_location_tuple] = self.current_player\n\n self.list_board_positions_moved_to.append(board.copy()) # This board that we are\n # appending here could be changed by the next line of code, for example.\n # Hence we need to make a copy\n\n board[next_move_location_tuple] = 0 # undo the move in case it affects the calling method.\n\n return next_move", "def move_wrapper(board, player, trials):\n move = mm_move(board, player)\n assert move[1] != (-1, -1), \"returned illegal move (-1, -1)\"\n return move[1]", "def move_wrapper(board, player, trials):\n move = mm_move(board, player)\n assert move[1] != (-1, -1), \"returned illegal move (-1, -1)\"\n return move[1]", "def move(self, row: int, col: int, player: int) -> int:\n n = self.n\n if player == 1:\n self.rows_1[row] += 1\n self.cols_1[col] += 1\n if player == 2:\n self.rows_2[row] += 1\n self.cols_2[col] += 1\n if row == col:\n self.diag1[row] = player\n if row + col + 1 == n:\n self.diag2[row] = player\n f = 0\n g = 0\n for i in range(n):\n if self.rows_1[row] == n or self.cols_1[col] == n:\n return 1\n if self.rows_2[row] == n or self.cols_2[col] == n:\n return 2 \n if self.diag1[i] != self.diag1[0]:\n f = 1\n if self.diag2[i] != self.diag2[0]:\n g = 1\n if f == 0:\n return self.diag1[0]\n if g == 0:\n return self.diag2[0]\n return 0", "def getOpponentMove(move, playerBoard, oppBoard, playerSeeds, oppSeeds):\r\n pass", "def move(self, row, col, player):\r\n if player == 1:\r\n self.mat[row][col] = 1\r\n else:\r\n self.mat[row][col] = -1\r\n if self.checkrow(player,row) or self.checkcol(player,col):\r\n return player\r\n if row == col or row + col == self.size-1:\r\n if self.checkdiag(player):\r\n return player\r\n return 0", "def get_move(self, game):\n return", "def _get_move(self) -> Tile:\n if not self.game_state:\n raise RuntimeError(\"Cannot call get_move when the game has not started!\")\n if isinstance(self.current_turn, Player):\n return self._get_player_move()\n elif isinstance(self.current_turn, Enemy):\n return self._get_enemy_move()\n else:\n raise TypeError(\"You're trying to move something that isn't a character or an adversary.\")", "def getMove(self, board):\r\n raise NotImplementedError(\"must be implemented in subclass\")", "def human_move(self):\n move = -1\n while move < 1 or move > self.BOARD.COLUMNS:\n try:\n move = input(\"{}: Choose a column>>> \".format(self.NAME))\n\n for i in self.QUIT:\n if str(move) == i:\n return None\n\n move = int(move)\n\n except KeyboardInterrupt:\n exit(0)\n except ValueError:\n pass\n if self.PIECE_COUNT <= 0:\n # cannot do anything\n self.STATE == Spectator.State.INACTIVE\n return None\n else:\n return move", "def move(self, row: int, col: int, player: int):\n def addup(dict_name, invalid_set, another_invalid, locx, locy):\n if locx == locy:\n diag_name = (1,1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n if locx == self.tar-1-locy:\n diag_name = (-1, -1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n curcol = (locy, None)\n currow = (None, locx)\n if curcol not in 
invalid_set:\n dict_name[curcol] += 1\n if dict_name[curcol] == self.tar:\n return player\n another_invalid.add(curcol)\n if currow not in invalid_set:\n dict_name[currow] += 1\n if dict_name[currow] == self.tar:\n return player\n another_invalid.add(currow)\n return 0\n res = 0\n if (row, col) not in self.walked:\n if player == 1:\n res = addup(self.p1, self.invalid_1, self.invalid_2, row, col)\n if player == 2:\n res = addup(self.p2, self.invalid_2, self.invalid_1, row, col)\n self.walked.add((row, col))\n return res", "def firstMove(board):\r\n x = board.size / 2\r\n return (x, x)", "def get_player_move(self, roundNum, player):\n return (self.moves[roundNum])[player]", "def get_move(board, player):\n #optimization to always pick the top-left corner on an empty board\n if set(board) == set([config.NO_PLAYER]):\n return 0\n result = minimax(board, player, 2, config.NEG_INF, config.INF)\n return result.index", "def computer_move(board,move,player):\r\n com_execution(board, move, player)", "def next_move(self, board):\n \n return self.best_move(self.score_columns(board))", "def mm_move(board, player):\r\n if board.check_win() == provided.PLAYERX:\r\n return SCORES[provided.PLAYERX],(-1,-1)\r\n elif board.check_win() == provided.PLAYERO:\r\n return SCORES[provided.PLAYERO],(-1,-1)\r\n elif board.check_win() == provided.DRAW:\r\n return SCORES[provided.DRAW],(-1,-1)\r\n else:\r\n empty_tuple_list = board.get_empty_squares()\r\n score_pos_tuple_list = []\r\n best_score = None\r\n best_pos = None\r\n for idx1 in range(len(empty_tuple_list)):\r\n empty_tuple = empty_tuple_list[idx1]\r\n board_clone = board.clone()\r\n board_clone.move(empty_tuple[0],empty_tuple[1],player)\r\n score_pos_tuple = mm_move(board_clone,provided.switch_player(player))\r\n score_pos_tuple_list.append(score_pos_tuple)\r\n\r\n #decide best score and pos fast!!!\r\n if score_pos_tuple[0]*SCORES[player] == 1:\r\n return (score_pos_tuple[0],empty_tuple)\r\n\r\n #decide best score and pos\r\n for idx2 in range(len(score_pos_tuple_list)):\r\n if idx2 == 0:\r\n best_score = score_pos_tuple_list[idx2][0]\r\n best_pos = empty_tuple_list[idx2]\r\n else:\r\n if score_pos_tuple_list[idx2][0]*SCORES[player] > best_score*SCORES[player]:\r\n best_score = score_pos_tuple_list[idx2][0]\r\n best_pos = empty_tuple_list[idx2]\r\n\r\n return (best_score,best_pos)", "def move(self, row, col, player):\n if self.winning == True:\n return\n self.matrix[row][col] = player\n n = len(self.matrix)\n indicator = True\n for i in range(n):\n if self.matrix[row][i] != player:\n indicator = False\n break\n if indicator == True:\n self.winning = True\n return player\n \n indicator = True\n for i in range(n):\n if self.matrix[i][col] != player:\n indicator = False\n break\n if indicator == True:\n self.winning = True\n return player\n \n if row == col:\n indicator = True\n for i in range(n):\n if self.matrix[i][i] != player:\n indicator = False\n break\n if indicator == True:\n self.winning = True\n return player\n if row + col == n - 1:\n indicator = True\n for i in range(n):\n if self.matrix[i][n - 1 - i] != player:\n indicator = False\n break\n if indicator == True:\n self.winning = True\n return player\n return 0", "def mm_move(board, player): \r\n if board.check_win() != None:\r\n score = SCORES[board.check_win()]\r\n return score, (-1,-1)\r\n else:\r\n best_score = -2\r\n score_list = []\r\n move_list = []\r\n for each_cell in board.get_empty_squares():\r\n passboard = board.clone()\r\n passboard.move(each_cell[0], each_cell[1], player) \r\n 
other_player = provided.switch_player(player)\r\n nextmove = mm_move(passboard, other_player)\r\n score_list.append(nextmove[0])\r\n move_list.append(nextmove[1])\r\n if nextmove[0] == SCORES[player]:\r\n return nextmove[0], each_cell\r\n #print score_list\r\n #print move_list\r\n #print \"\"\r\n if player == provided.PLAYERX:\r\n best_score = max(score_list)\r\n else:\r\n best_score = min (score_list)\r\n best_move = move_list[score_list.index(best_score)]\r\n return best_score, best_move", "def simul_move(self, column, player):\n\n new_rack = [x[:] for x in self.layout] #copy this state's layout\n #travel down this column\n r = 0\n while new_rack[column][r] !=0:\n r+=1\n if r >= self.height:\n return None\n new_rack[column][r] = player\n new_state = State(new_rack, player)\n return new_state", "def get_move(self, board):\n # First, check if we can win in the next move\n winning_move = self.get_winning_move(board, self.letter)\n if winning_move is not None:\n return winning_move\n # Check if the player could win on their next move, and block them.\n blocking_move = self.get_winning_move(board, self.opponent_letter)\n if blocking_move is not None:\n return blocking_move\n # Try to take one of the corners, if they are free.\n corner_move = self.move_in_a_corner(board)\n if corner_move is not None:\n return corner_move\n # Try to take the center, if it is free.\n if board.size % 2 == 1:\n if board.is_position_availible(board.letters[board.size // 2]\n + board.numbers[board.size // 2]):\n return board.letters[board.size // 2] + board.numbers[board.size // 2]\n # Move on one of the sides.\n return self.choose_random_move_from_list(board, list(board.positions.keys()))", "def find_player(self):\n for y, line in enumerate(self.maze):\n for x, character in enumerate(line):\n if character == \"m\":\n return y, x\n return None", "def mm_move(board, player):\n moves = []\n results = []\n best_score = None\n best_move = None\n \n opponet = op_player(player)\n \n if board.check_win() != None:\n \n if board.check_win() == provided.PLAYERX:\n return SCORES[provided.PLAYERX] , (-1, -1)\n \n if board.check_win() == provided.PLAYERO:\n return SCORES[provided.PLAYERO] , (-1, -1)\n \n if board.check_win() == provided.DRAW:\n return SCORES[provided.DRAW] , (-1, -1)\n \n free_steps = board.get_empty_squares()\n \n for step in free_steps:\n clone = board.clone() \n clone.move(step[0],step[1],player)\n temp = mm_move(clone,opponet)\n \n if temp != None:\n if temp[0] == SCORES[player]: \n return temp[0] , step \n else: \n results.append(temp)\n moves.append(step)\n \n for result, move in zip(results, moves): \n if result[0] * SCORES[player] > best_score:\n best_score = result[0]\n best_move = move\n return best_score, best_move", "def next_move(board, player):\n \n move_row = \"move\"\n move_column = \"move\"\n\n while not move_row.isnumeric():\n move_row = input(\"{}, pick row to place your {}. > \".format(player.name, player.char))\n while not move_column.isnumeric(): \n move_column = input(\"Pick column in row {} to place your {}. > \".format(move_row, player.char))\n\n move_row = int(move_row)\n move_column = int(move_column)\n\n move = Move(player, (move_row, move_column))\n \n # Check if move is out of bounds\n if (move_row >= len(board.current_board) or\n move_column >= len(board.current_board)):\n print(\"Move out of bounds. Choose a valid move.\")\n return board\n\n # Check if space is already used\n if board.current_board[move_row][move_column] != \"-\":\n print(\"Spot already played. 
Pick an unused space.\")\n return board\n\n board.last_move = player.name\n board.add_move(move)\n\n return board", "def parse_move(move):\n if not (len(move) == 2):\n return None, None\n try:\n row = ord(move[0].upper()) - 65\n col = int(move[1])\n except:\n return None, None\n return row, col", "def player_location(self):\n x = 0\n y = 0\n for line in self.grid:\n for i in line:\n if i == \"P\":\n return x, y\n \n y+=1\n x += 1\n y = 0", "def move(self, row, col, player):\n value = (1.5 - player) * 2\n self.rows[row] += value\n self.colums[col] += value\n if row == col:\n self.diag[0] += value\n if row + col == self.n-1:\n self.diag[1] += value\n if abs(self.rows[row]) == self.n or abs(self.colums[col]) == self.n or abs(self.diag[0]) == self.n or abs(self.diag[1]) == self.n:\n return player\n return 0", "def get_moves(self, board, player):\r\n width, height = self.board_size\r\n return self.get_moves_c(board, player, width, height)", "def firstMove(self):\n return (10, 10)", "def getMove(self, board):\r\n moves = self._getAvailableActions(board)\r\n return moves[-1]", "def moved_board(board):\n return legal_move_on(board=board).map(\n lambda (start, end): board.move(start=start, end=end),\n )", "def get_move(self, game_state: BotGameState) -> BotMove:\n return", "def move(self):\r\n his_move = random.randint(0, 2)\r\n return the_moves[his_move]", "def execute(self, row, col, action=None):\n assert action is not None, \"No action selected!\"\n\n if action == 'north':\n if (row-1) < 0 or self.board[row-1, col] == '*':\n return row, col\n elif action == 'south':\n if (row+1) >= self.N or self.board[row+1, col] == '*':\n return row, col\n elif action == 'east':\n if (col+1) >= self.M or self.board[row, col+1] == '*':\n return row, col\n elif action == 'west':\n if (col-1) < 0 or self.board[row, col-1] == '*':\n return row, col\n\n return row + self.step_row[action], col + self.step_col[action]", "def move(self, row: int, col: int, player: int) -> int:\n s = -1 if player == 1 else 1\n\n self.rows[row] += s\n if abs(self.rows[row]) == self.n:\n return player\n\n self.cols[col] += s\n if abs(self.cols[col]) == self.n:\n return player\n\n if row == col:\n self.diagonals[0] += s\n if abs(self.diagonals[0]) == self.n:\n return player\n\n if (row + col) == self.n - 1:\n self.diagonals[1] += s\n if abs(self.diagonals[1]) == self.n:\n return player\n\n return 0", "def move(self, player, row, col):\n\t\tif player != self.turn: # not this player's turn\n\t\t\tprint(\"Oops! It is not your turn, Player%d\" % player)\n\t\t\treturn\n\t\tif self.state[row][col]: # already a move in this position\n\t\t\tprint(\"Oops! 
It seems that someone has already moved here, Player%d\" % player)\n\t\t\treturn\n\t\telse: # empty spot\n\t\t\tself.state[row][col] = player\n\t\t\tself.checkForWin(self.state, player)\n\t\t\tself.changeTurn()", "def _get_enemy_move(self) -> Tile:\n if not self.game_state:\n raise RuntimeError(\"Cannot call get_enemy_move when the game has not started!\")\n current_enemy = next(enemy for enemy in self.enemy_list if enemy == self.current_turn)\n if current_enemy is None:\n raise RuntimeError(\"Attempted to get a move from a nonexistent enemy!\")\n return current_enemy.move()", "def move(self, row, col, player):\n offset = player * 2 - 3 # 1 or -1\n self.row[row] += offset\n self.col[col] += offset\n if row == col:\n self.diag += offset\n if row + col == self.n - 1:\n self.anti_diag += offset\n if self.n in [self.row[row], self.col[col], self.diag, self.anti_diag]:\n return 2\n if -self.n in [self.row[row], self.col[col], self.diag, self.anti_diag]:\n return 1\n return 0", "def move(self, row, col, player):\n score = 1 if player == 1 else -1\n self.rows[row] += score\n self.cols[col] += score\n if row == col:\n self.diagonal1 += score\n if row + col == self.n - 1:\n self.diagonal2 += score\n win = {self.rows[row], self.cols[col], self.diagonal1, self.diagonal2}\n if self.n in win or -self.n in win:\n return player\n return 0", "def get_move(self, i):\n # Exception if not (0 <= i < self.length)\n return self._moves[i]", "def move(self, board):\n winning_move = self.find_winning_move(board)\n if winning_move != -1:\n return winning_move\n\n blocking_move = self.find_blocking_move(board)\n if blocking_move != -1:\n return blocking_move\n\n if board[4] == \"4\": # center square is open\n return 4\n else:\n return self.prng.choice(board.available())", "def getMove(self):\n while True:\n try:\n init = tuple(int(str.strip()) for str in raw_input('Choose the initial position of your move: ').split(','))\n break\n except ValueError:\n print(\"Input is not integer.\")\n\n while (len(init) != 2) or (init[0] not in range(1, self.grid.width+1)) or (init[1] not in range(1, self.grid.height+1)):\n print 'Initial position is not valid.'\n init = tuple(int(str.strip()) for str in raw_input('Choose the initial position of your move: ').split(','))\n\n while True:\n try:\n dest = tuple(int(str.strip()) for str in raw_input('Choose the destination position of your move: ').split(','))\n break\n except ValueError:\n print(\"Input is not integer.\")\n\n while (len(dest) != 2) or (dest[0] not in range(1, self.grid.width+1)) or (dest[1] not in range(1, self.grid.height+1)):\n print 'Destination position is not valid.'\n dest = tuple(int(str.strip()) for str in raw_input('Choose the destination position of your move: ').split(','))\n\n return (init, dest)", "def get(self, layer, row, column):\n if layer < 0 or row < 0 or column < 0:\n raise game.InvalidMoveException('The position ({}) is outside of the board'.format([layer, row, column]))\n try:\n return self._state['visible']['board'][layer][row][column]\n except:\n raise game.InvalidMoveException('The position ({}) is outside of the board'.format([layer, row, column]))", "def get_move(self, board, color_to_play):\n move = self.MCTS.get_move(board, color_to_play, self.n_simualtions_per_move, self.exploration)\n self.update(move)\n return move", "def make_move(board, player_num, row, col):\n board[row][col] = 'X' if player_num == 1 else 'O'", "def move(self, row, col, player):\n score = self.score[player]\n win_score = self.win[player]\n self.rows[row] += score\n 
if self.rows[row] == win_score:\n return player\n\n self.columns[col] += score\n if self.columns[col] == win_score:\n return player\n if col - row == 0:\n self.diagonal[1] += score\n if self.diagonal[1] == win_score:\n return player\n if col + row == self.size - 1:\n self.diagonal[0] += score\n if self.diagonal[0] == win_score:\n return player\n return 0", "def move(self, row, col, player):\n toadd = 1 if player == 1 else -1\n \n self.row[row] += toadd\n self.col[col] += toadd\n if row == col: self.diagonal += toadd\n if col == self.n - row -1 : self.antidiag += toadd\n \n if abs(self.row[row]) == self.n or abs(self.col[col]) == self.n or abs(self.diagonal) == self.n or abs(self.antidiag) == self.n:\n return player\n else:\n return 0", "def move(self, row: int, col: int, player: int) -> int:\n if player == 1:\n self.newList[row][col] = 1\n self.colSum[col] += 1\n self.rowSum[row] += 1\n if row == col:\n self.diag += 1\n if row + col == (self.n - 1):\n self.revDiag += 1\n if self.rowSum[row] == self.n or self.colSum[col] == self.n or self.diag == self.n or self.revDiag == self.n:\n return 1\n if player == 2:\n self.newList[row][col] = -1\n self.colSum[col] -= 1\n self.rowSum[row] -= 1\n if row == col:\n self.diag -= 1\n if row + col == (self.n - 1):\n self.revDiag -= 1\n if self.rowSum[row] == -self.n or self.colSum[col] == -self.n or self.diag == -self.n or self.revDiag == -self.n:\n return 2\n \n return 0", "def make_move(self, board: Board) -> int:\n move, evalutation = self.minimax(board, -math.inf, math.inf, self._depth, 1)\n return move", "def move(self, direction: Direction) -> \"TilePosition\":\r\n return TilePosition(self.tile_x + direction.dx, self.tile_y + direction.dy)", "def make_move(self, column):\r\n trans_board = numpy.transpose(self.__board[::1]) # transpose the\r\n # board so that columns are now arrays\r\n if 0 not in trans_board[column] or self.get_winner() or column >= \\\r\n self.BOARD_COLUMNS or column < 0:\r\n # column is full, illegal or the game is already finished\r\n return self.ILLEGAL_MOVE # exception?\r\n else:\r\n reversed_col = list(reversed(trans_board[column]))\r\n for hole in reversed_col:\r\n if hole == 0:\r\n row_i = self.BOARD_ROWS - 1 - reversed_col.index(hole)\r\n self.__board[row_i][column] = self.__cur_player\r\n winner = self.get_winner()\r\n if winner: # is not none\r\n return winner\r\n self.__switch_player()", "def move(self):\r\n move = None\r\n if self.last_move is None:\r\n move = rockyman.move(self)\r\n else:\r\n index = the_moves.index(self.last_move) + 1\r\n if index >= len(the_moves):\r\n index = 0\r\n move = the_moves[index]\r\n self.last_move = move\r\n return move", "def result(self, row, col, move):\n start = (row, col)\n end = self.updateCell(row, col, move)\n\n return self.change(start, end)", "def GetMove(self, board):\n move = None\n while True:\n move = input(\"Enter coordinates as XY (e.g. 
21): \")\n if board[Game.GetIndexFromCoords(*move)] == \" \":\n return Game.GetIndexFromCoords(*move)\n else:\n print(\"Space occupied.\")", "def next_move(self, board):\r\n scores = self.scores_for(board)\r\n self.num_moves += 1\r\n return self.max_score_column(scores)", "def __get_random_player_position(self) -> Tuple[int, int]:\n no_player_position = True\n while no_player_position:\n for row in range(0, self.__labyrinth.labyrinth_height):\n for col in range(0, self.__labyrinth.labyrinth_width):\n if self.__labyrinth[row][col] == Labyrinth.FLOOR and no_player_position:\n self.__row_position = row\n self.__col_position = col\n\n if len(self.__path_to_end()) > self.__labyrinth.labyrinth_width and \\\n len(self.__path_to_end()) > self.__labyrinth.labyrinth_height:\n self.__labyrinth[row][col] = Labyrinth.START\n no_player_position = False\n\n return self.__row_position, self.__col_position", "async def move(self, board, valid_actions):\n self._move = None\n output_move_row = Value('d', -1)\n output_move_column = Value('d', 0)\n try:\n # await self.search(board, valid_actions) \n p = Process(\n target=self.search, \n args=(\n self._color, board, valid_actions, \n output_move_row, output_move_column))\n p.start()\n while p.is_alive():\n await asyncio.sleep(0.1)\n self._move = np.array([output_move_row.value,output_move_column.value],dtype=np.int32)\n except asyncio.CancelledError as e:\n print('The previous player is interrupted by a user or a timer.')\n except Exception as e:\n print(type(e).__name__)\n print('move() Traceback (most recent call last): ')\n traceback.print_tb(e.__traceback__)\n finally:\n p.kill()\n self._move = np.array(\n [output_move_row.value, output_move_column.value],\n dtype=np.int32)\n return self.best_move", "def game_move(self):\n\t\t# make a note of the player who isn't playing\n\t\tfor x in self.players.keys():\n\t\t\tif x != self.nextPlayer:\n\t\t\t\totherPlayer = x\n\t\t\t\tbreak\n\t\t\n\t\t\n\t\t# If there are no remaining moves for this player, either the other\n\t\t# player has won or it's a draw\n\t\t# self.expansions = 1\n\t\tself.expansionCounter.count = 1\n\t\tif len(self.state.successors()) == 0:\n\t\t\tif self.state.is_win(otherPlayer):\n\t\t\t\treturn (None, otherPlayer)\n\t\t\telse:\n\t\t\t\t# None, None for a draw\n\t\t\t\treturn (None, None)\n\t\t\t\n\t\t# allow the player max_expansions for this turn\n\t\t# self.expansions = self.max_expansions\n\t\tself.expansionCounter.count = self.max_expansions\n\t\t\n\t\tnextPlayer = self.players[self.nextPlayer]\n\t\tlastPlayer = None\n\t\t\n\t\t# player may throw an exception\n\t\ttry:\n\t\t\t# get player's move, make sure we don't modify the current state\n\t\t\tmove = nextPlayer.move(self.state.get_player_state(self.nextPlayer), \n\t\t\t\t\t self.visitedStates)\n\t\t\t# player may give up\n\t\t\tif move.is_forfeit():\n\t\t\t\tprint \"Player\", self.nextPlayer, \"forfeits.\"\n\t\t\t\treturn (None, otherPlayer)\n\t\t\t# player may return illegal move\n\t\t\tif not self.state.is_valid_move(move):\n\t\t\t\tprint \"Illegal move returned by player\", self.nextPlayer, \\\n\t\t\t\t\t\t\"(\", self.players[self.nextPlayer].get_name(), \")\"\n\t\t\t\treturn (move, otherPlayer)\n\t\t\t# this player is now last player\n\t\t\tlastPlayer = self.nextPlayer\n\t\t\t# get the new next player and make the indicated move\n\t\t\tself.nextPlayer, clear = self.state.move(move, True)\n\t\t\tif clear:\n\t\t\t\tself.clear_repeat()\n\t\texcept:\n\t\t\tprint \"Exception thrown by player\", self.nextPlayer, \\\n\t\t\t\t\t\t\"(\", 
self.players[self.nextPlayer].get_name(), \")\"\n\t\t\tprint\n\t\t\ttraceback.print_exc()\n\t\t\tprint\n\t\t\treturn (None, otherPlayer)\n\t\t\n\t\tos.chdir(self.wd)\n\t\t\n\t\t# may be a repeated state IF the game cycles\n\t\tif self.is_repeat(self.state):\n\t\t\tself.state.handle_cycle()\n\t\t# otherwise, if the game cycles, note that we've been here\n\t\telif self.state.repeats():\n\t\t\tself.visitedStates.add(self.state.repeated_rep())\n\t\t\t\n\t\t# player may have sacrificed the game\n\t\tif self.state.is_win(otherPlayer):\n\t\t\treturn (move, otherPlayer)\n\t\t\n\t\t# player may have won\n\t\tif self.state.is_win(lastPlayer):\n\t\t\treturn (move, lastPlayer)\n\t\t\n\t\t# nobody's won or lost yet\n\t\treturn (move, None)", "def get_current_move(self):\n x_count = self.game_board.count('X')\n o_count = self.game_board.count('O')\n if x_count <= o_count:\n return 'X'\n return 'O'", "def get_next_move(self):\n return int(input('Enter your move: '))", "def _calculate_move_location(self, direction):\n current_row = self._current_loc.get_row()\n current_column = self._current_loc.get_column()\n\n # Calculate the new location for a left move\n if (direction == \"l\"):\n return Location(current_row, current_column - 1)\n # Calculate the new location for an up move\n elif (direction == \"u\"):\n return Location(current_row - 1, current_column)\n # Calculate the new location for a right move\n elif (direction == \"r\"):\n return Location(current_row, current_column + 1)\n # Calculate the new location for a down move\n elif (direction == \"d\"):\n return Location(current_row + 1, current_column)\n return Location()", "def get_winning_move(self, board, given_letter):\n diagonal_1 = board.check_diagonal_1(given_letter)\n if diagonal_1[0] == board.size - 1:\n if board.is_position_availible(diagonal_1[1][0]):\n return diagonal_1[1][0]\n\n diagonal_2 = board.check_diagonal_2(given_letter)\n if diagonal_2[0] == board.size - 1:\n if board.is_position_availible(diagonal_2[1][0]):\n return diagonal_2[1][0]\n\n for number in board.numbers:\n row = board.check_row(number, given_letter)\n if row[0] == board.size - 1:\n if board.is_position_availible(row[1][0]):\n return row[1][0]\n\n for letter in board.letters:\n column = board.check_column(letter, given_letter)\n if column[0] == board.size - 1:\n if board.is_position_availible(column[1][0]):\n return column[1][0]\n return None", "def move(self, board):\n\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n while not board.check_move(move[0], move[1]):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n return move", "def get_move(self, game, time_left):\r\n legal_moves = game.get_legal_moves()\r\n if not legal_moves:\r\n return (-1, -1)\r\n\r\n print(game.to_string()) #display the board for the human player\r\n print(('\\t'.join(['[%d] %s' % (i, str(move)) for i, move in enumerate(legal_moves)])))\r\n\r\n valid_choice = False\r\n while not valid_choice:\r\n try:\r\n index = int(input('Select move index:'))\r\n valid_choice = 0 <= index < len(legal_moves)\r\n\r\n if not valid_choice:\r\n print('Illegal move! Try again.')\r\n\r\n except ValueError:\r\n print('Invalid index! 
Try again.')\r\n\r\n return legal_moves[index]", "def get_move(self, game, time_left):\r\n legal_moves = game.get_legal_moves()\r\n if not legal_moves:\r\n return (-1, -1)\r\n\r\n print(game.to_string()) #display the board for the human player\r\n print(('\\t'.join(['[%d] %s' % (i, str(move)) for i, move in enumerate(legal_moves)])))\r\n\r\n valid_choice = False\r\n while not valid_choice:\r\n try:\r\n index = int(input('Select move index:'))\r\n valid_choice = 0 <= index < len(legal_moves)\r\n\r\n if not valid_choice:\r\n print('Illegal move! Try again.')\r\n\r\n except ValueError:\r\n print('Invalid index! Try again.')\r\n\r\n return legal_moves[index]", "def makeMove(self, move, player):", "def get_move(self, game, time_left):\n legal_moves = game.get_legal_moves()\n if not legal_moves:\n return (-1, -1)\n\n print(game.to_string()) # display the board for the human player\n print(('\\t'.join(['[%d] %s' % (i, str(move)) for i, move in enumerate(legal_moves)])))\n\n valid_choice = False\n while not valid_choice:\n try:\n index = int(input('Select move index:'))\n valid_choice = 0 <= index < len(legal_moves)\n\n if not valid_choice:\n print('Illegal move! Try again.')\n\n except ValueError:\n print('Invalid index! Try again.')\n\n return legal_moves[index]", "def choose_absolute_move(self):\n move = self.choose_move()\n if self.player_name == 'A':\n return move\n # Player B, revert the IDs\n return (move + 6) % 12", "def getMove(player):\n\n\tsquares = { \"1\":1, \"2\":2, \"3\":3, \"4\":4, \"5\":5, \"6\":6, \"7\":7, \"8\":8, \"9\":9 }\n\tchoice = input(\"Player \" + str(player + 1) + \", pick a square (1-9): \")\n\ttry:\n\t\treturn squares[choice]\n\texcept KeyError:\n\t\tpass", "def getMove(self, grid):\n\n cells = grid.getAvailableCells()\n\n while True:\n moveInput = input(\"Enter your move: \")\n \n if re.match(r\"place \\d,\\d\", moveInput) or re.match(r\"erase \\d,\\d\", moveInput):\n move = moveInput.split()\n action = move[0]\n pos = move[1].split(',')\n\n if (action == \"place\" and (int(pos[0]), int(pos[1])) in cells) or (action == \"erase\" and grid.getCellValue((int(pos[0]), int(pos[1]))) != 'T'):\n return [move[0], (int(pos[0]), int(pos[1]))]\n \n elif moveInput == \"restart\":\n return -1\n \n elif moveInput == \"show solution\":\n return 0\n \n print(\"Move not valid\")", "def move(self, row, col, player):\n if self.winning == True:\n return\n if player == 1:\n val = 1\n else:\n val = -1\n self.row[row] += val\n self.col[col] += val\n if row == col:\n self.diagonal += val\n n = len(self.row)\n if row + col == n - 1:\n self.antidiagonal += val\n if abs(self.row[row]) == n or abs(self.col[col]) == n or abs(self.diagonal) == n or abs(self.antidiagonal) == n:\n self.winning = True\n return player\n return 0", "def get_next_move(self):\n if self.move == 'X':\n return 'O'\n return 'X'", "def get_move(self, board):\n\n valid_moves = [move for move in board.legal_moves]\n is_valid_move = False\n while not is_valid_move:\n move = input(\"Enter a valid move in uci format: \").lower()\n if len(move) == 4 or len(move) == 5:\n try:\n player_move = chess.Move.from_uci(move)\n\n if board.is_legal(player_move):\n try:\n board.push(player_move)\n return player_move\n except:\n print(\"invalid move...\")\n else:\n print(\"invalid move...\")\n except:\n print(\"invalid move...\")\n else:\n print(\"invalid move...\")", "def _EAN_coords_to_board_coords(EAN_move: str) -> (int, int):\n assert EAN_move[0] in \"abcdefgh\" and EAN_move[1] in \"12345678\", \"failed to get \" + EAN_move\n\n\n col = 
ord(EAN_move[0]) - ord('a')\n row = 8 - int(EAN_move[1])\n return row, col", "def move(self, board):\n return self.prng.choice(board.available())", "def getBoardInfo(self):\n return self.my_pos, self.opp_pos", "def make_move(self, x, y):\n player = self.get_player()\n self.__grid[y][x] = player\n\n winner, win_tiles = self.check_move(self.get_player(), x, y)\n\n self.__turns_played += 1\n\n # Check if winner has been found\n if player == winner:\n loser = MarkerType(1 - winner.value)\n self.__winner = winner\n self.__loser = loser\n self.__state = GameState.WINNER\n return GameState.WINNER, winner, loser, win_tiles\n\n # Check if board is full and tie happens\n elif self.__turns_played >= Settings.SIZE_X * Settings.SIZE_Y:\n self.__state = GameState.TIE\n return GameState.TIE, MarkerType.NONE, MarkerType.NONE, []\n\n self.__turn += 1\n return GameState.PLAYING, MarkerType.NONE, MarkerType.NONE, []", "def getMove(self, board):\r\n self.thisNumTurns += 1\r\n moves = self._getAvailableActions(board)\r\n return moves[random.randint(len(moves))]", "def move(self, row: int, col: int, player: int) -> int:\n self.board[row][col] = 1 if player == 1 else -1\n rowsum = sum(self.board[row])\n colsum = sum([self.board[r][col] for r in range(self.n)])\n diagsum1 = sum([self.board[i][i] for i in range(self.n)])\n diagsum2 = sum([self.board[i][-i-1] for i in range(self.n)])\n if player == 1:\n if rowsum == self.n or colsum == self.n or diagsum1 == self.n or diagsum2 == self.n:\n return 1\n else:\n if rowsum == -self.n or colsum == -self.n or diagsum1 == -self.n or diagsum2 == -self.n:\n return 2\n return 0", "def move(self, board):\n if self.name == \"Combination_Easy\":\n return self.alpha_beta_search(board, 1)\n elif self.name == \"Combination_Normal\":\n return self.alpha_beta_search(board, 2)\n elif self.name == \"Combination_Hard\":\n return self.alpha_beta_search(board, 3)\n elif self.name == \"static\":\n return self.static_player(board)\n elif self.name == \"parity\":\n return self.parity_player(board)\n elif self.name == \"mobility\":\n return self.mobility_player(board)\n elif self.name == \"pmobility\":\n return self.potential_mobility_player(board)\n elif self.name == \"corners\":\n return self.corners_player(board)\n elif self.name == \"stability\":\n return self.stability_player(board)", "def move(self, row: int, col: int, player: int) -> int:\n if not self.winner:\n p = player - 1\n \n self.rows[p][row] += 1\n self.cols[p][col] += 1\n if row == col:\n self.d[p] += 1\n if row + col + 1 == self.n:\n self.subd[p] += 1 \n \n if self.n in [self.rows[p][row], self.cols[p][col], self.d[p], self.subd[p]]:\n self.winner = player\n \n return self.winner", "def make_move(self, move, disc):\n column = move - 1\n board = self.game[\"board\"]\n # Check if this row is already full\n if board[column][0] != self.EMPTY:\n return None\n # Drop disc\n for idx, cell in enumerate(board[column]):\n if cell != self.EMPTY:\n row = idx - 1\n break\n else:\n row = idx\n board[column][row] = disc\n return (column, row)", "def get_position(self, row, column):\n position_key = \"{}{}\".format(row, column)\n return self.positions[position_key]", "def next_move(self, board):\r\n lc = [x for x in range(board.width) if board.can_add_to(x)]\r\n column = random.choice(lc)\r\n self.num_moves += 1\r\n return column", "def move(self, position, direction):\n i, j = position\n direction %= 360\n if direction == 0:\n return (i - 1, j)\n if direction == 90:\n return (i, j + 1)\n if direction == 180:\n return (i + 1, j)\n if 
direction == 270:\n return (i, j - 1)\n raise ValueError(f\"Maze.move called with bad angle = {direction}\")" ]
[ "0.84373444", "0.8416715", "0.7964209", "0.79255193", "0.72848445", "0.7280993", "0.7246106", "0.7221404", "0.7153344", "0.7151742", "0.70832056", "0.7001192", "0.689266", "0.6870447", "0.6862894", "0.6862894", "0.6854077", "0.684734", "0.684734", "0.6843987", "0.68329155", "0.68203247", "0.6770778", "0.6714817", "0.67132074", "0.66938347", "0.669041", "0.6622448", "0.66188836", "0.6602306", "0.65994644", "0.6589012", "0.65562034", "0.6551946", "0.65515727", "0.65489584", "0.6544551", "0.6539214", "0.65345335", "0.6526475", "0.6521857", "0.6515578", "0.65070546", "0.64927363", "0.64697915", "0.6465213", "0.6464289", "0.64480877", "0.6437453", "0.6435053", "0.6434878", "0.6431331", "0.641295", "0.6409268", "0.6385621", "0.6369034", "0.6358359", "0.6352995", "0.6352646", "0.6351971", "0.6348789", "0.63423693", "0.63404405", "0.6333213", "0.6312075", "0.63040465", "0.630036", "0.6291445", "0.6261867", "0.62618023", "0.6258399", "0.6254781", "0.62541693", "0.6254043", "0.62482053", "0.62440354", "0.6238099", "0.62331426", "0.6218492", "0.6208191", "0.6208191", "0.6203384", "0.6197345", "0.619701", "0.6196607", "0.6177217", "0.6176298", "0.6174279", "0.617231", "0.6167701", "0.61575675", "0.6144902", "0.6144145", "0.614271", "0.61398363", "0.61383766", "0.61254436", "0.6124587", "0.612425", "0.6113153", "0.61121047" ]
0.0
-1
Train the CRF tagger based on the training data.
def _train_model(self, df_train): # type: (List[List[Tuple[Text, Text, Text, Text]]]) -> None import sklearn_crfsuite X_train = [self._sentence_to_features(sent) for sent in df_train] y_train = [self._sentence_to_labels(sent) for sent in df_train] from itertools import chain import nltk import sklearn import scipy.stats from sklearn.metrics import make_scorer from sklearn.model_selection import cross_val_score from sklearn.model_selection import RandomizedSearchCV import sklearn_crfsuite from sklearn_crfsuite import scorers from sklearn_crfsuite import metrics X_train = [self._sentence_to_features(sent) for sent in df_train] y_train = [self._sentence_to_labels(sent) for sent in df_train] if self.component_config["grid_search"]: self.ent_tagger = sklearn_crfsuite.CRF( algorithm='lbfgs', # stop earlier max_iterations=self.component_config["max_iterations"], # include transitions that are possible, but not observed all_possible_transitions=True ) self.ent_tagger.fit(X_train, y_train) params_space = { 'c1': scipy.stats.expon(scale=0.5), 'c2': scipy.stats.expon(scale=0.5), } labels = self.ent_tagger.classes_ # use the same metric for evaluation f1_scorer = make_scorer(metrics.flat_f1_score, average='weighted', labels=labels) # search rs = RandomizedSearchCV(self.ent_tagger, params_space, cv=10, verbose=1, n_jobs=-1, n_iter=100, scoring=f1_scorer) rs.fit(X_train, y_train) print('best params:', rs.best_params_) print('best CV score:', rs.best_score_) print('model size: {:0.2f}M'.format(rs.best_estimator_.size_ / 1000000)) try: import json with open("tunning_score.json", "w") as f: json.dump(rs.best_params_, f, sort_keys=True, indent=4) except Exception: pass self.ent_tagger = sklearn_crfsuite.CRF( algorithm='lbfgs', c1=rs.best_params_["c1"], c2=rs.best_params_["c2"], # stop earlier max_iterations=self.component_config["max_iterations"], # include transitions that are possible, but not observed all_possible_transitions=True ) else: print("L1_c", self.component_config["L1_c"]) print("L2_c", self.component_config["L2_c"]) self.ent_tagger = sklearn_crfsuite.CRF( algorithm='lbfgs', # coefficient for L1 penalty c1=self.component_config["L1_c"], # coefficient for L2 penalty c2=self.component_config["L2_c"], # stop earlier max_iterations=self.component_config["max_iterations"], # include transitions that are possible, but not observed all_possible_transitions=True ) self.ent_tagger.fit(X_train, y_train)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_crf(ctx, input, output, clusters):\n click.echo('chemdataextractor.crf.train')\n sentences = []\n for line in input:\n sentence = []\n for t in line.split():\n token, tag, iob = t.rsplit('/', 2)\n sentence.append(((token, tag), iob))\n if sentence:\n sentences.append(sentence)\n\n tagger = CrfCemTagger(clusters=clusters)\n tagger.train(sentences, output)", "def train(self, training_data):\n pass", "def train(self, training_data, model_name):\n dataset = []\n for example in training_data:\n entity_offsets = self._convert_example(example)\n dataset.append(self._from_json_to_crf(example, entity_offsets))\n\n features = [self._sentence_to_features(s) for s in dataset]\n labels = [self._sentence_to_labels(s) for s in dataset]\n trainer = sklearn_crfsuite.CRF(\n algorithm=\"lbfgs\",\n # coefficient for L1 penalty\n c1=0.1,\n # coefficient for L2 penalty\n c2=0.1,\n # stop earlier\n max_iterations=50,\n # include transitions that are possible, but not observed\n all_possible_transitions=True,\n )\n trainer.fit(features, labels)\n logger.info(\"Creating Model for Intent %s\",model_name)\n joblib.dump(trainer, 'core/agent/model_files/%s.model' % model_name)\n return True", "def train(self, training_data: TrainingData, config: RasaNLUModelConfig, **kwargs: Any) -> None:\n pass", "def train(self):\n # 1. Extracting details of attributes\n\n self.get_attribute_data()\n if self.train_data is None and self.train_data_file is None:\n raise ValueError(\"Neither training data not training file provided\")\n\n self.get_train_data()\n self.classifier = self.build_tree(rows=self.train_data, attribute_list=self.attribute_names)", "def train(self, train_data):\n with open(train_data, 'r') as train_data:\n while True:\n tokens = train_data.readline().split()\n pos = train_data.readline().split()\n labels = train_data.readline().split()\n if not tokens or not pos or not labels:\n break\n # Generate transition probabilities\n for i in range(0, len(labels) - self.N_VALUE + 1):\n self.add_label_sequence(labels[i:i + self.N_VALUE])\n # Generate lexical generation probabilities\n for i in range(0, len(tokens)):\n token = tokens[i].lower()\n label = labels[i]\n self.add_word_tag(token, label)\n self.handle_unknowns()", "def train(self, data):\n pass", "def train():\n pass", "def train(self, trainData):\n pass", "def train(self):\n for data_tier in self.data_tiers:\n fd = open(self.data_path + '/training_data_' + data_tier + '.json', 'r')\n self.preprocessed_data[data_tier] = json.load(fd)\n fd.close()\n tot = len(self.preprocessed_data[data_tier]['features'])\n p = int(math.ceil(tot*0.8))\n training_features = np.array(self.preprocessed_data[data_tier]['features'][:p])\n trend_training_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][:p])\n avg_training_classifications = np.array(self.preprocessed_data[data_tier]['avg_classifications'][:p])\n t1 = datetime.datetime.utcnow()\n self.clf_trend[data_tier].fit(training_features, trend_training_classifications)\n self.clf_avg[data_tier].fit(training_features, avg_training_classifications)\n t2 = datetime.datetime.utcnow()\n td = t2 - t1\n self.logger.info('Training %s for data tier %s took %s', self.name, data_tier, str(td))\n joblib.dump(self.clf_trend[data_tier], self.data_path + '/' + self.name + '_trend_' + data_tier + '.pkl')\n joblib.dump(self.clf_avg[data_tier], self.data_path + '/' + self.name + '_avg_' + data_tier + '.pkl')", "def train(self, train_data, train_labels):\n\n # Apply filtering\n if 
len(self.preprocessing) > 0:\n print('Applying', len(self.preprocessing), 'filter(s) to training data')\n for filter in self.preprocessing:\n for i in range(len(train_data)):\n train_data[i] = filter(train_data[i])\n\n # Apply feature extraction\n if len(self.features) > 0:\n print('Extracting', len(self.features), 'feature(s) from training data')\n scaler = MinMaxScaler(feature_range=(0, 1))\n for i in range(len(train_data)):\n features = []\n for feature in self.features:\n features.append(feature(train_data[i]))\n train_data[i] = np.hstack(features)\n train_data = scaler.fit_transform(train_data)\n else:\n # Flatten images (not necessary when using feature extraction)\n train_data = np.array(train_data).reshape((len(train_data), -1))\n\n # Fit model\n print('Fitting RF model on', len(train_labels), 'images')\n self.classifier.fit(train_data, train_labels)", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def retrain(self):\n thread = Thread(target=self.trainer.train_classifier)\n thread.start()", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def trainModel( self, featureTrain, classTrain):", "def train(\n self,\n training_data: TrainingData,\n config: Optional[RasaNLUModelConfig] = None,\n **kwargs: Any,\n ) -> None:\n pass", "def train(\n self,\n training_data: TrainingData,\n config: Optional[RasaNLUModelConfig] = None,\n **kwargs: Any,\n ) -> None:\n pass", "def train(self, corpus):\n self.tokens = []\n self.tags = []\n sentences = corpus.split(NEW_LINE)\n for sentence in sentences:\n start = START_SIGHT + SLASH + START_SIGHT + SPACE + START_SIGHT + SLASH + START_SIGHT + SPACE\n end = SPACE + END + SLASH + END\n sentence = start + sentence + end \n tokens = sentence.split(SPACE)\n for t in tokens:\n token = t.rsplit(SLASH, 1)\n if (len(token) > 1):\n self.tokens.append(token) \n self.tags.append(token[TAG_INDEX])\n \n nonsense_cases = set([(END, START_SIGHT), (START_SIGHT, END),\n (START_SIGHT, START_SIGHT, END),\n (END, START_SIGHT, START_SIGHT)])\n self.bigram_tags = [b for b in zip(self.tags[:-1], self.tags[1:]) if b not in nonsense_cases]\n self.trigram_tags = [t for t in zip(self.tags[:-1], self.tags[1:], self.tags[2:])\\\n if not (t[WORD_INDEX], t[TAG_INDEX]) in nonsense_cases and\\\n not (t[WORD_INDEX], t[TAG_INDEX]) in nonsense_cases]", "def train(self, x_train, y_train):\n\n # convert input to format for classifier\n list_of_embeddings = list(x_train[self.embeddings_col])\n x_train = np.array([[float(i) for i in embedding.strip('[]').split()] for embedding in list_of_embeddings])\n\n # discard fold ID column from labels\n review_groups = [col for col in y_train.columns if not col=='k']\n\n for review_group in tqdm(review_groups, desc='Train Review Groups'):\n\n # pull label column\n labels = y_train[review_group]\n\n # logistic classifier\n classifier = SGDClassifier(loss=\"log\", alpha=self.alpha,\n l1_ratio = self.l1_ratio, penalty=\"elasticnet\").fit(x_train, labels)\n\n # save the model in dictionary of models\n self.models[review_group] = classifier", "def train(self, tagged_sentences: Iterator[Tuple[TokenSeq, PosSeq]]) -> Tuple[NDArray, NDArray]:\n #add tokens\n for sentence in tagged_sentences:\n tokens, pos_tags = sentence\n for pos in pos_tags:\n self.pos_tags.append(pos)\n pos_tags.insert(0, \"<s>\")\n pos_tags.pop(len(pos_tags) - 1)\n for i in range(0, len(tokens)):\n temp_dict = {}\n temp_dict = add_features(tokens,pos_tags[i],i, 
temp_dict)\n self.features.append(temp_dict)\n #print(self.features)\n feature_matrix = self.vectorizer.fit_transform(self.features)\n label_vector = self.le.fit_transform(self.pos_tags)\n for i in range(0, len(label_vector)):\n self.l[self.pos_tags[i]] = i\n \n self.feature_matrix = feature_matrix\n self.label_vector = label_vector\n self.clf.fit(self.feature_matrix, self.label_vector)\n\n return (self.feature_matrix, label_vector)", "def train(self, X, y):\n tf.logging.set_verbosity(\n tf.logging.INFO) # comment if you don't want to display the information during training/evaluation\n\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=self.params[\"TEST_SIZE\"], random_state=42, stratify=y)\n\n self.label_list = y.unique()\n\n train_features = self.sentences_to_features(X_train, y_train)\n test_features = self.sentences_to_features(X_test, y_test)\n if DEBUG:\n print(\"Transformation to features completed\")\n\n num_train_steps = int(\n len(train_features) / self.params[\"BATCH_SIZE\"] * self.params[\"NUM_TRAIN_EPOCHS\"])\n num_warmup_steps = int(\n num_train_steps * self.params[\"WARMUP_PROPORTION\"])\n\n run_config = self.run_config_builder()\n model_fn = self.model_fn_builder(len(self.label_list), self.params[\"LEARNING_RATE\"], num_train_steps,\n num_warmup_steps)\n self.estimator = self.estimator_builder(model_fn, run_config)\n\n train_input_fn = bert.run_classifier.input_fn_builder(features=train_features,\n seq_length=self.params[\"MAX_SEQ_LENGTH\"],\n is_training=True, drop_remainder=False)\n if DEBUG:\n print(\"Beginning Training!\")\n current_time = time.time()\n self.estimator.train(input_fn=train_input_fn,\n max_steps=num_train_steps)\n if DEBUG:\n print(\"Training took time :\", time.time() - current_time,\n \"s, or \", (time.time() - current_time) / 60, \"min\")\n\n self.classifier_trained = True\n\n test_input_fn = run_classifier.input_fn_builder(features=test_features,\n seq_length=self.params[\"MAX_SEQ_LENGTH\"],\n is_training=False, drop_remainder=False)\n\n # apply model on test set and print all metrics\n if DEBUG:\n print(\"Evaluating\")\n self.estimator.evaluate(input_fn=test_input_fn, steps=None)", "def train(self, training_data, cfg, **kwargs):\n pass", "def train(self, training_steps=10):", "def train(self):\n # >>> YOUR ANSWER HERE\n\n fake_docs = []\n fake_words = []\n fake_words_freq = {}\n real_docs = []\n real_words = []\n real_words_freq = {}\n\n # load fake data of the training dataset, store the docs and words\n fake_data = open(self.train_data['fake']).readlines()\n for sentence in fake_data:\n preprocess_sentence = sentence.strip()\n fake_docs.append(preprocess_sentence)\n fake_words.extend(preprocess_sentence.split())\n\n # load real data of the training dataset, store the docs, words and word frequencies.\n real_data = open(self.train_data['real']).readlines()\n for sentence in real_data:\n preprocess_sentence = sentence.strip()\n real_docs.append(preprocess_sentence)\n real_words.extend(preprocess_sentence.split())\n\n # remove stop words if necessary\n if self.REMOVE_STOPWORDS:\n fake_words = [word for word in fake_words if word not in self.stopwords]\n real_words = [word for word in real_words if word not in self.stopwords]\n\n # calculate all words' frequency\n for word in fake_words:\n self.vocabulary.add(word)\n fake_words_freq[word] = fake_words_freq.get(word, 0) + 1\n for word in real_words:\n self.vocabulary.add(word)\n real_words_freq[word] = real_words_freq.get(word, 0) + 1\n\n # pre-calculate the number of all docs, 
the number of docs per class and words frequency per class for\n # calculation in the training loop.\n n_doc = len(fake_docs) + len(real_docs)\n n_class = {'fake': len(fake_docs), 'real': len(real_docs)}\n big_doc_dict = {'fake': fake_words_freq, 'real': real_words_freq}\n fake_words_num = 0\n real_words_num = 0\n for w in self.vocabulary:\n fake_words_num += fake_words_freq.get(w, 0)\n real_words_num += real_words_freq.get(w, 0)\n words_frequency_per_class = {'fake': fake_words_num, 'real': real_words_num}\n\n # Training\n for c in self.classes:\n self.logprior[c] = math.log(n_class[c] / n_doc)\n for w in self.vocabulary:\n count_w_c = big_doc_dict[c].get(w, 0)\n log_likelihood = math.log((count_w_c + 1) / (len(self.vocabulary) + words_frequency_per_class[c]))\n self.loglikelihood[(w, c)] = log_likelihood\n # >>> END YOUR ANSWER", "def train(self, batch):\n pass", "def train(self, trainfile):", "def train_all(X_train_fuse, Y_train, X_dev_fuse, Y_dev, R_train, R_dev, hyperparams):", "def train(self, trainfile):\r\n\r\n # We load the data and lower the text\r\n data_train = pd.read_csv(trainfile, sep = \"\\t\", names = [\"polarity\", \"category\", \"word\", \"offsets\", \"sentence\"])\r\n data_train['sentence_l'] = data_train['sentence'].apply(str.lower)\r\n data_train['word'] = data_train['word'].apply(str.lower)\r\n \r\n # We try to keep all the no/nor/not words as this changes radically the sentiment analysis\r\n data_train['sentence_l'] = data_train[\"sentence_l\"].apply(lambda sentence: sentence.replace(\"can\\'t\", \"can not\"))\r\n data_train['sentence_l'] = data_train[\"sentence_l\"].apply(lambda sentence: sentence.replace(\"n\\'t\", \" not\"))\r\n self.stopwords = stopwords.words(\"english\")\r\n self.stopwords.remove('nor')\r\n self.stopwords.remove('no')\r\n self.stopwords.remove('not')\r\n \r\n # We clean the train data and stem the words\r\n self.stemmer = nltk.porter.PorterStemmer()\r\n clean_sentences = []\r\n for row in data_train['sentence_l']:\r\n tokens = word_tokenize(row)\r\n tokens = [word for word in tokens if word.isalpha()]\r\n tokens = [w for w in tokens if not w in self.stopwords] \r\n tokens = [self.stemmer.stem(word) for word in tokens]\r\n clean_sentences.append(tokens)\r\n data_train['stems'] = clean_sentences\r\n \r\n # We also stem the target words to be coherent with the stemmed words in the sentences\r\n data_train['word'] = [self.stemmer.stem(word) for word in data_train['word']]\r\n \r\n # We recreate the sentences with the selected and cleaned words\r\n Classifier.create_sentence = staticmethod(Classifier.create_sentence)\r\n data_train.clean_sentence = Classifier.create_sentence(data_train.stems)\r\n \r\n # We create a BOW vector\r\n self.restaurant_vect = CountVectorizer(min_df=1, tokenizer=nltk.word_tokenize)\r\n reviews_counts = self.restaurant_vect.fit_transform(data_train.clean_sentence)\r\n \r\n # We transform the BOW vector with the tfidf scores\r\n self.tfidf_transformer = TfidfTransformer()\r\n reviews_tfidf = self.tfidf_transformer.fit_transform(reviews_counts)\r\n \r\n polarities = []\r\n for row in data_train['polarity']:\r\n if row == 'positive':\r\n polarities.append(1)\r\n if row == 'neutral':\r\n polarities.append(0)\r\n if row == 'negative':\r\n polarities.append(-1)\r\n data_train['polarity_floats'] = polarities\r\n \r\n # Split data into training and test sets\r\n test_size = 10\r\n X_train, X_test, y_train, y_test = train_test_split(reviews_tfidf, data_train.polarity_floats,\r\n test_size = test_size/100, random_state = None)\r\n 
\r\n ############# CNN MODEL ##############\r\n \r\n from keras.layers import Input, Dense, Embedding, Conv2D, MaxPool2D\r\n from keras.layers import Reshape, Flatten, Dropout, Concatenate\r\n from keras.callbacks import ModelCheckpoint\r\n from keras.optimizers import Adam\r\n from keras.models import Model\r\n \r\n sequence_length = X_train.shape[1] # 7\r\n vocabulary_size = X_train.shape[0] # 1503\r\n embedding_dim = 256\r\n filter_sizes = [3,4,5]\r\n num_filters = 512\r\n drop = 0.5\r\n \r\n epochs = 10\r\n batch_size = 50\r\n \r\n # this returns a tensor\r\n print(\"Creating Model...\")\r\n inputs = Input(shape=(sequence_length,), dtype='int32')\r\n embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim, input_length=sequence_length)(inputs)\r\n reshape = Reshape((sequence_length,embedding_dim,1))(embedding)\r\n \r\n conv_0 = Conv2D(num_filters, kernel_size=(filter_sizes[0], embedding_dim), padding='valid', kernel_initializer='normal', activation='relu')(reshape)\r\n conv_1 = Conv2D(num_filters, kernel_size=(filter_sizes[1], embedding_dim), padding='valid', kernel_initializer='normal', activation='relu')(reshape)\r\n conv_2 = Conv2D(num_filters, kernel_size=(filter_sizes[2], embedding_dim), padding='valid', kernel_initializer='normal', activation='relu')(reshape)\r\n \r\n maxpool_0 = MaxPool2D(pool_size=(sequence_length - filter_sizes[0] + 1, 1), strides=(1,1), padding='valid')(conv_0)\r\n maxpool_1 = MaxPool2D(pool_size=(sequence_length - filter_sizes[1] + 1, 1), strides=(1,1), padding='valid')(conv_1)\r\n maxpool_2 = MaxPool2D(pool_size=(sequence_length - filter_sizes[2] + 1, 1), strides=(1,1), padding='valid')(conv_2)\r\n \r\n concatenated_tensor = Concatenate(axis=1)([maxpool_0, maxpool_1, maxpool_2])\r\n flatten = Flatten()(concatenated_tensor)\r\n dropout = Dropout(drop)(flatten)\r\n output = Dense(units=1, activation='softmax')(dropout)\r\n \r\n # this creates a model that includes\r\n model = Model(inputs=inputs, outputs=output)\r\n \r\n checkpoint = ModelCheckpoint('weights.{epoch:03d}-{val_acc:.4f}.hdf5', monitor='val_acc', verbose=1, save_best_only=True, mode='auto')\r\n adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\r\n \r\n model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])\r\n print(\"Training Model...\")\r\n model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, callbacks=[checkpoint], validation_data=(X_test, y_test)) # starts training\r", "def train(self, features, labels):\n pass", "def train(self):\n raise NotImplementedError", "def train(self, batch_training=False):\n raise NotImplementedError", "def train(self, arg1=None, arg2=None, **kwargs):\n nltk.download('averaged_perceptron_tagger')\n nltk.download('wordnet')\n nltk.download('twitter_samples')\n nltk.download('punkt')\n nltk.download('stopwords')\n nltk.download('vader_lexicon')\n\n positive_tweets = twitter_samples.strings('positive_tweets.json')\n negative_tweets = twitter_samples.strings('negative_tweets.json')\n text = twitter_samples.strings('tweets.20150430-223406.json')\n tweet_tokens = twitter_samples.tokenized('positive_tweets.json')[0]\n\n stop_words = stopwords.words('english')\n\n positive_tweet_tokens = twitter_samples.tokenized('positive_tweets.json')\n negative_tweet_tokens = twitter_samples.tokenized('negative_tweets.json')\n\n positive_cleaned_tokens_list = []\n negative_cleaned_tokens_list = []\n\n for tokens in positive_tweet_tokens:\n 
positive_cleaned_tokens_list.append(self.remove_noise(tokens, stop_words))\n\n for tokens in negative_tweet_tokens:\n negative_cleaned_tokens_list.append(self.remove_noise(tokens, stop_words))\n\n all_pos_words = self.get_all_words(positive_cleaned_tokens_list)\n\n freq_dist_pos = FreqDist(all_pos_words)\n print(freq_dist_pos.most_common(20))\n\n positive_tokens_for_model = self.get_tweets_for_model(positive_cleaned_tokens_list)\n negative_tokens_for_model = self.get_tweets_for_model(negative_cleaned_tokens_list)\n\n positive_dataset = [(tweet_dict, \"Positive\")\n for tweet_dict in positive_tokens_for_model]\n\n negative_dataset = [(tweet_dict, \"Negative\")\n for tweet_dict in negative_tokens_for_model]\n\n dataset = positive_dataset + negative_dataset\n\n random.shuffle(dataset)\n\n train_data = dataset[:7000]\n test_data = dataset[7000:]\n\n self.classifier = NaiveBayesClassifier.train(train_data)", "def train_classifier(images_path):\n car_imgs = get_images(images_path + '/vehicles/')\n non_car_imgs = get_images(images_path + '/non-vehicles/')\n\n print('Computing car features')\n car_features = extract_features(car_imgs,\n color_space=COLOR_SPACE,\n spatial_size=SPATIAL_SIZE,\n hist_bins=HIST_BINS,\n orient=ORIENT,\n pix_per_cell=PIX_PER_CELL,\n cell_per_block=CELL_PER_BLOCK,\n hog_channel=HOG_CHANNEL,\n spatial_feat=SPATIAL_FEAT,\n hist_feat=HIST_FEAT,\n hog_feat=HOG_FEAT)\n print(len(car_features))\n\n print('Computing non-car features')\n non_car_features = extract_features(non_car_imgs,\n color_space=COLOR_SPACE,\n spatial_size=SPATIAL_SIZE,\n hist_bins=HIST_BINS,\n orient=ORIENT,\n pix_per_cell=PIX_PER_CELL,\n cell_per_block=CELL_PER_BLOCK,\n hog_channel=HOG_CHANNEL,\n spatial_feat=SPATIAL_FEAT,\n hist_feat=HIST_FEAT,\n hog_feat=HOG_FEAT)\n print(len(non_car_features))\n \n X = np.vstack((car_features, non_car_features)).astype(np.float64) \n print('X shape: {}'.format(X.shape))\n # Fit a per-column scaler\n X_scaler = StandardScaler().fit(X)\n # Apply the scaler to X\n scaled_X = X_scaler.transform(X)\n\n # Define the labels vector\n y = np.hstack((np.ones(len(car_features)), np.zeros(len(non_car_features))))\n\n # Split up data into randomized training and test sets\n rand_state = np.random.randint(0, 100)\n X_train, X_test, y_train, y_test = train_test_split(\n scaled_X, y, test_size=0.2, random_state=rand_state)\n\n # Use a linear SVC \n svc = LinearSVC()\n # Check the training time for the SVC\n t=time.time()\n svc.fit(X_train, y_train)\n t2 = time.time()\n print(round(t2-t, 2), 'Seconds to train SVC...')\n # Check the score of the SVC\n print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))\n # Check the prediction time for a single sample\n t=time.time()\n\n return svc, X_scaler", "def train():\n # YOUR TRAINING CODE GOES HERE", "def train(self, ):\n raise NotImplementedError", "def pre_train(self, dataset, **kwargs):\n\n pass", "def train(\n self, training_data: TrainingData, cfg: DazuConfig, **kwargs: Any\n ) -> None:", "def train_start(self):\n self.module.img_enc.train()\n self.module.txt_enc.train()", "def train(cleaner, data_source, save_to=\"../models/model.pkl\"):\n df = pd.read_csv(data_source)\n df = df[pd.notnull(df['tags'])]\n print(\"Start : Pre-cleaning process . . . \")\n print(\" HTML decoding . . . done\")\n print(\" lowercase text . . . done\")\n print(\" replace [/(){}\\[\\]\\|@,;] symbols by space . . . done\")\n print(\" remove remaining symbols . . . done\")\n print(\" remove stopwords . . . 
done\")\n df['post'] = df['post'].apply(cleaner)\n print(\"End : Pre-cleaning process\")\n x = df.post\n y = df.tags\n # no need for split data in final training stage\n # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state = 42)\n print(\"Start : model creation process . . . \")\n sgd = Pipeline([('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, random_state=42, max_iter=5,\n tol=None)),\n ])\n # sgd.fit(X_train, y_train)\n sgd.fit(x, y)\n print(\"End : model creation process\")\n model = open(save_to, 'wb')\n pickle.dump(sgd, model)\n model.close()\n print(\"Trained model saved to \" + save_to)\n return sgd", "def train(self):\n\t\traise NotImplementedError", "def train(self, train_loader):\n pass", "def train(self, X_train, y_train):\n\n self.model_pipeline.fit(X_train, y_train)", "def train(self):\n # self.recognizer.train()\n self.detector.train()\n self.shared_conv.train()", "def Train(self, training_set):\n c_word_tag = defaultdict(int)\n c_tag = defaultdict(int)\n c_tag_bigram = defaultdict(int)\n self.dataset_size = len(training_set)\n\n for i in xrange(len(training_set) - 1):\n c_word_tag[training_set[i]] += 1\n c_tag[training_set[i][1]] += 1\n c_tag_bigram[(training_set[i][1], training_set[i + 1][1])] += 1\n self.dictionary[training_set[i][0]].add(training_set[i][1])\n c_word_tag[training_set[-1]] += 1\n c_tag[training_set[-1][1]] += 1\n self.dictionary[end_token].add(end_token)\n\n # \"self.transitions\" contains P(ti|ti-1) i.e. C(ti-1,ti)/C(ti-1)\n for tag_tag in c_tag_bigram:\n self.transitions[tag_tag] = log(float(c_tag_bigram[tag_tag]) / c_tag[tag_tag[0]])\n\n # \"self.emissions\" contains P(Wi|ti) i.e. C(Wi,ti)/C(ti)\n for word_tag in c_word_tag:\n self.emissions[word_tag] = log(float(c_word_tag[word_tag]) / c_tag[word_tag[1]])", "def train():\n import training\n\n # Ensure output directories exist\n os.makedirs(os.path.dirname(cfg.scaler_path), exist_ok=True)\n os.makedirs(cfg.model_path, exist_ok=True)\n os.makedirs(cfg.log_path, exist_ok=True)\n\n # Load (standardized) input data and target values\n tr_x, tr_y, _ = _load_data(cfg.training_set, is_training=True)\n val_x, val_y, _ = _load_data(cfg.validation_set)\n\n # Try to create reproducible results\n np.random.seed(cfg.initial_seed)\n\n # Save free parameters to disk\n utils.log_parameters(cfg.training, os.path.join(cfg.model_path,\n 'parameters.json'))\n\n training.train(tr_x, tr_y, val_x, val_y)", "def train(self):\n return", "def train(self):\r\n faces = [] #empty list for faces\r\n Ids = [] #empty list for Id's\r\n path = f\"{PARENT_PATH}\\\\{DATASET_DIR}\" #dataset path\r\n\r\n #join each and every image paths\r\n image_paths = [os.path.join(path, i) for i in os.listdir(path)]\r\n #print(image_paths)\r\n\r\n for image in image_paths:\r\n face_img = Image.open(image).convert('L') #Pillow Image\r\n np_face = np.array(face_img, 'uint8') #into numpy array - usigned 8 bit -1byte\r\n Id = int(os.path.split(image)[-1].split('.')[1]) #get id from image path\r\n #print(Id)\r\n faces.append(np_face) #append in faces array/list\r\n Ids.append(Id) #append in Ids list/array\r\n\r\n RECOGNIZER.train(faces, np.array(Ids)) #train model using faces and Id (numpy arrays)\r\n RECOGNIZER.save(f\"{PARENT_PATH}\\\\{TRAINED_FILE}\")\r\n\r\n self.pop_window(title=\"Restart Needed!\", msg=\"Training Successful.\\nRestart the app Now.\")\r\n return", "def train(self, training_data, training_labels, validation_data, 
validation_labels):\n abstract", "def train(self, dataset) -> None:\n raise NotImplementedError()", "def train(pipetype, datapath):\n with open(datapath, 'r') as f:\n training_data = json.load(f)\n docs = ['{0} {1}'.format(d['title'], d['text']) for d in training_data]\n\n if pipetype == 'bow':\n vector.train(docs)\n\n if pipetype in ['stanford', 'spotlight', 'keyword']:\n concept.train(docs, pipetype=pipetype)", "def main():\n # Read data for train set\n print('loading training data')\n train = read_datafile('../data/tsd_train.csv')\n\n # Read trial data for validation set\n validation = read_datafile('../data/tsd_trial.csv')\n\n # Read data for test set\n print('loading test data')\n test = read_datafile('../data/tsd_test.csv')\n\n # Convert training data to Spacy Entities\n nlp = spacy.load(\"en_core_web_sm\")\n print('preparing training data')\n training_data = []\n for n, (spans, text) in enumerate(train):\n doc = nlp(text)\n ents = spans_to_ents(doc, set(spans), 'TOXIC')\n training_data.append((doc.text, {'entities': ents}))\n\n toxic_tagging = spacy.blank('en')\n toxic_tagging.vocab.strings.add('TOXIC')\n ner = nlp.create_pipe(\"ner\")\n toxic_tagging.add_pipe(ner, last=True)\n ner.add_label('TOXIC')\n\n pipe_exceptions = [\"ner\", \"trf_wordpiecer\", \"trf_tok2vec\"]\n unaffected_pipes = [\n pipe for pipe in toxic_tagging.pipe_names\n if pipe not in pipe_exceptions]\n\n\n print('Training!')\n with toxic_tagging.disable_pipes(*unaffected_pipes):\n \n toxic_tagging.begin_training()\n for iteration in range(30):\n random.shuffle(training_data)\n losses = {}\n batches = spacy.util.minibatch(\n training_data, size=spacy.util.compounding(\n 4.0, 32.0, 1.001))\n for batch in batches:\n texts, annotations = zip(*batch)\n toxic_tagging.update(texts, annotations, drop=0.5, losses=losses)\n print(\"Losses\", losses)\n\n\n # Define helper function for evaluating datasets\n def evaluate(dateset):\n precision_recall_f1_scores = []\n for spans, text in dateset:\n pred_spans = []\n doc = toxic_tagging(text)\n for ent in doc.ents:\n pred_spans.extend(range(ent.start_char, ent.start_char + len(ent.text)))\n \n # score = semeval2021.f1(pred_spans, spans)\n precision_recall_f1_scores.append(per_post_precision_recall_f1(pred_spans, spans))\n\n # compute average precision, recall and f1 score of all posts\n return np.array(precision_recall_f1_scores).mean(axis=0)\n\n # Evaluate on dev and test sets\n print('Evaluation:')\n eval_precision, eval_recall, eval_f1 = evaluate(validation)\n test_precision, test_recall, test_f1 = evaluate(test)\n \n print(f'Dev set: Precision = {eval_precision}, Recall = {eval_recall}, F1 = {eval_f1}')\n print(f'Test set: Precision = {test_precision}, Recall = {test_recall}, F1 = {test_f1}')", "def train(self, clfs, dataset):\n # TODO: implement stacking to help with resolving ties\n pass", "def train_pipeline(nlp: spacy.language.Language) -> None:\n if TEXTCAT not in nlp.pipe_names:\n textcat = nlp.create_pipe(TEXTCAT, config={\"exclusive_classes\": False})\n nlp.add_pipe(textcat, last=True)\n else:\n textcat = nlp.get_pipe(TEXTCAT)\n\n for category in CATEGORIES:\n textcat.add_label(category.value)\n\n pipe_exceptions = {TEXTCAT, \"trf_wordpiecer\", \"trf_tok2vec\"}\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]\n with nlp.disable_pipes(*other_pipes): # only train textcat\n all_data = list(get_classification_training_data())\n random.shuffle(all_data)\n\n training_data = all_data[: len(all_data) - 2]\n validation_data = all_data[len(all_data) 
- 2 :]\n\n optimizer = nlp.begin_training()\n for itn in range(20):\n losses: Dict[str, Any] = {}\n random.shuffle(training_data)\n batches = minibatch(training_data, size=compounding(4.0, 32.0, 1.001))\n\n for batch in batches:\n texts, annotations = zip(*batch)\n nlp.update(texts, annotations, sgd=optimizer, drop=0.2, losses=losses)", "def parse_train_data(training_set, language):\n print \"Reading training set: \" + training_set\n xmldoc = minidom.parse(training_set)\n lex_list = xmldoc.getElementsByTagName('lexelt')\n training_output = {}\n\n print \"Processing training set and training models...\"\n for node in lex_list:\n lexelt = node.getAttribute('item')\n training_output[lexelt] = {}\n inst_list = node.getElementsByTagName(\"instance\")\n # setup the neighbor_word_list within k distance of the word\n neighbor_word_list = []\n senseid_set = set()\n for inst in inst_list:\n sentence = inst.getElementsByTagName('context')[0]\n senseid_set.add(inst.getElementsByTagName('answer')[0].getAttribute('senseid'))\n neighbor_word_list = list(set(neighbor_word_list + get_neighbor_words_list(sentence, language)))\n senseid_list = list(senseid_set)\n training_output[lexelt][\"neighbor_word_list\"] = neighbor_word_list\n _4c_4d_feature = extract_4c_4d_feature(neighbor_word_list, senseid_list, inst_list, language)\n training_output[lexelt][\"4c_4d_feature\"] = _4c_4d_feature\n x_list = []\n y_list = []\n for inst in inst_list:\n y = inst.getElementsByTagName('answer')[0].getAttribute('senseid')\n if ignore_U_activated and y.__eq__('U'):\n continue\n y_list.append(str(replace_accented(y)))\n x = extract_vector(inst, neighbor_word_list, _4c_4d_feature, language)\n x_list.append(x)\n # for each node, build a classifier\n if language.__eq__(\"English\"):\n #clf = RandomForestClassifier(n_estimators=10) 58.9\n #clf = SGDClassifier() 61.1\n #clf = MultinomialNB() 62.9\n #clf = BernoulliNB() 55.8\n #clf = Perceptron() 60.4\n #clf = PassiveAggressiveClassifier() 62.1\n #clf = RidgeClassifier() 62.7\n #clf = svm.LinearSVC() 62.5\n #clf = KNeighborsClassifier()\n #clf = GaussianNB()\n clf = MultinomialNB(alpha=0.95) #+ alpha=0.95 + k=13 + left_right_order + vector_0_1 off = 64.7\n elif language.__eq__(\"Spanish\"):\n #clf = svm.LinearSVC() 82.0\n #clf = MultinomialNB() 82.2\n #clf = RidgeClassifier() 81.5\n #clf = PassiveAggressiveClassifier() 81.9\n #clf = BernoulliNB() 72.4\n clf = MultinomialNB(alpha=0.50) #0.25:82.6 0.4:83.1 0.45:83.2 0.5: 83.2 0.55:83.2 0.6:82.8 0.75:82.7\n elif language.__eq__(\"Catalan\"):\n #clf = svm.LinearSVC() # 82.8\n #clf = MultinomialNB() # 80.8\n #clf = RidgeClassifier() 82.6\n #clf = svm.LinearSVC(C=1.5) 82.9\n clf = MultinomialNB(alpha=0.25) # 0.5:84.3 0.35:84.6 0.3:84.8 0.25:85.4 0.2:85.3\n else:\n clf = svm.LinearSVC()\n clf.fit(x_list, y_list)\n training_output[lexelt][\"Classifier\"] = clf\n print \"Models trained.\"\n return training_output", "def train(self):\n feature = Feature(trained=False)\n classifier = LogisticRegression(\n penalty='l2',\n max_iter=100,\n solver='liblinear',\n random_state=self.RAND_SEED)\n\n true_labels = []\n predicted_labels = []\n\n for subj in self.subjects:\n print(subj)\n # preprocess training and testing set\n self.dataset_gen(subject=subj, valid=False)\n\n # train and predict\n pipeline_steps = [('vectorized', feature.vector)]\n if self.istfidf:\n pipeline_steps.append(('tf-idf', feature.tfidftransform))\n if self.islda == 'small':\n pipeline_steps.append(('lda', feature.ldatransform_small))\n elif self.islda == 'large':\n 
pipeline_steps.append(('lda', feature.ldatransform_large))\n else:\n pass\n if self.isnorm:\n pipeline_steps.append(('scalar', StandardScaler(with_mean=False)))\n pipeline_steps.append(('clf', classifier))\n model = Pipeline(steps=pipeline_steps)\n\n model.fit(self.X_train, self.y_train)\n\n predicted = model.predict(self.X_test)\n # hamming\n predicted_labels.append(predicted)\n true_labels.append(self.y_test)\n\n true_matrix, pred_matrix = np.array(true_labels, int).T, np.array(predicted_labels, int).T\n true_matrix[true_matrix == -1] = 0\n pred_matrix[pred_matrix == -1] = 0\n\n evaluation = Evaluation(self.subjects)\n evaluation.model_evaluate(true_matrix=true_matrix, pred_matrix=pred_matrix, model_name=self.modelname)", "def train(self, tag, tag_column):\n if (tag_column not in self.database.df.columns):\n raise ValueError(f\"Tag {tag_column} not found in dataset\")\n elif tag not in self.database.df[tag_column].to_list():\n raise ValueError(f\"Tag {tag} not found in dataset column {tag_column}\")\n\n # Filter the tags of the chosen tag\n docs = self.database.df[self.database.df[tag_column]== tag]\n \n docs = docs[f\"orig_{self.database.text_column}\"]\n \n preprocess = Preprocess(tags_types = None, \n filter_flags = {\"digits\" : False,\n \"stopwords\": False,\n \"text_only\": False,\n \"simbols\" : True,\n \"punct\" : True,\n \"links\" : True,\n \"refs\" : False,\n \"tokenize\" : False})\n \n docs = list(preprocess.preprocess_text(docs))\n \n self.tokenizer = Tokenizer()\n self.tokenizer.fit_on_texts(docs)\n self.total_words = len(self.tokenizer.word_index) + 1\n\n # Generate text sequences for teh model\n sequences = []\n for line in docs:\n token_list = self.tokenizer.texts_to_sequences([line])[0]\n for i in range(2, len(token_list)):\n n_gram_sequence = token_list[:i+1]\n sequences.append(n_gram_sequence)\n \n sequences = pad_sequences(sequences, maxlen=self.max_sequence_len+1, padding='pre')\n\n # Creates teh model\n self.model = Sequential()\n self.model.add(Embedding(self.total_words, 256, input_length=self.max_sequence_len))\n self.model.add(Bidirectional(LSTM(128)))\n self.model.add(Dense(self.total_words, activation='softmax'))\n optimizer = Adam(lr=0.01)\n self.model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n\n # Trains teh model\n sequences = pad_sequences(sequences, maxlen=self.max_sequence_len+1, padding='pre')\n X = sequences[:, :-1]\n y = to_categorical(sequences[:, -1], num_classes=self.total_words)\n X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.9)\n\n self.model.fit(X_train, y_train, epochs=50, batch_size=4096, \n validation_data=(X_valid, y_valid))", "def train(self,training_file,rare_thresh=100,clf_params=None,model_path=None,chosen_feats=None,tune_mode=None,size=None,as_text=False,multitrain=True,chosen_clf=None):\n\n\t\tif tune_mode is not None and size is None and tune_mode != \"hyperopt\":\n\t\t\tsize = 5000\n\t\t\tsys.stderr.write(\"o No sample size set - setting size to 5000\\n\")\n\n\t\tif not as_text:\n\t\t\ttrain = io.open(training_file,encoding=\"utf8\").read().strip().replace(\"\\r\",\"\") + \"\\n\"\n\t\telse:\n\t\t\ttrain = training_file\n\n\t\tif size is not None:\n\t\t\ttrain = shuffle_cut_conllu(train,size)\n\t\t#tagged = udpipe_tag(train,self.udpipe_model)\n\t\ttagged = tt_tag(train,self.lang,preserve_sent=True)\n\n\t\tif model_path is None: # Try default model location\n\t\t\tmodel_path = script_dir + os.sep + \"models\" + os.sep + self.corpus + \"_ensemble_sent.pkl\"\n\n\t\tif 
clf_params is None:\n\t\t\t# Default classifier parameters\n\t\t\t#clf_params = {\"n_estimators\":125,\"min_samples_leaf\":1, \"max_depth\":15, \"max_features\":None, \"n_jobs\":4, \"random_state\":42, \"oob_score\":True, \"bootstrap\":True}\n\t\t\tclf_params = {\"n_estimators\":100,\"min_samples_leaf\":1, \"min_samples_split\":5, \"max_depth\":10, \"max_features\":None, \"n_jobs\":4, \"random_state\":42, \"oob_score\":True, \"bootstrap\":True}\n\n\t\tif chosen_clf is None:\n\t\t\tchosen_clf = RandomForestClassifier(n_jobs=4,oob_score=True, bootstrap=True)\n\t\t\tchosen_clf.set_params(**clf_params)\n\n\t\tcat_labels = [\"word\",\"first\",\"last\",\"genre\",\"pos\",\"cpos\"]\n\t\tnum_labels = [\"tok_len\",\"tok_id\"]\n\n\t\ttrain_feats, vocab, toks, firsts, lasts = read_conll(tagged,genre_pat=self.genre_pat,mode=\"sent\",as_text=True,char_bytes=self.lang==\"zho\")\n\t\tgold_feats, _, _, _, _ = read_conll(train,mode=\"sent\",as_text=True)\n\t\tgold_feats = [{\"wid\":0}] + gold_feats + [{\"wid\":0}] # Add dummies to gold\n\n\t\t# Ensure that \"_\" is in the possible values of first/last for OOV chars at test time\n\t\toov_item = train_feats[-1]\n\t\toov_item[\"first\"] = \"_\"\n\t\toov_item[\"last\"] = \"_\"\n\t\toov_item[\"lemma\"] = \"_\"\n\t\toov_item[\"word\"] = \"_\"\n\t\toov_item[\"pos\"] = \"_\"\n\t\toov_item[\"cpos\"] = \"_\"\n\t\toov_item[\"genre\"] = \"_\"\n\t\ttrain_feats.append(oov_item)\n\t\ttrain_feats = [oov_item] + train_feats\n\t\ttoks.append(\"_\")\n\t\ttoks = [\"_\"] + toks\n\n\t\tvocab = Counter(vocab)\n\t\ttop_n_words = vocab.most_common(rare_thresh)\n\t\ttop_n_words, _ = zip(*top_n_words)\n\n\t\theaders = sorted(list(train_feats[0].keys()))\n\t\tdata = []\n\n\t\tpreds = {}\n\n\t\tfor e in self.estimators:\n\t\t\tif multitrain and e.name in [\"LRSentencer\",\"DNNSentencer\"]:\n\t\t\t\tpred = e.predict_cached(tagged)\n\t\t\telse:\n\t\t\t\tpred = e.predict(tagged)\n\t\t\t_, preds[e.name + \"_prob\"] = [list(x) for x in zip(*pred)]\n\t\t\tpreds[e.name + \"_prob\"] = [0.0] + preds[e.name + \"_prob\"] + [0.0] # Add dummy wrap for items -1 and +1\n\t\t\theaders.append(e.name + \"_prob\")\n\t\t\tnum_labels.append(e.name + \"_prob\")\n\n\t\tfor i, item in enumerate(train_feats):\n\t\t\tif item[\"word\"] not in top_n_words:\n\t\t\t\titem[\"word\"] = item[\"pos\"]\n\t\t\tfor e in self.estimators:\n\t\t\t\titem[e.name + \"_prob\"] = preds[e.name + \"_prob\"][i]\n\n\t\t\tfeats = []\n\t\t\tfor k in headers:\n\t\t\t\tfeats.append(item[k])\n\n\t\t\tdata.append(feats)\n\n\t\tdata, headers, cat_labels, num_labels = self.n_gram(data, headers, cat_labels, num_labels)\n\t\t# No need for n_gram feats for the following:\n\t\tif \"NLTKSentencer_prob_min1\" in num_labels:\n\t\t\tnum_labels.remove(\"NLTKSentencer_prob_min1\")\n\t\t\tnum_labels.remove(\"NLTKSentencer_prob_pls1\")\n\t\tif \"UDPipeSentencer_prob_min1\" in num_labels:\n\t\t\tnum_labels.remove(\"UDPipeSentencer_prob_min1\")\n\t\t\tnum_labels.remove(\"UDPipeSentencer_prob_pls1\")\n\t\tif \"LRSentencer_prob_min1\" in num_labels:\n\t\t\tnum_labels.remove(\"LRSentencer_prob_min1\")\n\t\t\tnum_labels.remove(\"LRSentencer_prob_pls1\")\n\t\tif \"RuleBasedSplitter_prob_min1\" in num_labels:\n\t\t\tnum_labels.remove(\"RuleBasedSplitter_prob_min1\")\n\t\t\tnum_labels.remove(\"RuleBasedSplitter_prob_pls1\")\n\t\tif \"DNNSentencer_prob_min1\" in num_labels:\n\t\t\tnum_labels.remove(\"DNNSentencer_prob_min1\")\n\t\t\tnum_labels.remove(\"DNNSentencer_prob_pls1\")\n\t\tif \"tok_id_min1\" in 
num_labels:\n\t\t\tnum_labels.remove(\"tok_id_min1\")\n\t\t\tnum_labels.remove(\"tok_id_pls1\")\n\t\tif \"genre_min1\" in cat_labels:\n\t\t\tcat_labels.remove(\"genre_min1\")\n\t\t\tcat_labels.remove(\"genre_pls1\")\n\n\t\t# Use specific feature subset\n\t\tif chosen_feats is not None:\n\t\t\tnew_cat = []\n\t\t\tnew_num = []\n\t\t\tfor feat in chosen_feats:\n\t\t\t\tif feat in cat_labels:\n\t\t\t\t\tnew_cat.append(feat)\n\t\t\t\telif feat in num_labels:\n\t\t\t\t\tnew_num.append(feat)\n\t\t\tcat_labels = new_cat\n\t\t\tnum_labels = new_num\n\n\t\tdata = pd.DataFrame(data, columns=headers)\n\t\tdata_encoded, multicol_dict = self.multicol_fit_transform(data, pd.Index(cat_labels))\n\n\t\tdata_x = data_encoded[cat_labels+num_labels].values\n\t\tdata_y = [int(t['wid'] == 1) for t in gold_feats]\n\n\t\tsys.stderr.write(\"o Learning...\\n\")\n\n\t\tif tune_mode is not None:\n\t\t\t# Randomize samples for training\n\t\t\tdata_x = data_encoded[cat_labels+num_labels+[\"label\"]].sample(frac=1,random_state=42)\n\t\t\tdata_y = np.where(data_x['label'] == \"_\", 0, 1)\n\t\t\tdata_x = data_x[cat_labels+num_labels]\n\n\t\t\t# Reserve 10% for validation\n\t\t\tval_x = data_x[int(len(data_y)/9):]\n\t\t\tval_y = data_y[int(len(data_y)/9):]\n\t\t\tdata_x = data_x[:int(len(data_y)/9)]\n\t\t\tdata_y = data_y[:int(len(data_y)/9)]\n\n\t\tif tune_mode == \"importances\":\n\t\t\tsys.stderr.write(\"o Measuring correlation of categorical variables\\n\")\n\t\t\ttheil_implications = report_theils_u(val_x,cat_labels)\n\t\t\tfor (var1, var2) in theil_implications:\n\t\t\t\tif var1 in cat_labels and var2 in cat_labels:\n\t\t\t\t\tdrop_var = var2\n\t\t\t\t\tu = theil_implications[(var1, var2)]\n\t\t\t\t\tsys.stderr.write(\"o Removed feature \" + drop_var + \" due to Theil's U \" + str(u)[:6] + \" of \" + var1 + \"->\" + var2 + \"\\n\")\n\t\t\t\t\tcat_labels.remove(drop_var)\n\n\t\t\tsys.stderr.write(\"o Measuring correlation of numerical variables\\n\")\n\t\t\tcor_mat = report_correlations(val_x[num_labels],thresh=0.95)\n\t\t\tfor (var1, var2) in cor_mat:\n\t\t\t\tif var1 in num_labels and var2 in num_labels:\n\t\t\t\t\tdrop_var = var2\n\t\t\t\t\tcorr_level = cor_mat[(var1, var2)]\n\t\t\t\t\tsys.stderr.write(\"o Removed feature \" + drop_var + \" due to correlation \" + str(corr_level) + \" of \" + var1 + \":\" + var2 + \"\\n\")\n\t\t\t\t\tnum_labels.remove(drop_var)\n\n\t\t\treturn cat_labels, num_labels\n\n\t\tif tune_mode in [\"paramwise\",\"full\"]:\n\t\t\tbest_params = {}\n\t\t\t# Tune individual params separately for speed, or do complete grid search if building final model\n\t\t\tparams_list = [{\"n_estimators\":[100,125,150]},\n\t\t\t\t\t\t {'max_depth': [10,15,20,None]},\n\t\t\t\t\t\t {\"min_samples_split\": [5, 10, 15]},\n\t\t\t\t\t\t {\"min_samples_leaf\":[1,2,3]},\n\t\t\t\t\t\t {\"max_features\":[None,\"sqrt\",\"log2\"]}]\n\t\t\tif tune_mode == \"full\":\n\t\t\t\t# Flatten dictionary if doing full CV\n\t\t\t\tparams_list = [{k: v for d in params_list for k, v in d.items()}]\n\t\t\tfor params in params_list:\n\t\t\t\tbase_params = copy.deepcopy(clf_params) # Copy default params\n\t\t\t\tfor p in params:\n\t\t\t\t\tif p in base_params: # Ensure base_params don't conflict with grid search params\n\t\t\t\t\t\tbase_params.pop(p)\n\t\t\t\tgrid = GridSearchCV(RandomForestClassifier(**base_params),params,cv=3,n_jobs=4,error_score=\"raise\",refit=False)\n\t\t\t\tgrid.fit(data_x,data_y)\n\t\t\t\tfor param in params:\n\t\t\t\t\tbest_params[param] = grid.best_params_[param]\n\t\t\twith 
io.open(\"best_params.tab\",'a',encoding=\"utf8\") as bp:\n\t\t\t\tcorpus = os.path.basename(training_file).split(\"_\")[0]\n\t\t\t\tbest_clf = RandomForestClassifier(**best_params)\n\t\t\t\tclf_name = best_clf.__class__.__name__\n\t\t\t\tfor k, v in best_params.items():\n\t\t\t\t\tbp.write(\"\\t\".join([corpus, clf_name, k, str(v)]))\n\t\t\t\tbp.write(\"\\n\")\n\t\t\treturn best_clf, best_params\n\t\telif tune_mode == \"hyperopt\":\n\t\t\tfrom hyperopt import hp\n\t\t\tfrom hyperopt.pyll.base import scope\n\t\t\tspace = {\n\t\t\t\t'n_estimators': scope.int(hp.quniform('n_estimators', 50, 150, 10)),\n\t\t\t\t'max_depth': scope.int(hp.quniform('max_depth', 5, 30, 1)),\n\t\t\t\t'min_samples_split': scope.int(hp.quniform('min_samples_split', 2, 10, 1)),\n\t\t\t\t'min_samples_leaf': scope.int(hp.quniform('min_samples_leaf', 1, 10, 1)),\n\t\t\t\t'max_features': hp.choice('max_features', [\"sqrt\", None, 0.5, 0.7, 0.9]),\n\t\t\t\t'clf': hp.choice('clf', [\"rf\",\"et\",\"gbm\"])\n\t\t\t}\n\t\t\t#space = {\n\t\t\t#\t'n_estimators': scope.int(hp.quniform('n_estimators', 50, 150, 10)),\n\t\t\t#\t'max_depth': scope.int(hp.quniform('max_depth', 3, 30, 1)),\n\t\t\t#\t'eta': scope.float(hp.quniform('eta', 0.01, 0.2, 0.01)),\n\t\t\t#\t'gamma': scope.float(hp.quniform('gamma', 0.01, 0.2, 0.01)),\n\t\t\t#\t'colsample_bytree': hp.choice('colsample_bytree', [0.4,0.5,0.6,0.7,1.0]),\n\t\t\t#\t'subsample': hp.choice('subsample', [0.5,0.6,0.7,0.8,1.0]),\n\t\t\t#\t'clf': hp.choice('clf', [\"xgb\"])\n\t\t\t#}\n\n\t\t\tbest_clf, best_params = hyper_optimize(data_x,data_y,cat_labels=cat_labels,space=space,max_evals=50)\n\t\t\treturn best_clf, best_params\n\t\telse:\n\t\t\tclf = chosen_clf\n\t\t\tclf.set_params(**clf_params)\n\t\t\tif clf.__class__.__name__ in [\"RandomForestClassifier\",\"ExtraTreesClassifier\",\"XGBClassifier\"]:\n\t\t\t\tclf.set_params(**{\"n_jobs\":3,\"random_state\":42,\"oob_score\":True,\"bootstrap\":True})\n\t\t\telse:\n\t\t\t\tclf.set_params(**{\"random_state\":42})\n\t\t\tclf.fit(data_x,data_y)\n\n\t\tfeature_names = cat_labels + num_labels\n\n\t\tzipped = zip(feature_names, clf.feature_importances_)\n\t\tsorted_zip = sorted(zipped, key=lambda x: x[1], reverse=True)\n\t\tsys.stderr.write(\"o Feature importances:\\n\\n\")\n\t\tfor name, importance in sorted_zip:\n\t\t\tsys.stderr.write(name + \"=\" + str(importance) + \"\\n\")\n\n\t\tif hasattr(clf, \"oob_score_\"):\n\t\t\tsys.stderr.write(\"\\no OOB score: \" + str(clf.oob_score_)+\"\\n\")\n\n\t\tsys.stderr.write(\"\\no Serializing model...\\n\")\n\n\t\tjoblib.dump((clf, num_labels, cat_labels, multicol_dict, top_n_words, firsts, lasts), model_path, compress=3)", "def train(self):\n raise NotImplementedError()", "def train(self, features, labels, seed=None):\n raise NotImplementedError('Not implemented')", "def train(self, src, labels): # real signature unknown; restored from __doc__\n pass", "def train(self, trainingData, trainingLabels, validationData, validationLabels): \n \n # might be useful in your code later...\n # this is a list of all features in the training set.\n self.features = list(set([ f for datum in trainingData for f in datum.keys() ]));\n \n if (self.automaticTuning):\n kgrid = [0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 20, 50]\n else:\n kgrid = [self.k]\n \n self.trainAndTune(trainingData, trainingLabels, validationData, validationLabels, kgrid)", "def __init__(self, train_y, test_id, train_id, tags, data_dir='data/output/'):\n self.train_y = train_y\n self.test_id = test_id\n self.train_id = train_id\n self.TAGS = tags\n 
self.data_dir = data_dir", "def train(self):\n self.log(f\"{self.cur_file_path}\\t\\tInfo: train method invoked!\")\n self.log(f\"{self.cur_file_path}\\t\\tInfo: training {self.model.__class__.__name__} model!\")\n\n self.model.fit(self.trainX, self.trainY)", "def buildAndTrain(trainingData):\n\tname = trainingData.drop(['count', 'casual', 'registered'], axis=1).columns\n\ttarget = trainingData['count'].values\n\tfeature = trainingData.drop(['count', 'casual', 'registered'], axis=1).values\n\t# feature scaling\n\tfeature_scaled = preprocessing.scale(feature)\n\t# 0.5 cross validate\n\tcv = cross_validation.ShuffleSplit(len(feature_scaled), n_iter=5, test_size=0.2, random_state=0)\n\t# build model, then training and get accuracy of it\n\tprint('\\n---------岭回归结果--------\\n')\n\tfor train, test in cv:\n\t\tregLR = linear_model.Ridge().fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregLR.score(feature_scaled[train], target[train]),\n\t\t regLR.score(feature_scaled[test], target[test])))\n\tprint('\\n---------svm结果--------\\n')\n\tfor train, test in cv:\n\t\tregSvm = svm.SVR().fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregSvm.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregSvm.score(feature_scaled[test], target[test])))\n\tprint('\\n---------随机森林结果--------\\n')\n\tfor train, test in cv:\n\t\tregRF = RandomForestRegressor(n_estimators=100).fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRF.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRF.score(feature_scaled[test], target[test])))\n\t# reduce some low correction feature\n\tfeatureReduced = trainingData.drop(['count', 'casual', 'registered', 'holiday', 'workingday', 'day'], axis=1).values\n\tfeatureReduced_scaled = preprocessing.scale(featureReduced)\n\tprint('\\n---------减少特征维度以避免过拟合后的随机森林结果--------\\n')\n\tfor train, test in cv:\n\t\tregRFImpr = RandomForestRegressor(n_estimators=100).fit(featureReduced_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFImpr.score(featureReduced_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFImpr.score(featureReduced_scaled[test], target[test])))\n\t# use grid search algorithm to improve random forest regression\n\tX_train, X_test, y_train, y_test = cross_validation.train_test_split(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeature_scaled, target, test_size=0.2, random_state=0)\n\ttuned_parameters = [{'n_estimators': [10,100,500], 'max_depth': [2,3,4,5,6,7,8,9,10]}]\n\tscores = ['r2']\n\n\tfor score in scores:\n\t\tprint(score)\n\t\tclf = GridSearchCV(RandomForestRegressor(), tuned_parameters, cv=5, scoring=score)\n\t\tclf.fit(X_train, y_train)\n\t\tprint(clf.best_estimator_)\n\t\tprint('each parameter combination is ')\n\t\tfor params, mean_score, scores in clf.grid_scores_:\n\t\t\tprint('{0:.3f} (+/-{1:.03f}) for {2}'.format(mean_score, scores.std()/2, params))\n\n\tprint('--------最优参数下的随机森林结果--------')\n\tfor train, test in cv:\n\t\tregRFBest = RandomForestRegressor(n_estimators=100, max_depth=10).fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFBest.score(feature_scaled[train], 
target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFBest.score(feature_scaled[test], target[test])))\n\treturn regRFBest, feature_scaled, target", "def init_train(self):\n data = self.loader.load_labelled_data(self.conf.split, 'training')\n\n # Initialise unlabelled data iterator\n num_ul = 0\n if self.conf.ul_mix > 0:\n ul_data = self.loader.load_unlabelled_data(self.conf.split, 'all')\n\n # calculate number of unlabelled images as a proportion of the labelled images\n num_ul = int(data.size() * self.conf.ul_mix)\n num_ul = num_ul if num_ul <= ul_data.size() else ul_data.size()\n log.info('Sampling %d unlabelled images out of total %d.' % (num_ul, ul_data.size()))\n ul_data.sample(num_ul)\n self.gen_X_U = data_utils.generator(self.conf.batch_size, 'overflow', ul_data.images)\n\n # Initialise labelled data iterator\n assert self.conf.l_mix >= 0\n\n # calculate number of labelled images\n num_l = int(data.size() * self.conf.l_mix)\n num_l = num_l if num_l <= data.size() else data.size()\n log.info('Using %d labelled images out of total %d.' % (num_l, data.size()))\n train_images = data.images[:num_l]\n train_masks = data.masks[:num_l]\n\n self.conf.unlabelled_image_num = num_ul\n self.conf.labelled_image_num = num_l\n self.conf.data_len = num_ul if num_ul > num_l else num_l\n self.conf.batches = int(np.ceil(self.conf.data_len / self.conf.batch_size))\n self.conf.save()\n\n self.gen_X_L = data_utils.generator(self.conf.batch_size, 'overflow', train_images, train_masks)\n\n # Initialise real masks iterator for discriminator training, using the real masks from the data CV split.\n self.other_masks = data_utils.generator(self.conf.batch_size, 'overflow', data.masks + 0)", "def train(self):\n self.training = True", "def prepareData(self):\n\t\tprint ('')\n\t\tfrom keras.preprocessing.sequence import pad_sequences\n\t\tfrom sklearn.model_selection import train_test_split\n\t\tfrom keras.utils import to_categorical\n\t\timport numpy as np\n\n\t\tfrom sklearn.preprocessing import LabelBinarizer, LabelEncoder\n\n\t\tX_snt = [[self.word2idx[w] if w in self.word2idx else self.word2idx[self.word_unk_token] for w in s] for s in self.x_document]\n\t\ty_tag = [[self.tag2idx[t]] for t in self.y_document]\n\n\t\tX_snt = pad_sequences(maxlen=self.parameters['max_doc_len'], sequences=X_snt, padding='post', value=self.word2idx[self.word_pad_token])\n\t\ty_tag = to_categorical(y_tag, self.tags_len)\n\n\t\tprint (\"\\tRandom:\\t\", self.random)\n\t\tprint (\"\\tTest size:\\t\", self.split_train_test)\n\n\t\tself.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X_snt, y_tag, test_size=self.split_train_test, random_state=self.random)\n\n\t\tself.X_train = np.array(self.X_train)\n\t\tself.X_test = np.array(self.X_test)\n\t\tself.y_train = np.array(self.y_train)\n\t\tself.y_test = np.array(self.y_test)\n\n\t\tprint ('\\n\\tWords: {}\\t{}'.format(self.X_train.shape, self.X_test.shape) )\n\t\tprint ('\\tTags: {}\\t{}\\n'.format(self.y_train.shape, self.y_test.shape))", "def train(self, args):\n\n ##################################\n # Read the training data\n ##################################\n if not os.path.isdir(args.annotationPath):\n print('annotation path does not exist: {}' \\\n .format(args.annotationPath))\n return -1\n\n data = self.readData(args.annotationPath)\n\n x = []\n y = []\n l = []\n for subject, df in data.items():\n lx = df[['gradient', 'rate']].values.tolist()\n ly = np.array(df[['immersion']].values.tolist()).squeeze(-1)\n x.extend(lx)\n y.extend(ly.tolist())\n 
l.append(len(lx))\n\n ############################\n # Execute the training\n ############################\n\n print('Training the detector...')\n self._clf.fit(x, y, l)\n\n if not self.save():\n print('Could not persist the trained model to disk (in file {})' \\\n .format(self._modelFile))\n\n return 0", "def train(self)->None:", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def train(self, reader: DataReader, reader_dev: DataReader = None):\n\n xs = []\n ys = []\n\n new_xs, new_ys = get_dataset(reader)\n xs += new_xs\n ys += new_ys\n\n shuffle_concurrent_lists([xs, ys])\n\n # Zip datasets and generate complete dictionary\n examples = [data.Example.fromlist([x, y], self.data_fields) for x, y in zip(xs, ys)]\n\n dataset = data.Dataset(examples, fields=self.data_fields)\n\n self.input_field.build_vocab(dataset)\n self.output_field.build_vocab(dataset)\n\n dataset_size = len(xs)\n\n train_iter = self.prepare_dataset(xs, ys)\n\n dev_iter = self.get_iter(reader_dev)\n\n self.input_field.vocab.load_vectors(\"glove.6B.300d\")\n\n num_classes = len(self.output_field.vocab)\n\n embed = nn.Embedding.from_pretrained(self.input_field.vocab.vectors)\n\n self.network = FrameIDNetwork(self.cM, embed, num_classes)\n\n if dev_iter is None:\n logging.info(\n f\"NOTE: Beginning training w/o a development set! Autostopper deactivated!\"\n )\n\n self.network.train_model(dataset_size, train_iter, dev_iter)", "def train(self, train_set) -> None:\n super().train(train_set)\n # split into data and target\n xlist, y = zip(*train_set)\n x = sparse.vstack(xlist)\n self._classifier.fit(x, y)", "def train(self, trainingData, trainingLabels, validationData, validationLabels):\t \n\t \n\t# might be useful in your code later...\n\t# this is a list of all features in the training set.\n\tself.features = list(set([ f for datum in trainingData for f in datum.keys() ]));\n\t\n\tif (self.automaticTuning):\n\t\tkgrid = [0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 20, 50]\n\telse:\n\t\tkgrid = [self.k]\n\t\t\n\tself.trainAndTune(trainingData, trainingLabels, validationData, validationLabels, kgrid)", "def train_on_raw_data(data_alice, data_bob):\n raw_test_data = pd.concat([data_alice[1], data_bob[1]])\n x = raw_test_data.drop('Class', axis=1)\n y = raw_test_data['Class']\n\n return fit_rf_classifier(x, y)", "def test_training(self):\n self.classifier.train(\"test\", self.message)", "def train(self, dataset, model_dir):\n raise NotImplementedError", "def train(self, training_data, chunk_size=100):\n # For some reason, for the SVM to work, the keys need to be in alphabetical order\n training_data = {k : training_data[k] for k in sorted(training_data)}\n\n # Compile all author texts into one large text to then be broken down\n for auth in training_data:\n training_data[auth] = '\\n\\n'.join(training_data[auth])\n\n self.auths = list(training_data.keys())\n self.chunk_size = chunk_size\n\n # Creates two lists, one of the texts and one of the corresponding author labels.\n labels = []\n texts = []\n for auth in training_data:\n lines = training_data[auth].split('\\n')\n for p in range( chunk_size, len(lines), chunk_size ):\n labels.append(auth) # authors per text in the training corpus\n texts.append('\\n'.join(lines[p-chunk_size : p])) # texts in the training corpus\n labels = array(labels)\n texts = array(texts)\n\n # Cleans the texts\n for i in range(len(texts)):\n texts[i] = self._clean(texts[i])\n\n # Generates the profiles from these tests\n profiles = zeros((len(texts), 
len(self.alph)**self.N))\n for i in range(len(texts)):\n profiles[i] = self._profile(texts[i])\n\n\n # Reduces the features and fits the model\n self.train_data = [profiles, labels]\n self._reduceFeatures()\n\n self.model = SVC(kernel='linear')\n self.model.probability = True\n self.model.fit(self.train_data[0], self.train_data[1])", "def train(self, trnM, trnL):\n print 'Training ...'\n self.clf.fit(trnM, trnL)", "def train(self, examples):\n # iterate over our sentences in the examples\n for sentence in examples:\n # some testing prints\n # print('---------------------------')\n # print(sentence)\n # print('---------------------------')\n # get every tuple in the sentence\n for i in range(len(sentence)):\n # seperate the word and the state\n word = sentence[i][0]\n state = sentence[i][1]\n # add our word and state to our set of all words and states\n self.vocab.add(word)\n self.states.add(state)\n # if we are at the first word in the sentence need to\n # increment the number of times this tag appeared first in a sentence\n if i == 0:\n self.pi[state] += 1\n # else we need to increment the number of times the\n # current tag was preceeded by the tag before it\n else:\n if sentence[i - 1][1] not in self.transitions:\n self.transitions[sentence[i - 1][1]] = Counter()\n self.transitions[sentence[i - 1][1]][state] += 1\n # now we increment the number of times the word had this tag\n if state not in self.emissions:\n self.emissions[state] = Counter()\n self.emissions[state][word] += 1\n # print(self.emissions)\n # print(self.transitions)\n # print(self.pi)\n # print('---------------------------')\n\n # now we store the counts we will need since during our iterations\n # the counts will change\n # this stores how many sentences we have\n # count(sentences)\n pi_val = sum(self.pi.values())\n # now we are going to get the counts of the tags\n # count(t_i)\n # we are using emissions because each tag occurs in it unlike\n # in transitions where the last tag is lost kind of\n for state in self.emissions.keys():\n # print(state, sum(self.emissions[state].values()))\n self.tag_count[state] = sum(self.emissions[state].values())\n # print('---------------------------')\n # now we do the probability of a sentence starting with each tag\n # count(t_i) / count(sentences)\n for state in self.pi:\n self.pi[state] /= pi_val\n # now we will calculate the probabilites that each tag proceeds the next tag\n # ie p(t_i | t_i-1) = count(t_i-1, t_i) / count(t_i-1)\n for prev_state in self.transitions:\n for state in self.transitions[prev_state]:\n # print(prev_state, state, self.transitions[prev_state][state])\n # print(prev_state, tag_count[prev_state])\n self.transitions[prev_state][state] /= self.tag_count[prev_state]\n # print(self.transitions[prev_state][state])\n # print('---------------------------')\n # and the probability of a word having the tag with laplace smoothing\n # p(w_i | t_i) = count(t_i, w_i) / count(t_i)\n for state in self.emissions:\n for word in self.emissions[state]:\n # print(state, word, self.emissions[state][word])\n # print(state, tag_count[state])\n self.emissions[state][word] = (self.emissions[state][word] + 1) / (\n self.tag_count[state] + len(self.vocab))\n # print(self.emissions[state][word])\n # print('---------------------------')\n # print(self.emissions)\n # print(self.transitions)\n # print(self.pi)\n # print('---------------------------')\n # print(len(self.vocab))\n # print(len(self.states))\n # print('---------------------------')", "def fit(self, tagged_sents):\n\n X = []\n 
y_true = []\n for sent in tagged_sents:\n #frase=list\n frase = [word[0] for word in sent]\n #print(sent)\n #for w in sent:\n # frase.append(w[0])\n for i in range(0,len(sent)):\n self._palabrasvistas.add(sent[i][0]) # como es set, si ya esta va a obviarla\n x = feature_dict(frase, i)\n X.append(x)\n y_true.append(sent[i][1])\n\n #print(X)\n #print(y_true)\n self.pipeline.fit(X, y_true)", "def get_train_data(self, train_data):\n X = []\n Y = []\n\n # word 2 indices and tag 2 indices\n w2i = {} # word to index\n c2i = {} # char to index\n tag2idx = {} # tag2idx\n\n w2i[\"_UNK\"] = 0 # unk word / OOV\n c2i[\"_UNK\"] = 0 # unk char\n c2i[\"<w>\"] = 1 # word start\n c2i[\"</w>\"] = 2 # word end index\n \n \n num_sentences=0\n num_tokens=0\n for instance_idx, (words, tags) in enumerate(read_conll_file(train_data)):\n instance_word_indices = [] #sequence of word indices\n instance_char_indices = [] #sequence of char indices\n instance_tags_indices = [] #sequence of tag indices\n\n for i, (word, tag) in enumerate(zip(words, tags)):\n\n # map words and tags to indices\n if word not in w2i:\n w2i[word] = len(w2i)\n instance_word_indices.append(w2i[word])\n\n if self.c_in_dim > 0:\n chars_of_word = [c2i[\"<w>\"]]\n for char in word:\n if char not in c2i:\n c2i[char] = len(c2i)\n chars_of_word.append(c2i[char])\n chars_of_word.append(c2i[\"</w>\"])\n instance_char_indices.append(chars_of_word)\n\n if tag not in tag2idx:\n tag2idx[tag]=len(tag2idx)\n\n instance_tags_indices.append(tag2idx.get(tag))\n\n num_tokens+=1\n\n num_sentences+=1\n\n X.append((instance_word_indices, instance_char_indices)) # list of word indices, for every word list of char indices\n Y.append(instance_tags_indices)\n\n\n print(\"%s sentences %s tokens\" % (num_sentences, num_tokens), file=sys.stderr)\n print(\"%s w features, %s c features \" % (len(w2i),len(c2i)), file=sys.stderr)\n if self.c_in_dim == 0:\n print(\"char features disabled\", file=sys.stderr)\n\n assert(len(X)==len(Y))\n\n # store mappings of words and tags to indices\n self.set_indices(w2i, c2i, tag2idx)\n\n return X, Y", "def train(parser):\n cli_args = add_all_args(parser, TRAINING)\n if not cli_args.train_tfrecord and not cli_args.valid_tfrecord:\n assert (\n cli_args.relative_labels or cli_args.xml_labels_folder\n ), 'No labels provided: specify --relative-labels or --xml-labels-folder'\n if cli_args.augmentation_preset:\n assert (\n preset := cli_args.augmentation_preset\n ) in AUGMENTATION_PRESETS, f'Invalid augmentation preset {preset}'\n trainer = Trainer(\n input_shape=cli_args.input_shape,\n model_configuration=cli_args.model_cfg,\n classes_file=cli_args.classes,\n train_tf_record=cli_args.train_tfrecord,\n valid_tf_record=cli_args.valid_tfrecord,\n max_boxes=cli_args.max_boxes,\n iou_threshold=cli_args.iou_threshold,\n score_threshold=cli_args.score_threshold,\n image_folder=cli_args.image_folder,\n )\n trainer.train(\n epochs=cli_args.epochs,\n batch_size=cli_args.batch_size,\n learning_rate=cli_args.learning_rate,\n new_dataset_conf={\n 'dataset_name': (d_name := cli_args.dataset_name),\n 'relative_labels': cli_args.relative_labels,\n 'test_size': cli_args.test_size,\n 'voc_conf': cli_args.voc_conf,\n 'augmentation': bool((preset := cli_args.augmentation_preset)),\n 'sequences': AUGMENTATION_PRESETS.get(preset),\n 'aug_workers': cli_args.workers,\n 'aug_batch_size': cli_args.process_batch_size,\n },\n dataset_name=d_name,\n weights=cli_args.weights,\n evaluate=cli_args.evaluate,\n merge_evaluation=cli_args.merge_evaluation,\n 
evaluation_workers=cli_args.workers,\n shuffle_buffer=cli_args.shuffle_buffer,\n min_overlaps=cli_args.min_overlaps,\n display_stats=cli_args.display_stats,\n plot_stats=cli_args.plot_stats,\n save_figs=cli_args.save_figs,\n clear_outputs=cli_args.clear_output,\n n_epoch_eval=cli_args.n_eval,\n )", "def train(self, train_X, train_y):\n if self.feat_sel:\n train_X = self.do_feat_sel(train_X, train_y)\n\n train_X, train_y = self.sample.fit_resample(train_X, train_y)\n self.clf.fit(train_X, train_y)", "def test_train(self, tmp_path, wordvec_pretrain_file):\n train_file = str(tmp_path / \"train.conllu\")\n dev_file = str(tmp_path / \"dev.conllu\")\n pred_file = str(tmp_path / \"pred.conllu\")\n\n with open(train_file, \"w\", encoding=\"utf-8\") as fout:\n fout.write(TRAIN_DATA)\n\n with open(dev_file, \"w\", encoding=\"utf-8\") as fout:\n fout.write(DEV_DATA)\n\n args = [\"--wordvec_pretrain_file\", wordvec_pretrain_file,\n \"--train_file\", train_file,\n \"--eval_file\", dev_file,\n \"--gold_file\", dev_file,\n \"--output_file\", pred_file,\n \"--log_step\", \"10\",\n \"--eval_interval\", \"20\",\n \"--max_steps\", \"100\",\n \"--shorthand\", \"en_test\",\n \"--lang\", \"en\"]\n tagger.main(args)", "def train(net, train_data,\n dev_data_filepath, dev_preds_filepath, model_save_path,\n para_selector, # TODO sort these nicely\n ps_threshold=0.1,\n ner_device=torch.device('cpu'), training_device=torch.device('cpu'),\n text_length=250,\n fb_passes=1, coefs=(0.5, 0.5),\n epochs=3, batch_size=1, learning_rate=1e-4,\n eval_interval=None, verbose_evaluation=False, timed=False):\n timer = utils.Timer()\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n\n flair.device = torch.device(ner_device)\n ner_tagger = flair.models.SequenceTagger.load('ner') # this hard-codes flair tagging!\n\n optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)\n\n losses = []\n real_batch_sizes = [] # some data points are not usable; this logs the real sizes\n graph_logging = [0, 0, 0] # [total nodes, total connections, number of graphs]\n point_usage = [0, 0] # [used points, unused points]\n dev_scores = []\n\n # Set the network into train mode\n net.train()\n net = net.to(training_device)\n\n timer(\"training_preparation\")\n\n print(\"Training...\")\n\n best_score = 0\n eval_interval = eval_interval if eval_interval else float('inf') # interval in batches\n a_model_was_saved_at_some_point = False\n\n for epoch in range(epochs):\n # TODO take recurrent times for forward, evaluation saving etc.\n print('Epoch %d/%d' % (epoch + 1, epochs))\n batch_counter = 0\n\n for step, batch in enumerate(tqdm(train_data, desc=\"Iteration\")):\n\n \"\"\" DATA PROCESSING \"\"\"\n ids = []\n queries = []\n contexts = []\n graphs = []\n\n useless_datapoint_inds = []\n\n for i, point in enumerate(batch):\n\n # make a list[ list[str, list[str]] ] for each point in the batch\n context = para_selector.make_context(point,\n threshold=ps_threshold,\n context_length=text_length) # TODO add device and numerated arguments\n graph = EntityGraph.EntityGraph(context,\n context_length=text_length,\n tagger=ner_tagger)\n if graph.graph:\n ids.append(point[0])\n queries.append(point[2])\n contexts.append(context)\n graphs.append(graph)\n graph_logging = [a+b # [total nodes, total connections, number of graphs]\n for a,b in zip(graph_logging, [len(graph.graph),\n len(graph.relation_triplets()),\n 1])]\n point_usage[0] += 1\n else: # if the NER in EntityGraph doesn't find entities, the datapoint is useless.\n 
useless_datapoint_inds.append(i)\n point_usage[1] += 1\n\n batch = [point for point in batch if point[0] in ids] # update the batch to exclude useless data points\n\n real_batch_sizes.append(batch_size - len(useless_datapoint_inds)) #TODO track the batch sizes!\n\n # if our batch is completely useless, just continue with the next batch. :(\n if len(useless_datapoint_inds) == batch_size:\n continue\n\n # turn the texts into tensors in order to put them on the GPU\n qc_ids = [net.encoder.token_ids(q, c) for q, c in zip(queries, contexts)] # list[ (list[int], list[int]) ]\n q_ids, c_ids = list(zip(*qc_ids)) # tuple(list[int]), tuple(list[int])\n q_ids_list = [torch.tensor(q) for q in q_ids] # list[Tensor] #TODO? maybe put this into forward()?\n c_ids_list = [torch.tensor(c) for c in c_ids] # list[Tensor]\n\n \"\"\" MAKE TRAINING LABELS \"\"\"\n # replace the paragraphs in raw_point with their shortened versions (obtained from PS)\n for (i, p), c in zip(enumerate(batch), contexts):\n batch[i][3] = c\n\n # TODO? change utils.make_labeled_data_for_predictor() to process batches of data?\n labels = [utils.make_labeled_data_for_predictor(g,p,tokenizer) for g,p in zip(graphs, batch)] # list[(support, start, end, type)]\n # list[(Tensor, Tensor, Tensor, Tensor)] -> tuple(Tensor), tuple(Tensor), tuple(Tensor), tuple(Tensor)\n sup_labels, start_labels, end_labels, type_labels = list(zip(*labels))\n # print(f\"in train_dfgn.train(): shapes of labels:\\n{len(sup_labels)}, {len(start_labels)}, {len(end_labels)}, {len(type_labels)}\") #CLEANUP\n\n q_ids_list = [t.to(training_device) if t is not None else None for t in q_ids_list]\n c_ids_list = [t.to(training_device) if t is not None else None for t in c_ids_list]\n for i, g in enumerate(graphs):\n graphs[i].M = g.M.to(training_device) # work with enumerate to actually mutate the graph objects\n\n sup_labels = torch.stack(sup_labels).to(training_device) # (batch, M)\n start_labels = torch.stack(start_labels).to(training_device) # (batch, 1)\n end_labels = torch.stack(end_labels).to(training_device) # (batch, 1)\n type_labels = torch.stack(type_labels).to(training_device) # (batch)\n\n \"\"\" FORWARD PASSES \"\"\"\n optimizer.zero_grad()\n\n sups, starts, ends, types = [], [], [], []\n for query, context, graph in zip(q_ids_list, c_ids_list, graphs): # 'graph' is not a tensor -> for-loop instead of batch processing\n\n o_sup, o_start, o_end, o_type = net(query, context, graph, fb_passes=fb_passes) # (M, 2), (M), (M), (1, 3)\n sups.append(o_sup)\n starts.append(o_start)\n ends.append(o_end)\n types.append(o_type)\n\n sups = torch.stack(sups) # (batch, M, 2)\n starts = torch.stack(starts) # (batch, 1, M)\n ends = torch.stack(ends) # (batch, 1, M)\n types = torch.stack(types) # (batch, 1, 3)\n\n \"\"\" LOSSES & BACKPROP \"\"\"\n weights = torch.ones(2, device=training_device) #TODO maybe extract this to a tiny function?\n sup_label_batch = sup_labels.view(-1)\n weights[0] = sum(sup_label_batch)/float(sup_label_batch.shape[0])\n weights[1] -= weights[0] # assign the opposite weight\n\n sup_criterion = torch.nn.CrossEntropyLoss(weight=weights)\n criterion = torch.nn.CrossEntropyLoss() # for prediction of answer type\n\n # use .view(-1,...) 
to put points together (this is like summing the points' losses)\n sup_loss = sup_criterion(sups.view(-1,2), sup_label_batch) # (batch*M, 2), (batch*M)\n start_loss = sum([criterion(starts[i], start_labels[i]) for i in range(start_labels.shape[0])]) # batch * ( (1, M, 1), (1) )\n end_loss = sum([criterion(ends[i], end_labels[i]) for i in range(end_labels.shape[0])]) # batch * ( (1, M, 1), (1) )\n type_loss = criterion(types.view(-1,3), type_labels.view(-1)) # (batch, 1, 3), (batch, 1)\n\n # This doesn't have the weak supervision BFS mask stuff from section 3.5 of the paper\n # TODO? maybe start training with start/end loss only first, then train another model on all 4 losses?\n loss = start_loss + end_loss + coefs[0]*sup_loss + coefs[1]*type_loss # formula 15\n\n loss.backward(retain_graph=True)\n losses.append( (loss.item(),\n sup_loss.item(),\n start_loss.item(),\n end_loss.item(),\n type_loss.item())) # for logging purposes\n\n batch_counter += 1\n # Evaluate on validation set after some iterations\n if batch_counter % eval_interval == 0:\n\n # this calls the official evaluation script (altered to return metrics)\n metrics = evaluate(net, #TODO make this prettier\n tokenizer, ner_tagger,\n training_device, dev_data_filepath, dev_preds_filepath,\n fb_passes = fb_passes,\n text_length = text_length,\n verbose=verbose_evaluation)\n score = metrics[\"joint_f1\"]\n dev_scores.append(metrics) # appends the whole dict of metrics\n if score >= best_score:\n print(f\"Better eval found with accuracy {round(score, 3)} (+{round(score - best_score, 3)})\")\n best_score = score\n\n torch.save(net, model_save_path) #TODO make sure that this works (maybe, should we save each of the 3 parts indvidually?)\n a_model_was_saved_at_some_point = True\n else:\n print(f\"No improvement yet...\")\n timer(f\"training_evaluation_{batch_counter/eval_interval}\")\n\n optimizer.step()\n timer(f\"training_epoch_{epoch}\")\n\n #========= END OF TRAINING =============#\n metrics = evaluate(net, # TODO make this prettier\n tokenizer, ner_tagger,\n training_device, dev_data_filepath, dev_preds_filepath,\n fb_passes=fb_passes,\n text_length=text_length,\n verbose=verbose_evaluation)\n score = metrics[\"joint_f1\"]\n dev_scores.append(metrics) # appends the whole dict of metrics\n if score >= best_score:\n torch.save(net,\n model_save_path)\n\n if not a_model_was_saved_at_some_point: # make sure that there is a model file\n print(f\"saving model to {model_save_path}...\")\n torch.save(net, model_save_path)\n\n losses_with_batchsizes = [(b, t[0], t[1], t[2], t[3], t[4]) for b,t in zip(real_batch_sizes, losses)]\n\n if timed:\n return losses_with_batchsizes, dev_scores, graph_logging, point_usage, timer\n else:\n return losses_with_batchsizes, dev_scores, graph_logging, point_usage", "def train(self) -> Any:\n pass", "def train(self):\n for doc, label in zip(self.train_docs(), self.train_labels()):\n yield doc, label", "def train():\n import trace\n trace.train()", "def train(self):\n logging.info(\"Training DINTModel.\")\n start = time.time()\n tr = self.classifier.train()\n return time.time() - start", "def train(self, trainingData, trainingLabels, testData, testLabels, validate): \n\t\t \n\t\tself.features = trainingData[0].keys() # this could be useful for your code later...\n\n\t\tif (self.automaticTuning):\n\t\t\tCgrid = [0.001, 0.002, 0.003, 0.004, 0.005]\n\t\telse:\n\t\t\tCgrid = [self.C]\n\t\t\t\n\t\treturn self.trainAndTune(trainingData, trainingLabels, testData, testLabels, Cgrid, validate)", "def train(self, 
data):\n \n logger('[.] Training with whole dataset ...')\n \n datalist = self.unpack_data(data)\n self.knn_model.fit(datatuple['features'], datatuple['labels'])", "def train_test_model_batch():\n train=learning.Train_kmer_clf()\n train.run()", "def train(self, trainfile):\r\n sentences_emb,labels=self.read_data(trainfile)\r\n logReg = LogisticRegression(penalty=\"l2\",C = 10, multi_class='auto',solver='newton-cg')\r\n logReg.fit(sentences_emb,labels)\r\n self.clf=logReg", "def training(training_data, iterations):\n nlp = spacy.blank('en') # create blank Language class\n # create the built-in pipeline components and add them to the pipeline\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if 'ner' not in nlp.pipe_names:\n ner = nlp.create_pipe('ner')\n nlp.add_pipe(ner, last=True)\n\n # add labels\n for _, annotations in training_data:\n for ent in annotations.get('entities'):\n ner.add_label(ent[2])\n\n # get names of other pipes to disable them during training\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']\n with nlp.disable_pipes(*other_pipes): # only train NER\n optimizer = nlp.begin_training()\n for itn in range(iterations):\n print(\"Starting iteration \" + str(itn))\n random.shuffle(TRAIN_DATA)\n losses = {}\n for text, annotations in training_data:\n nlp.update(\n [text], # batch of texts\n [annotations], # batch of annotations\n drop=0.2, # dropout - make it harder to memorise data\n sgd=optimizer, # callable to update weights\n losses=losses)\n print(losses)\n return nlp", "def train(self,\n max_epochs = 10, # number of max possible training iterations\n min_count = 5, # min frequency of usage to enter vocab\n vec_size = 100, # size of feature vectors\n max_alpha = 0.025, # starting learning rate\n min_alpha = 0.00025, # lowest learning rate\n save_name = None):\n\n if not self.tagged_docs and not (self.paperdf and self.authordf):\n print('no data to train.')\n return\n\n self.model.epochs = max_epochs\n self.model.vocabulary.min_count = min_count\n self.model.vector_size = vec_size\n self.model.alpha = max_alpha\n self.model.min_alpha = min_alpha\n\n print('Training model.')\n print('Building Vocabulary.')\n self.model.build_vocab(self.tagged_docs)\n\n print('Training for', max_epochs, 'epochs.')\n self.epoch_logger = EpochLogger()\n self.model.train(self.tagged_docs, total_examples = self.model.corpus_count,\n epochs = self.model.epochs, callbacks = [self.epoch_logger])\n print(\"Finished in {} seconds.\".format(round(time.time() - self.epoch_logger.start_time, 3)))\n\n if save_name:\n filename = str(save_name) + '.model'\n self.model.save(filename)\n print(\"Model Saved as\", filename)\n\n # self._compute_util_data()", "def train():\n with tf.Graph().as_default():\n global_step = tf.contrib.framework.get_or_create_global_step()\n\n # Get images and labels for ocr.\n print(\"Preparing input\")\n # with tf.device('/cpu:0'):\n images, labels, seq_lengths = ocr.distorted_inputs()\n\n # Build a Graph that computes the logits predictions from the\n # inference model.\n print(\"Building graph\")\n logits, timesteps = ocr.inference(images, FLAGS.batch_size, train=True)\n\n # Calculate loss.\n print(\"Creating loss\") \n loss = ocr.create_ctc_loss(logits, labels, timesteps, seq_lengths)\n\n print(\"Creating LER\")\n ler = ocr.create_label_error_rate(logits, labels, timesteps)\n\n print(\"Creating decoder\")\n decoded = ocr.check_decoder(logits, labels, timesteps)\n\n # Build a Graph that trains the model with one batch of examples and\n # 
updates the model parameters.\n print(\"Creating train OP\")\n train_op, lr = ocr.train_simple(loss, global_step)\n\n print(\"Creating init OP\")\n init_op = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())\n\n sess = tf.Session()\n\n sess.run(init_op)\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n \n train_writer = tf.summary.FileWriter(FLAGS.train_dir,\n sess.graph)\n saver = tf.train.Saver()\n summary_op = tf.summary.merge_all()\n\n print(\"Starting training\")\n print_every_n = 1000\n start_time = time.time()\n mean_ler = 0\n while not coord.should_stop():\n try: \n _, loss_res, lr_res, ler_res, summary_op_result, global_step_result, decoded_res = sess.run([train_op, loss, lr, ler, summary_op, global_step, decoded])\n mean_ler += ler_res\n if global_step_result % print_every_n == 0 or global_step_result == 1:\n mean_steps_time = (time.time() - start_time) / print_every_n\n mean_ler = mean_ler / print_every_n\n status_string = \"Step: {} Loss: {:.4f} LR: {:.6f} LER: {:.4f} Step time: {:.3f} sec\"\n print(status_string.format(global_step_result, loss_res, lr_res, ler_res, mean_steps_time)) \n # print(\"Decoded:\")\n # print(str(decoded_res))\n # print(\"Timesteps:\" + str(timesteps_res))\n train_writer.add_summary(summary_op_result, global_step=global_step_result)\n saver.save(sess, os.path.join(FLAGS.train_dir, 'checkpoint'), global_step=global_step)\n start_time = time.time()\n mean_ler = 0\n\n # images_res = sess.run(images)\n # print(images_res) \n # for img in images_res:\n # cv2.imshow(\"img\", img)\n # cv2.waitKey(0)\n except Exception as e:\n print(e)\n coord.request_stop(e)\n\n # class _LoggerHook(tf.train.SessionRunHook):\n # \"\"\"Logs loss and runtime.\"\"\"\n #\n # def begin(self):\n # self._step = -1\n #\n # def before_run(self, run_context):\n # self._step += 1\n # self._start_time = time.time()\n # return tf.train.SessionRunArgs(loss) # Asks for loss value.\n #\n # def after_run(self, run_context, run_values):\n # duration = time.time() - self._start_time\n # loss_value = run_values.results\n # if self._step % 10 == 0:\n # num_examples_per_step = FLAGS.batch_size\n # examples_per_sec = num_examples_per_step / duration\n # sec_per_batch = float(duration)\n #\n # format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '\n # 'sec/batch)')\n # print (format_str % (datetime.now(), self._step, loss_value,\n # examples_per_sec, sec_per_batch))\n #\n # with tf.train.MonitoredTrainingSession(\n # checkpoint_dir=FLAGS.train_dir,\n # hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),\n # tf.train.NanTensorHook(loss),\n # _LoggerHook()],\n # config=tf.ConfigProto(\n # log_device_placement=FLAGS.log_device_placement)) as mon_sess:\n # while not mon_sess.should_stop():\n # print(\"Running session\")\n # mon_sess.run(train_op)" ]
[ "0.70876074", "0.7059196", "0.6855058", "0.6802832", "0.6785829", "0.67681587", "0.67512184", "0.6747457", "0.67143035", "0.6711", "0.66792077", "0.66118145", "0.660746", "0.66058165", "0.66058165", "0.66058165", "0.66058165", "0.66058165", "0.65927374", "0.6588397", "0.6588397", "0.65572727", "0.65565", "0.6553021", "0.6540194", "0.653238", "0.6488329", "0.6433304", "0.6431215", "0.64251614", "0.6423196", "0.6406166", "0.63679653", "0.6364063", "0.63505983", "0.63323724", "0.63238174", "0.63214165", "0.6320146", "0.6317543", "0.6316558", "0.6316032", "0.631359", "0.6298805", "0.6297476", "0.6288075", "0.62797785", "0.62766653", "0.62686515", "0.62591964", "0.6244007", "0.62412053", "0.62411326", "0.62368935", "0.6233027", "0.6226435", "0.6222583", "0.6211171", "0.6209719", "0.6198141", "0.6198105", "0.61739886", "0.6161104", "0.61602926", "0.61467594", "0.6145638", "0.6141645", "0.6138913", "0.61322284", "0.61284083", "0.6125092", "0.6124019", "0.61157876", "0.61144286", "0.6109559", "0.6105832", "0.6099658", "0.6095639", "0.6086422", "0.60822684", "0.60806173", "0.6060221", "0.6042255", "0.60291356", "0.6027701", "0.6027674", "0.6024853", "0.60233027", "0.6022177", "0.6020482", "0.601706", "0.6014703", "0.60121536", "0.6011741", "0.6010072", "0.60087764", "0.60084796", "0.59967995", "0.59934735", "0.5993347" ]
0.6713298
9
Excludes where learners played against learners
def get_reward_lists(f):
    with open(f, 'r',newline='') as f:
        lines = f.readlines()
    return lines
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exclude_words(self, words):\n idcs = []\n for i in range(len(self)):\n if not self.transcript(i) in words:\n idcs.append(i)\n subset = self.sub_set(idcs)\n return subset", "def prune_teachers(self):\n self.teacher_policies = self.teacher_policies[: self.num_teachers]\n self.teacher_envs = self.teacher_envs[: self.num_teachers]\n self.teacher_expl_strats = self.teacher_expl_strats[: self.num_teachers]\n self.teacher_critics = self.teacher_critics[: self.num_teachers]\n self.teacher_ex_dirs = self.teacher_ex_dirs[: self.num_teachers]", "def _filter_unanswerable_samples(self):\n a = []\n q = []\n annotations = []\n for i in range(len(self.answers)):\n if len(self.answers[i].nonzero()) > 0:\n a.append(self.answers[i])\n q.append(self.questions[i])\n\n annotations.append(self.annotations[i])\n self.answers = a\n self.questions = q\n self.annotations = annotations", "def exclude(self, *args, **kwargs):", "def test_remove_learner_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm(self.AUTH_REMOVE_LEARNER, self.learner_groups[1]))", "def _remove_experts(self):\n self.experts = [ex for ex in self.experts if np.mean(\n ex.weight) >= self.theta]", "def anti_bot(self, message):\n msg_list = self.ts.get_human_readable_message(message).lower().split(' ')\n bot_creation_date = self._get_creation_date(msg_list[1])\n viewers = self.ts.fetch_chatters_from_API()['viewers']\n mod_list = self.ts.get_mods()\n with codecs.open('whitelist.json', 'r', 'utf-8') as f:\n whitelist = json.load(f)\n for viewer in viewers:\n if self._get_creation_date(viewer) == bot_creation_date and viewer not in whitelist:\n self.ts.send_message('/ban {}'.format(viewer))\n mod_str = ', '.join(mod_list)\n self._add_to_whisper_queue(viewer, 'We\\'re currently experiencing a bot attack. 
If you\\'re a human and were accidentally banned, please whisper a mod: {}'.format(mod_str))", "def cleanOrphanedLearners(self):\n\n # Before deleting Learners, ensure that if any Learners that are about to be\n # deleted point to a Team as their action, then that Team's count of\n # referincing Learners is decremented.\n for learner in self.learner_pop:\n if learner.getNumReferencingTeams() == 0 and not learner.isActionAtomic():\n learner.action.decrementNumReferencingLearners()\n\n # Remove all orphaned Learners from the Learner population\n self.learner_pop = [l for l in self.learner_pop if not l.getNumReferencingTeams() == 0]", "def exclude_nodes(self, nodes):", "def primers_are_useless(self):\r\n #TODO: send a message telling these primers can be taken out.\r\n for feature in self.gt_seq_region:\r\n if feature.attributes.active:\r\n feature.attributes.disable_feature(\"has no interesting sequence variation\")\r\n for feature in self.pcr_product:\r\n if feature.attributes.active:\r\n feature.attributes.disable_feature(\"has no interesting sequence variation\")\r\n for feature in self.forward_primer:\r\n if feature.attributes.active:\r\n feature.attributes.disable_feature(\"has no interesting sequence variation\")\r\n for feature in self.reverse_primer:\r\n if feature.attributes.active:\r\n feature.attributes.disable_feature(\"has no interesting sequence variation\")", "def generate_exclusions(proteins):\n pass", "def exclude_other_class_items(_items, class_name):\n\n class_skills = class_skill_names(class_name)\n other_skill_names = list(set(all_class_skill_names()) - set(class_skills)) + class_attributes(Classes.DEMON_HUNTER)\n\n def match_invert_skills(item):\n \"\"\" filter items based on if they match a class skill \"\"\"\n text = item.text\n\n if any([skill in text for skill in other_skill_names]):\n if any([skill in text for skill in class_skills]): # double check\n print('found a wizard skill', [skill for skill in class_skills if skill in text])\n print(item)\n return True\n return False\n return True\n\n return list(filter(match_invert_skills, _items))\n\n # def match_invert_skills(_item):\n # \"\"\" filter items based on if they match a class skill \"\"\"\n # text = _item.text\n #\n # if any([skill in text for skill in other_skill_names]):\n #\n # if any([skill in text for skill in class_skills]): # double check\n # print('found aa wizard skill', [skill for skill in class_skills if skill in text])\n # print(_item)\n # return True\n # return False\n #\n # print('lolll')\n # return True\n #\n # print(other_skill_names)\n # to_return = []\n # for item in _items:\n # if match_invert_skills(item):\n # to_return.append(item)\n #\n #\n # return to_return", "def _inactiveplayers():\n\n rosters = _activerosters()\n dbrosters = _eidset() # players not in rosters scrape but in db.\n notactive = dbrosters.difference(rosters)\n return notactive", "def prune(self):\n target_user_ids = self.get_queryset().values_list('id', flat=True)\n exclude_user_ids = SentDrip.objects.filter(date__lt=conditional_now(),\n drip=self.drip_model,\n user__id__in=target_user_ids)\\\n .values_list('user_id', flat=True)\n self._queryset = self.get_queryset().exclude(id__in=exclude_user_ids)", "def test_remove_learner_specific_for_coach_pt1(self):\n self.assertTrue(self.coach2.has_perm(self.AUTH_REMOVE_LEARNER, self.learner_groups[1]))", "def prune_losers(self):\n self.log.debug(\"PRUNE LOSERS\")\n # check to see if people i followed follow me back\n cutoff_time = (datetime.now()\n - 
timedelta(hours=self.reciprocation_window))\n ingrates = Target.objects.filter(\n hunter=self.user, status=Target.PURGATORY,\n modified__lt=cutoff_time) # They didn't follow back in time\n\n for ingrate in ingrates:\n ingrate.status = Target.INGRATE\n ingrate.save()\n self.log.debug(\" => Unfollowed %s\" % ingrate.hunted.screen_name)\n try:\n self.api.destroy_friendship(ingrate.hunted)\n except Exception, e:\n print e\n return\n finally:\n pass\n #self.contact(ingrate)", "def test_other_study_not_in_queryset(self):\n # Delete all but five source traits, so that there are 5 from each study.\n study2 = factories.StudyFactory.create()\n datasets2 = factories.SourceDatasetFactory.create_batch(\n 5, source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only datasets from the correct study are found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that the other study's datasets do not show up.\n self.assertEqual(len(returned_pks), len(self.source_datasets))\n for dataset in datasets2:\n self.assertNotIn(dataset.i_id, returned_pks)\n for dataset in self.source_datasets:\n self.assertIn(dataset.i_id, returned_pks)", "def test_other_study_not_in_queryset(self):\n # Delete all but five source traits, so that there are 5 from each study.\n study2 = factories.StudyFactory.create()\n datasets2 = factories.SourceDatasetFactory.create_batch(\n 5, source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only datasets from the correct study are found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that the other study's datasets do not show up.\n self.assertEqual(len(returned_pks), len(self.source_datasets))\n for dataset in datasets2:\n self.assertNotIn(dataset.i_id, returned_pks)\n for dataset in self.source_datasets:\n self.assertIn(dataset.i_id, returned_pks)", "def get_all_game_players_but_indicated(self, user):\n return GamePlayer.objects.filter(Q(game=self) & ~Q(player=user))", "def exclude_list(self):\n pass", "def avoids(w, forbidden):\n\treturn set(w).isdisjoint(set(forbidden))", "def excluded(cls):\n return []", "async def test_subjects_to_ignore_by_name(self):\n self.set_source_parameter(\"subjects_to_ignore\", [\"S2\"])\n response = await self.collect(get_request_json_side_effect=[self.data_model, self.reports])\n self.assert_measurement(response, value=str(int(len(self.entities) / 2)), total=self.expected_software_metrics)", "def test_other_study_not_in_queryset(self):\n # Delete all but five source datasets, so that there are 5 from each study.\n study2 = factories.StudyFactory.create()\n datasets2 = factories.SourceDatasetFactory.create_batch(\n 5, source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only datasets from the correct study are found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that the other study's datasets do not show up.\n self.assertEqual(len(returned_pks), len(self.source_datasets))\n for dataset in datasets2:\n self.assertNotIn(dataset.i_id, returned_pks)\n for dataset in self.source_datasets:\n self.assertIn(dataset.i_id, returned_pks)", "def remove_ignore_list(book_list):\n my_ignore_list = ['a','able','about','across','after','all','almost','also',\n 
'am','an','and','any','are','as','at','be','because',\n 'been','but','by','can','cannot','could','did','do','does',\n 'for','from','get','got','had','has','have','he','her','hers',\n 'him','his','how','however','i','if','in','into','is']\n\n book_list = filter(lambda book_list: book_list not in my_ignore_list, book_list)\n\n return book_list", "def get_excluded_videos():\n excluded_list = open('exclude.txt', 'r')\n return set([line.strip() for line in excluded_list.readlines()])", "def available_sets(session, player):\n excluded_sets = set(session.taken.keys())\n for grouping in session.exclusives:\n if player.sets.intersection(grouping):\n excluded_sets.update(grouping)\n return [s for s in session.sets if s not in excluded_sets]", "def unknown(self, w):\n return w not in self._vocab", "def strip_ds(ds):\n if 'brain' in np.unique(ds.sa.all_ROIs):\n ds = ds[(ds.sa.all_ROIs != 'brain'), :]\n print('excluded the rest of the brain from the dataset')\n if 'overlap' in np.unique(ds.sa.all_ROIs):\n ds = ds[(ds.sa.all_ROIs != 'overlap'), :]\n print('excluded overlap from the dataset')\n return ds", "def no_filter(blast_subject_entry):\r\n return True", "def exclude(self, *args, **kwargs):\n return self.filter(~F(*args, **kwargs))", "def remove_false_positives(headlines,exclusions):\r\n for headline in headlines:\r\n for word in exclusions:\r\n if headline.lower().find(word) != -1: #If headline contains exclusionary word.\r\n headlines.remove(headline)\r\n break\r\n return headlines", "def remove_speaker_and_first_u(words, losses, speakers):\n first_u = []\n clean_words = []\n clean_losses = []\n for word, loss in zip(words, losses):\n if word in speakers and (loss == 100 or loss == -100):\n continue\n if word == \":\" and (loss == 100 or loss == -100):\n continue\n if loss == 100:\n first_u.append(word)\n continue\n\n clean_words.append(word)\n clean_losses.append(loss)\n return clean_words, clean_losses, first_u", "async def test_skipped_already_unsilenced(self):\n self.cog.scheduler.__contains__.return_value = False\n self.cog.previous_overwrites.get.return_value = None\n\n for channel in (MockVoiceChannel(), MockTextChannel()):\n with self.subTest(channel=channel):\n self.assertFalse(await self.cog._unsilence(channel))\n channel.set_permissions.assert_not_called()", "def drop_irrelevant_practices(df):\n\n is_relevant = df.groupby(\"practice\").value.any()\n return df[df.practice.isin(is_relevant[is_relevant == True].index)]", "def flag_epoch_nonwearing(self, wearing_col: str):\n for wearable in self.wearables.values():\n if wearing_col not in wearable.data.keys():\n raise KeyError(\n \"Column %s not found for wearable (pid=%s). 
Did you forget to run ``NonWearingDetector.detect_non_wear(...)``?\" % (\n wearing_col, wearable.get_pid()))\n\n wearable.data.loc[wearable.data[wearing_col] == False, self.invalid_col] |= InvCode.FLAG_EPOCH_NON_WEARING", "def exclude_auditor_emails(emails):\n acl = all_models.AccessControlList\n acr = all_models.AccessControlRole\n acp = all_models.AccessControlPerson\n\n if not isinstance(emails, set):\n emails = set(emails)\n\n auditor_emails = db.session.query(\n all_models.Person.email\n ).join(\n acp\n ).join(\n acl\n ).join(\n acr\n ).filter(\n acr.name == \"Auditors\",\n all_models.Person.email.in_(emails)\n ).distinct().all()\n\n emails_to_exlude = {line.email for line in auditor_emails}\n return emails - emails_to_exlude", "def reject_fairness(experiment: List[bool]) -> bool:\n num_heads = len([flip for flip in experiment if flip])\n return num_heads < 469 or num_heads > 531", "def skip_experiment(conf):\n return (\n (conf.dataset == 'rfw' and conf.feature == 'arcface')\n or (conf.dataset == 'bfw' and conf.feature == 'facenet')\n )", "def excludeFromDumpByName(self, nameList):\n pass", "def retrieveIgnoredWords(self):\n words = self.con.getIgnoredWords()\n guilds = self.con.getGuildsInfo()\n\n for item in guilds:\n self.ignored[item.split(',')[0]] = []\n\n for word in words:\n data = word.split(',')\n self.ignored[data[0]].append(data[1])", "def test_other_study_not_in_queryset(self):\n # Delete all but five source traits, so that there are 5 from each study.\n models.SourceTrait.objects.exclude(i_dbgap_variable_accession__in=TEST_PHVS[:5]).delete()\n self.source_traits = list(models.SourceTrait.objects.all())\n study2 = factories.StudyFactory.create()\n source_traits2 = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only the correct study is found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that there's only one page of results.\n self.assertTrue(models.SourceTrait.objects.all().count() <= 10)\n self.assertEqual(len(returned_pks), len(self.source_traits))\n for trait in source_traits2:\n self.assertNotIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertIn(trait.i_trait_id, returned_pks)", "def test_other_study_not_in_queryset(self):\n # Delete all but five source traits, so that there are 5 from each study.\n models.SourceTrait.objects.exclude(i_dbgap_variable_accession__in=TEST_PHVS[:5]).delete()\n self.source_traits = list(models.SourceTrait.objects.all())\n study2 = factories.StudyFactory.create()\n source_traits2 = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only the correct study is found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that there's only one page of results.\n self.assertTrue(models.SourceTrait.objects.all().count() <= 10)\n self.assertEqual(len(returned_pks), len(self.source_traits))\n for trait in source_traits2:\n self.assertNotIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertIn(trait.i_trait_id, returned_pks)", "def test_filter_remove(self):\n words = ['cart', 'fate', 'date', 'daft']\n filtered = filter_scores(score_words(words), 'fate', -1)\n self.assertEqual([(8, 'daft'), (7, 'date'), (7, 
'cart')], filtered)", "def words_uses_only(letters):\n\treturn {w for w in word_set if uses_only(w, letters)}", "def skip(self):\r\n self.owning_letters=list()\r\n for _ in range(7):\r\n self.owning_letters.append(random.choice(self.letter))\r\n return self.owning_letters", "def _get_private_team_ids_to_exclude(self, course_module):\n if has_access(self.request.user, 'staff', course_module.id):\n return set()\n\n private_teamset_ids = [ts.teamset_id for ts in course_module.teamsets if ts.is_private_managed]\n excluded_team_ids = CourseTeam.objects.filter(\n course_id=course_module.id,\n topic_id__in=private_teamset_ids\n ).exclude(\n membership__user=self.request.user\n ).values_list('team_id', flat=True)\n return set(excluded_team_ids)", "def losses(self):\n return [g for g in self.games if g.winner is not self.team]", "def test_negate_tips_to_keep(self):\r\n t = DndParser(\"((S5:0.00014,S7:0.00015)0.752:0.45762,(S3:0.00014,\"\r\n \"seq6:0.00014)0.180:0.00015,(Seq1:0.00014,s2:0.00014)0.528:1.0466);\")\r\n\r\n tips_to_keep = [\"S5\", \"Seq1\", \"s2\"]\r\n expected = [\"S7\", \"S3\", \"seq6\"]\r\n self.assertItemsEqual(negate_tips_to_keep(tips_to_keep, t), expected)\r\n\r\n tips_to_keep = [\"S5\", \"Seq1\"]\r\n expected = [\"S7\", \"S3\", \"seq6\", \"s2\"]\r\n self.assertItemsEqual(negate_tips_to_keep(tips_to_keep, t), expected)\r\n\r\n tips_to_keep = []\r\n expected = [\"S7\", \"S3\", \"seq6\", \"s2\", \"S5\", \"Seq1\"]\r\n self.assertItemsEqual(negate_tips_to_keep(tips_to_keep, t), expected)\r\n\r\n tips_to_keep = [\"S7\", \"S3\", \"seq6\", \"s2\", \"S5\", \"Seq1\"]\r\n expected = []\r\n self.assertItemsEqual(negate_tips_to_keep(tips_to_keep, t), expected)", "def ignores(self):\n return self._ignores", "def exclude(self):\n\n self.eod.value = 0\n self.public.value = 0", "def remove_noise(text):\n\n text = text.split()\n word = [word for word in text if word not in [\n 'pertain',\n 'estimate',\n 'link',\n 'and',\n 'more',\n 'fetch',\n 'be',\n 'there',\n 'do',\n 'you',\n 'have',\n 'any',\n 'is',\n 'my',\n 'on',\n 'can',\n 'i',\n 'get',\n 'some',\n 'am',\n 'look',\n 'for',\n 'the',\n 'to',\n 'share',\n 'me',\n 'of',\n 'please',\n 'a',\n 'very',\n 'at',\n 'with',\n 'relate',\n 'sorry'\n ]]\n return ' '.join(word)", "def negated(input_words, include_nt=True):\n input_words = [str(w).lower() for w in input_words]\n neg_words = []\n neg_words.extend(NEGATE)\n for word in neg_words:\n if word in input_words:\n return True\n if include_nt:\n for word in input_words:\n if \"n't\" in word:\n return True\n if \"least\" in input_words:\n i = input_words.index(\"least\")\n if i > 0 and input_words[i - 1] != \"at\":\n return True\n return False", "def test_other_study_not_in_queryset(self):\n # Delete all source traits and make 5 new ones, so there are only 5 for study 1.\n models.SourceTrait.objects.all().delete()\n self.source_traits = factories.SourceTraitFactory.create_batch(5, source_dataset=self.source_dataset)\n study2 = factories.StudyFactory.create()\n source_traits2 = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only the correct study is found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that there's only one page of results.\n self.assertTrue(models.SourceTrait.objects.all().count() <= 10)\n self.assertEqual(len(returned_pks), len(self.source_traits))\n for trait in source_traits2:\n 
self.assertNotIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertIn(trait.i_trait_id, returned_pks)", "def is_ignored(self):", "def uses_only(w, letters):\n\treturn set(w).issubset(set(letters))", "def reject_fairness(experiment):\n num_heads = len([flip for flip in experiment if flip])\n return num_heads < 469 or num_heads > 531", "def reject_fairness(experiment):\n num_heads = len([flip for flip in experiment if flip])\n return num_heads < 469 or num_heads > 531", "def reject_fairness(experiment):\n num_heads = len([flip for flip in experiment if flip])\n return num_heads < 469 or num_heads > 531", "def test_student_id_exclude(self, db, course_dir):\n run_nbgrader([\"db\", \"assignment\", \"add\", \"ps1\", \"--db\", db])\n run_nbgrader([\"db\", \"student\", \"add\", \"foo\", \"--db\", db])\n run_nbgrader([\"db\", \"student\", \"add\", \"bar\", \"--db\", db])\n run_nbgrader([\"db\", \"student\", \"add\", \"baz\", \"--db\", db])\n self._copy_file(join(\"files\", \"submitted-unchanged.ipynb\"), join(course_dir, \"source\", \"ps1\", \"p1.ipynb\"))\n run_nbgrader([\"assign\", \"ps1\", \"--db\", db])\n\n for student in [\"foo\", \"bar\", \"baz\"]:\n self._copy_file(join(\"files\", \"submitted-unchanged.ipynb\"), join(course_dir, \"submitted\", student, \"ps1\", \"p1.ipynb\"))\n run_nbgrader([\"autograde\", \"ps1\", \"--db\", db])\n run_nbgrader([\"generate_feedback\", \"ps1\", \"--db\", db, \"--CourseDirectory.student_id_exclude=bar,baz\"])\n\n for student in [\"foo\", \"bar\", \"baz\"]:\n assert exists(join(course_dir, \"autograded\", \"foo\", \"ps1\", \"p1.ipynb\"))\n\n assert exists(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"p1.html\"))\n assert not exists(join(course_dir, \"feedback\", \"bar\", \"ps1\", \"p1.html\"))\n assert not exists(join(course_dir, \"feedback\", \"baz\", \"ps1\", \"p1.html\"))", "def reject_fairness(experiment):\n num_heads = len([flip for flip in experiment if flip])\n return num_heads < 468 or num_heads > 531", "def negate_tips_to_keep(tips_to_keep, tree):\r\n tips_to_keep = set(tips_to_keep)\r\n # trees can return node names in ways that have multiple quotes, e.g.\r\n # '\"node_1\"' or ''node_1''. 
remove them or it can cause problems with\r\n # tips_to_keep not matching\r\n tmp_tips = set([tip.Name for tip in tree.tips()])\r\n tips = set([t.strip('\\'').strip('\\\"') for t in tmp_tips])\r\n return tips - tips_to_keep", "def nontrainable_weights(self):\n return list(filter(lambda x: not x.requires_grad, self.get_parameters(expand=True)))", "def test_negation():\n char1 = Character(court=['winter'])\n char2 = Character()\n char3 = Character(court=['summer'])\n res = npc.commands.find_characters([\"court~:winter\"], [char1, char2, char3])\n assert char1 not in res\n assert char2 in res\n assert char3 in res", "def negative_sampling(self):\n \n self.train_arr = []\n sample_list = np.random.choice(list(range(self.item_count)), size = 10 * len(self.interactions) * self.num_ns)\n \n sample_idx = 0\n for user, pos_item, _ in self.interactions:\n ns_count = 0\n \n while True:\n neg_item = sample_list[sample_idx]\n if not is_visited(self.rating_mat, user, neg_item):\n self.train_arr.append((user, pos_item, neg_item))\n sample_idx += 1\n ns_count += 1\n if ns_count == self.num_ns:\n break\n \n sample_idx += 1", "def still_deciding(self):\n for player in self.players:\n if isinstance(player, user.User):\n if not player.has_played:\n return True\n return False", "def negations(self) -> str:", "def helper(reviewer: Any, graph: Graph) -> set:\n reviewers_so_far = set()\n\n for movie in graph.get_neighbours(reviewer):\n for user in graph.get_neighbours(movie):\n if graph.get_weight(user, movie) >= 8:\n reviewers_so_far.add(user)\n return reviewers_so_far", "def testWithoutNoise(self):\n self.checkMatching(self.references)", "def _exclude_visitor():\n return _is_robot() or _is_ignored_ip_address()", "async def test_subjects_to_ignore_by_uuid(self):\n first_subject_uuid = first(first(self.reports[\"reports\"])[\"subjects\"].keys())\n self.set_source_parameter(\"subjects_to_ignore\", [first_subject_uuid])\n response = await self.collect(get_request_json_side_effect=[self.data_model, self.reports])\n self.assert_measurement(response, value=str(int(len(self.entities) / 2)), total=self.expected_software_metrics)", "def naked_twins(values):\n\n # Find all instances of naked twins\n # Eliminate the naked twins as possibilities for their peers", "def filter_unknown_bases(self):\n self.failed[\"unknowns\"] = self.stats.index[\n self.stats[\"unknowns\"] > self.tolerance[\"unknowns\"]\n ]\n self.passed = self.stats.drop(self.failed[\"unknowns\"])", "def test_other_study_not_in_queryset(self):\n # Delete all but five source traits, so that there are 5 from each study.\n models.SourceTrait.objects.exclude(i_dbgap_variable_accession__in=TEST_PHVS[:5]).delete()\n self.source_traits = list(models.SourceTrait.objects.all())\n study2 = factories.StudyFactory.create()\n source_traits2 = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only the correct study is found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that there's only one page of results.\n self.assertTrue(models.SourceTrait.objects.all().count() <= 10)\n self.assertEqual(len(returned_pks), models.SourceTrait.objects.all().count())\n for trait in source_traits2:\n self.assertIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertIn(trait.i_trait_id, returned_pks)", "def negative_sampling(word_input, target, unigrams_table, 
neg_examples_size=5):\n negative_examples = []\n while len(negative_examples) is not neg_examples_size:\n neg_sample = np.random.choice(unigrams_table)\n # Make sure that the negative example is not the same as the training or as the target.\n # This will block if there only is one value within the unigram table\n if (neg_sample != word_input) and (neg_sample != target):negative_examples.append(neg_sample)\n else:pass\n return negative_examples", "def filter_dataset(self):\n articles = list(self.data.keys())\n for article in articles:\n if self.levels[0] not in self.data[article].keys() or \\\n self.levels[1] not in self.data[article].keys():\n del self.data[article]\n continue\n for level in self.data[article].keys():\n self.data[article][level] = [sent for sent in self.data[article][level]\n if len(sent[\"strings\"]) <= self.max_words]\n if len(self.data[article][level]) > self.max_sents:\n random.shuffle(self.data[article][level])\n self.data[article][level] = self.data[article][level][:self.max_sents]", "def nonlearning():\n\taT.featureAndTrain(['../../AudioData/chunked_data_sorted/pos', '../../AudioData/chunked_data_sorted/neg'], \n\t\t\t\t\t\t1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, \n \"svm\", \"emotion_classifier\", True)", "def exclude(self, *q, **kwargs):\n return self._filter_or_exclude(*q, _inverse=True, **kwargs)", "def filter(q_words):\n filtered_words = [\"how\",\"what\"]\n for word in q_words:\n if word in filtered_words:\n q_words.remove(word)", "def __exclude_wgid_from_citadel(wgid:int) -> None:\n citadelroleid = 636372439261249566\n discordserverid = CommonFramework.RetrieveConfigOptions('discord')\n discordserverid = discordserverid['serverid']\n result = CosmosFramework.QueryItems('SELECT * FROM c WHERE c.discordid=\"{0}\"'.format(wgid),'users')\n if bool(result):\n result = result[0]\n DiscordFramework.RemoveUserRole(citadelroleid,result['discordid'],discordserverid)\n DiscordFramework.send_discord_private_message('You have been removed from RDDT citadel due clan/rank changes',result['discordid'])\n del result['citadel']\n CosmosFramework.ReplaceItem(result['_self'],result)", "def stopwordsRem(tokens):\n no_sw = [t for t in tokens if not t in stopwords.words('english')]\n return no_sw", "def getNotMyCamps(self):\n r = []\n for p in self.__camps:\n if(p.getOwner() != 1):\n r.append(p)\n return r", "def _remove_stopwords(self, words):\n new_words = []\n for word in words:\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def keepAllowedWords(self, words, keepWords):\n\t\tkept = [word for word in words if word in keepWords]\t\t\n\t\treturn kept", "def nonplayer_deaths(self):\n return self.deaths.filter(or_(Death.mindkey == 'null', Death.mindkey == None))", "def filter_out_sentences(raw_korpus: str):\n\n return raw_korpus", "def without(self, *args):\n return self.reject(lambda x: x in args)", "def filter_bots(users):\n human_users = []\n for user in users:\n user_info = client.users_info(user=user)['user']\n if not user_info['is_bot']:\n human_users.append(user)\n return human_users", "def skip_train(walks, window_size, negative_size):\n\tP_m = frequency(walks)\n\tNum, Prob = negative_frequency(P_m)\n\ttargets = []\n\tcontexts = []\n\tsimilarity = []\n\tnegative_samples = []\n\tfor walk in walks:\n\t\tfor source_id, source in enumerate(walk):\n\t\t\treduced_window = np.random.randint(window_size)\n\t\t\tstart = max(0, source_id - window_size + reduced_window)\n\t\t\tfor target_id in range(start, source_id + window_size + 1 
- reduced_window):\n\t\t\t\tif target_id != source_id:\n\t\t\t\t\ttry:\n\t\t\t\t\t\ttarget = walk[target_id]\n\t\t\t\t\t\ttargets.append(target)\n\t\t\t\t\t\tcontexts.append(source)\n\t\t\t\t\t\tnegative_samples.append(get_negative_sample(target, Num, Prob, negative_size))\n\t\t\t\t\t\tsimilarity.append(np.concatenate((np.ones(1), np.zeros(negative_size))))\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tpass\n\treturn map(np.array, (targets, contexts, similarity, negative_samples))", "def unknown(self, w):\n\n return w not in self._palabrasvistas", "def ignores(self):\n pass # make ignore_tags unaccessible", "def exclude(self, *args, **kwargs):\n self._not_support_combined_queries(\"exclude\")\n return self._filter_or_exclude(True, args, kwargs)", "def critere_keys(key):\n critere = (key not in [\"input_observation\", \"y_true\", \"y_action\", \"y\"])\n critere = critere & (key[-3:] != \"_ph\") & (key[-7:] != \"_assign\")\n\n return critere", "def filter_ignored(self, node):\n return node.getText() not in self.ignored_terms", "def add_excl_parts(db, usernames):\n desc = \"Replicating the effect \" + \\\n \"of priming with common vs rare ideas in individual \" + \\\n \"brainstorming with revised interface\"\n exp_id= 'tN33ATDiCukWfj5G7'\n # exps = db.experiments.find()\n exp = db.experiments.find_one({'_id': exp_id})\n\n db.experiments.update({'_id': exp_id},\n {'$set': {'excludeUsers': list(usernames), 'description': desc}})\n # exp['excludeUsers'] = list(usernames)\n exp = db.experiments.find_one({'_id': exp_id})\n print exp['excludeUsers']\n print exp['description']", "def exclude(requestContext, seriesList, pattern):\n regex = re.compile(pattern)\n return [s for s in seriesList if not regex.search(s.name)]", "def discard(self):\n for f in self.featureNames:\n self.data = self.data[self.data[:,self._getFIdx(f)] != '-99999']\n return", "def test_does_not_sample_negligible_weight_ppswor(self):\n s = private_sampling.ThresholdSample(1.0,\n private_sampling.PpsworSamplingMethod)\n s.process(\n \"a\",\n math.log(\n FAILURE_PROBABILITY_INVERSE / (FAILURE_PROBABILITY_INVERSE - 1),\n math.e))\n self.assertEmpty(s.elements)", "def words_without_letter(l):\n\treturn {w for w in word_set if has_no_letter(w, l)}", "def dislikes(self):\n return self.get_queryset().filter(vote__lt=0)", "async def team_unignore(self, ctx: commands.Context):\n await self.config.user(ctx.author).do_not_message.set(False)\n await ctx.send('Okay, I\\'ll include you back in team-wide DMs.')" ]
[ "0.65722376", "0.5929489", "0.58905196", "0.5826653", "0.58197314", "0.5794661", "0.5784924", "0.5743494", "0.5724692", "0.56861764", "0.5636551", "0.56264764", "0.5577885", "0.5564499", "0.5544913", "0.5514717", "0.55069286", "0.55069286", "0.5471333", "0.5468246", "0.5453299", "0.54438335", "0.5379929", "0.5377862", "0.5377365", "0.5374249", "0.5356799", "0.53559345", "0.5313876", "0.5301113", "0.529544", "0.52917033", "0.5272584", "0.5260122", "0.52590704", "0.52561325", "0.5243909", "0.5242401", "0.5238545", "0.52379996", "0.52294314", "0.52292925", "0.52292925", "0.5227979", "0.52191895", "0.5217874", "0.5206354", "0.5196174", "0.5195192", "0.5172663", "0.5166846", "0.5161625", "0.5155", "0.51547796", "0.5147388", "0.5142853", "0.51348025", "0.51348025", "0.51348025", "0.51337755", "0.51292264", "0.512717", "0.5126101", "0.51199126", "0.5112176", "0.51092046", "0.50984716", "0.50957507", "0.5094347", "0.50842166", "0.508236", "0.5081425", "0.5080369", "0.5079904", "0.5077235", "0.5076055", "0.50749993", "0.5074777", "0.5071823", "0.5070241", "0.5070181", "0.5064378", "0.5058201", "0.50577986", "0.505486", "0.5054031", "0.5052261", "0.5052104", "0.50444955", "0.50439554", "0.50390345", "0.50376195", "0.5036801", "0.5031293", "0.50169265", "0.5011817", "0.50045276", "0.4998957", "0.49945778", "0.49913707", "0.4991263" ]
0.0
-1
loads the next N images from the binary mraw file into a numpy array.
def load_images(mraw, h, w, N, bit=16, roll_axis=True): if int(bit) == 16: images = np.memmap(mraw, dtype=np.uint16, mode='r', shape=(N, h, w)) elif int(bit) == 8: images = np.memmap(mraw, dtype=np.uint8, mode='r', shape=(N, h, w)) elif int(bit) == 12: warnings.warn("12bit images will be loaded into memory!") #images = _read_uint12_video(mraw, (N, h, w)) images = _read_uint12_video_prec(mraw, (N, h, w)) else: raise Exception(f"Unsupported bit depth: {bit}") #images=np.fromfile(mraw, dtype=np.uint16, count=h * w * N).reshape(N, h, w) # about a 1/3 slower than memmap when loading to RAM. Also memmap doesn't need to read to RAM but can read from disc when needed. if roll_axis: return np.rollaxis(images, 0, 3) else: return images
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readData():\n\tN = 800\n\tD = 28*28\n\tX = np.zeros((N, D), dtype=np.uint8)\n\n\tf = open(\"data/a012_images.dat\", 'rb')\n\n\tfor i in range(0, N):\n\t\tX[i, :] = np.fromstring(f.read(D), dtype='uint8')\n\n\tf.close()\n\n\treturn X", "def le_binario_mgbq(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def __readImages(self, filename):\n print 'Reading images from %s ...' % filename\n images = []\n with open(filename, 'rb') as infile:\n infile.read(4) # ignore magic number\n count = struct.unpack('>i', infile.read(4))[0]\n rows = struct.unpack('>i', infile.read(4))[0]\n columns = struct.unpack('>i', infile.read(4))[0]\n\n for i in xrange(count):\n data = infile.read(rows*columns)\n image = np.fromstring(data, dtype=np.uint8)\n image = image.reshape((rows, columns))\n image = 255 - image # now black digit on white background\n images.append(image)\n return images", "def load_images(filename='training_images'): \n file_path = os.path.join(DATA_DIR, filename)\n with open(file_path, 'rb') as f:\n b = f.read() # hope ya get it all\n\n # grab the first four numbers ...\n # fmt='>i' means big-endian int32\n magic, n_images, n_rows, n_cols = (struct.unpack('>i', b[i*4:(i+1)*4]) for i in range(4))\n\n # i am a god-fearing man\n assert magic[0] == 2051, \"bad magic number, what do?\"\n\n\n # so i think you can use the standard libary's \"array\" for this, just\n # because binary data of any sort is kinda dodgy, but this grabs 'the rest'\n # format='B' means unsigned char === 'uint8', and apparently endianness doesn't matter\n image_stream = array.array('B', b[16:])\n\n # so each 28*28 byte portion of image_stream is a flattened image. these two\n # numpy.reshape calls get it into the desired shape for A. maybe could\n # combine it into one call, idk. 
anyway, each flattened image appears as a\n # row, and there is a row for each image.\n image_first = numpy.reshape(image_stream, (n_images[0], n_rows[0], n_cols[0]))\n images = image_first.reshape(n_images[0], n_rows[0]*n_cols[0])\n\n # convert to float in [0,1]\n images = images.astype('f') / 255\n\n return images", "def _extract_images(self, filename):\n log.info('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = self._read32(bytestream)\n if magic != 2051:\n raise ValueError(\n 'Invalid magic number %d in MNIST image file: %s' %\n (magic, filename))\n num_images = self._read32(bytestream)\n rows = self._read32(bytestream)\n cols = self._read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def le_binario_mgbp(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def le_binario_mgbp(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def _read_datafile(self,path):\n \tlabels, images = [], []\n \twith gzip.GzipFile(path) as f:\n \t for line in f:\n \t vals = line.strip().split()\n \t labels.append(float(vals[0]))\n \t images.append([float(val) for val in vals[1:]])\n \tlabels = np.array(labels, dtype=np.int32)\n \tlabels[labels == 10] = 0 # fix weird 0 labels\n \timages = np.array(images, dtype=np.float32).reshape(-1, 16, 16, 1)\n \timages = (images + 1) / 2\n \treturn images, labels", "def le_binario_mgb(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def le_binario_mgb(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def le_binario_mgb(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def le_binario_mgb(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def le_binario_mgb(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def le_binario_mgb(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def extract_images(f):\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %\n (magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = numpy.frombuffer(buf, dtype=numpy.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def read_vanhateren_images (n_imgs=5):\n folder_name = r'D:\\VanHateren\\vanhateren_imc' # change this to point to the directory which holds the van hateren data\n # files = listdir(folder_name)\n onlyfiles = [ f for f in listdir(folder_name) if isfile(join(folder_name,f)) ]\n imgs = []\n for i in range(n_imgs):\n filename = join(folder_name, onlyfiles[i])\n with open(filename, 'rb') as handle:\n s = handle.read()\n arr = array.array('H', s)\n arr.byteswap()\n img_i = np.array(arr, dtype='uint16').reshape(1024, 1536)\n imgs.append(img_i) \n return imgs\n #pylab.imshow(img)\n #pylab.show()", "def _load_batch_file(filename):\n # Load the pickled data-file.\n data = _unpickle(filename)\n # Get the raw images.\n raw_images = data[b'data']\n # Get the class-numbers for each image. 
Convert to numpy-array.\n cls = np.array(data[b'labels'])\n # Convert the images.\n images = _convert_images(raw_images)\n\n return images, cls", "def extract_labels(filename, num_images):\n\n# this function definition has been taken from internet \n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64) #Interpret a buffer as a 1-dimensional array\n return labels", "def extract_images(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %(magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def load_images(folder_path, num_images):\n imgs = np.zeros(shape=[num_images, 400, 400, 3])\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n\n #imgs[i - 1] = np.asarray(img).reshape(400, 400, 3)\n imgs[i - 1] = img.reshape(400, 400, 3)\n else:\n print('File ' + image_path + ' does not exist')\n return imgs", "def extract_images(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %\n (magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def extract_images(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %\n (magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def read(self, index):\n assert type(index) is int\n img = self.db.get_node('/images/img{:04d}'.format(index))\n return np.array(img)", "def load_nrrd(full_path_filename):\n data = sitk.ReadImage( full_path_filename )\n data = sitk.Cast( sitk.RescaleIntensity(data), sitk.sitkUInt8 )\n data = sitk.GetArrayFromImage(data)\n return(data)", "def hdr_to_Nifti(files):\r\n array = []\r\n for element in files:\r\n array = np.append(array, nib.load(element))\r\n\r\n print('array size: ', array.shape, '\\narray type: ', type(array))\r\n\r\n return array", "def extract_images(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %\n (magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = numpy.frombuffer(buf, dtype=numpy.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def 
extract_images(f):\n\tprint('Extracting', f.name)\n\twith gzip.GzipFile(fileobj=f) as bytestream:\n\t\tmagic = _read32(bytestream)\n\t\tif magic != 2051:\n\t\t\traise ValueError('Invalid magic number %d in MNIST image file: %s' %\n\t\t\t\t\t\t\t\t\t\t\t (magic, f.name))\n\t\tnum_images = _read32(bytestream)\n\t\trows = _read32(bytestream)\n\t\tcols = _read32(bytestream)\n\t\tbuf = bytestream.read(rows * cols * num_images)\n\t\tdata = numpy.frombuffer(buf, dtype=numpy.uint8)\n\t\tdata = data.reshape(num_images, rows, cols, 1)\n\t\treturn data", "def load_rbc( fname, skiprows, nx, ny ):\n C = numpy.loadtxt( fname, skiprows=skiprows ) \n cell_frames = [ C[i].reshape(( nx,ny )) for i in range( 5000-skiprows ) ]\n return cell_frames", "def load_set(directName, n = np.inf):\n # Loaded a set of images\n\n files = os.listdir(directName)\n n = min(n, len(files))\n #n = len(files)\n print(\"Loading \" + str(n) + \" images\")\n imgs = [mpimg.imread(directName + files[i]) for i in range(n)]\n\n return imgs", "def read_batch(self):\n imgs = []\n labels = []\n idx = np.random.choice(self.nImgs,self.batch_size)\n \tfor i in idx:\n imgs.append(cv2.imread(self.data_files[i]))\n \t labels.append(cv2.imread(self.label_files[i]))\n \timgs,labels = np.array(imgs),np.array(labels)\n imgs = (imgs - self.mean)/self.stddev\n \tlabels = (labels - self.mean)/self.stddev\n return imgs,labels", "def read_image(images_root):\n im_array = np.load(images_root)\n return im_array", "def load_chunk(self, idx):\n for f in self.filenames[idx:]:\n ...", "def _load_multipage_tiff(path):\n return np.array([np.array(p) for p in ImageSequence.Iterator(Image.open(path))])", "def extract_images(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError(\n 'Invalid magic number %d in MNIST image file: %s' %\n (magic, filename))\n num_images = _read32(bytestream)[0]\n rows = _read32(bytestream)[0]\n cols = _read32(bytestream)[0]\n #print('check', magic, num_images, rows, cols, rows * cols * num_images)\n buf = bytestream.read(rows * cols * num_images)\n data = numpy.frombuffer(buf, dtype=numpy.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def extract_data(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)\n data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\n data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1)\n return data", "def unpack_mraw_frame_10bit(file,n_pixels,start_frame=0):\n \n start_byte = start_frame*n_pixels*10/8\n file.seek(start_byte)\n image = []\n \n n_bytes = n_pixels*10/8\n \n int_array = np.fromfile(file,count=n_bytes,dtype=np.uint8)\n \n bytes_1 = int_array[::5]\n bytes_2 = int_array[1::5] \n bytes_3 = int_array[2::5]\n bytes_4 = int_array[3::5] \n bytes_5 = int_array[4::5]\n\n \n # Here 4 pixels from the image are shared between 5 bytes of data like\n #\n # | byte 1 | byte 2 | byte 3 | byte 4 | byte 5 |\n # |o o o o o o o o | o o | o o o o o o | o o o o | o o o o | o o o o o o | o o | o o o o o o o o|\n # | Pixel 1 | Pixel 2 | Pixel 3 | Pixel 4 |\n #\n # byte 2 is shared between pixel and we need only the right-most bits for pixel 2 and\n # only the left most bits for pixel 1. 
\n \n # right-most bits of byte 2 = Most significant bits of Pixel 2\n # left-most bits of byte 2 = Least significant bits of Pixel 1\n \n pix_1 = np.array(4.0*bytes_1 + np.right_shift(bytes_2,6),dtype=np.uint16)\n pix_2 = np.array(16.0*np.bitwise_and(bytes_2,0b111111) + np.right_shift(bytes_3,4),dtype=np.uint16)\n pix_3 = np.array(64.0*np.bitwise_and(bytes_3,0b1111) + np.right_shift(bytes_4,2),dtype=np.uint16)\n pix_4 = np.array(256.0*np.bitwise_and(bytes_4,0b11) + bytes_5,dtype=np.uint16)\n #try:\n image = (np.dstack([pix_1,pix_2,pix_3,pix_4])).reshape((1,n_pixels))[0]\n #except:\n # image = np.zeros(n_pixels)\n return image", "def load_nifty_volume_as_array(filename):\n img = sitk.ReadImage(filename)\n img_arr = sitk.GetArrayFromImage(img)\n return img_arr", "def test_imibread(self):\n gen = imibread(TEST_MIB)\n arr = next(gen)\n self.assertEqual(arr.shape, (256, 256))\n self.assertEqual(arr.dtype, np.dtype(\">u2\"))", "def _images(path):\r\n with gzip.open(path) as f:\r\n # First 16 bytes are magic_number, n_imgs, n_rows, n_cols\r\n pixels = np.frombuffer(f.read(), 'B', offset=16)\r\n return pixels.reshape(-1, 784).astype('float32') / 255", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n return labels", "def load_food_image_batch(filename, num):\n with open(filename, 'rb') as f:\n datadict = pickle.load(f)\n url_parts = datadict['Image URL'].split(\"/\")\n img_fn = url_parts[-1]\n with open(img_fn):\n X = f.read()\n Y = datadict['coarse_labels']\n X = X.reshape(num, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\n Y = np.array(Y)\n return X, Y", "def extract_data(filename, num_images, IMAGE_WIDTH):\n\n# this function definition has been taken from internet\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_WIDTH * IMAGE_WIDTH * num_images)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32) #Interpret a buffer as a 1-dimensional array\n data = data.reshape(num_images, IMAGE_WIDTH*IMAGE_WIDTH)\n return data", "def extract_images(filename):\n\tprint('Extracting', filename)\n\twith gzip.open(filename) as bytestream:\n\t\tmagic = _read32(bytestream)\n\t\tif magic != 2051:\n\t\t\traise ValueError('Invalid magic number %d in MNIST image file: %s' %(magic, filename))\n\t\tnum_images = _read32(bytestream)\n\t\trows = _read32(bytestream)\n\t\tcols = _read32(bytestream)\n\t\tbuf = bytestream.read(rows * cols * num_images)\n\t\tdata = numpy.frombuffer(buf, dtype=numpy.uint8)\n\t\tdata = data.reshape(num_images, rows, cols, 1)\n\t\treturn data", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n return labels", "def extract_images(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError(\n 'Invalid magic number %d in MNIST image file: %s' %\n (magic, filename))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def extract_data(filename, 
num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)\n data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\n data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)\n return data", "def load_chunks(self):\n for key, array in self.chunks.items():\n loaded_array = np.asarray(array)\n self.chunks[key] = loaded_array", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def _parse_raw_labels(self, lines):\r\n images = []\r\n labels = []\r\n idx = 0\r\n while idx < len(lines):\r\n image_path = lines[idx].strip()\r\n images.append(self._real_image_path(image_path))\r\n idx += 1\r\n\r\n num = int(lines[idx])\r\n idx += 1\r\n\r\n labels_ = []\r\n for _ in range(num):\r\n x1, y1, w, h, blur, expression, illumination, invalid, \\\r\n occlusion, pose = [int(v) \r\n for v in lines[idx].strip().split()]\r\n x2, y2 = x1 + w - 1, y1 + h - 1 # -1 to get the read x2, y2\r\n\r\n labels_.append([x1, y1, x2, y2])\r\n idx += 1\r\n \r\n labels.append(np.array(labels_))\r\n\r\n self._data_map[self._real_image_path(image_path)] = np.array(labels_)\r\n return np.array(images), np.array(labels)", "def extract_data(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)\n data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)\n# data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\n data = data.reshape(num_images, IMAGE_SIZE * IMAGE_SIZE)\n return data", "def read_picture_data(filename):\n file_name = os.path.join('.', 'datas', filename)\n\n try:\n with open(file_name, 'rb') as file:\n read_data = file.read()\n except FileNotFoundError:\n print(f'Oups, the file {filename} was not found')\n\n try:\n if filename == 'train-images.idx3-ubyte':\n number_of_pics = 60000\n else:\n number_of_pics = 10000\n except LookupError:\n print(f'Oups, the file {filename} was not named as a MNist file')\n\n picture_data = np.zeros((number_of_pics, 28 * 28)) # 28*28 = 784\n\n s = 0\n for n in range(16, number_of_pics * 784, 784): # 16 header bytes being dumped\n for t, byte in enumerate(read_data[n: n + 784]):\n picture_data[s, t] = byte\n s += 1\n\n print(f'\\nPicture data read from {filename}\\n')\n\n return picture_data", "def read_batch(batch_size ,file_dir):\n batch_images = []\n batch_labels = []\n temp,size= get_files(file_dir)\n\n image_list = list(temp[:, 0])\n label_list = list(temp[:, 1])\n Size = size-1\n for i in range(batch_size):\n # random class choice\n # (randomly choose a folder of image of the same class from a list of previously sorted wnids)\n # class of the im\n class_index = random.randint(0, Size)\n batch_images.append(read_image(image_list[class_index]))\n batch_labels.append(onehot(int(label_list[class_index])))\n np.vstack(batch_images)\n np.vstack(batch_labels)\n return batch_images, batch_labels", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, 
dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def extract_data(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)\n data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)\n data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\n data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)\n return data", "def _load(self) -> np.ndarray:\n with self._fs.open(self._filepath, mode=\"r\") as f:\n image = Image.open(f).convert(\"RGBA\")\n return np.asarray(image)", "def load(self, i: int) -> np.ndarray:\n raise NotImplementedError(\"Do not call load from BaseLoader\")", "def _load( self, i ):\n if ir.config.verbosity_level >= 2: print(\"[observation] Lazy loading raster\")\n self._raster_data[i] = raster_cube( self._raster_files, line=self._line_info['description'][i], keep_null=self._keep_null )", "def loadRaw(self, path, preprocfunc=None):\n # Only for 8 and 32 bit images\n depth = self.getDepth()\n if depth==1:\n mamba.raiseExceptionOnError(mambaCore.ERR_BAD_DEPTH)\n \n # Loading the file\n f = file(path, 'rb')\n data = f.read()\n f.close()\n \n # Preprocessing the data if a function was given\n if preprocfunc:\n data = preprocfunc(data)\n \n # Verification over data size\n (w,h) = self.getSize()\n im_size = w*h*(depth/8)\n assert(len(data)==im_size*self.length)\n \n # Loading the data\n for i,im in enumerate(self.seq):\n err = mambaCore.MB_Load(im.mbIm, data[i*im_size:(i+1)*im_size], im_size)\n mamba.raiseExceptionOnError(err)\n self.name = path", "def load_batch(n):\r\n print ('Loadng one batch...')\r\n batchfilename = flist[n - 1] + '.pkl'\r\n if not os.path.exists(batchfilename):\r\n set_batch_data()\r\n with open(batchfilename, 'rb') as cifar_pickle:\r\n data = six.moves.cPickle.load(cifar_pickle)\r\n return data", "def _convert_images(raw):\n # Convert the raw images from the data-files to floating-points.\n #raw_float = np.array(raw, dtype=float) / 255.0\n\n # Reshape the array to 4-dimensions.\n images = raw.reshape([-1, num_channels, img_size, img_size])\n\n # Reorder the indices of the array.\n images = images.transpose([0, 2, 3, 1])\n\n return images", "def load_fmnist(path, kind='train'):\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte.gz'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte.gz'\n % kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\n return images, labels", "def load_chunk(self, start): # TODO parallelize this whole process\n self.X = queue.Queue()\n n = 0 # number of loaded batches\n print('stop loading : %s' % self.stop_loading())\n print('start + n : %s' % str(start + n))\n while (not self.stop_loading()) and (start + n) < self.size:\n print('load')\n self.X.put(np.load(self.data_filenames[start+n]))\n n += 1\n print('return chunk')\n return n", "def load_data(N,df):\n # allocate a numpy array for the images (N, 96x96px, 3 channels, values 0 - 255)\n X = np.zeros([N,96,96,3],dtype=np.uint8) \n #convert the labels to a numpy array too\n y = np.squeeze(df.as_matrix(columns=['label']))[0:N]\n #read images one by one, tdqm notebook displays a progress bar\n for i, row in tqdm_notebook(df.iterrows(), total=N):\n if i == N:\n break\n X[i] = 
cv2.imread(row['path'])\n \n return X,y", "def load_CIFAR_batch(filename):\n with open(filename, 'rb')as f:\n # datadict = p.load(f)\n datadict = pickle.load(f, encoding = 'bytes')\n X = datadict[b'data']\n Y = datadict[b'labels']\n X = X.reshape(10000, 3, 32, 32)\n Y = np.array(Y)\n return X, Y", "def load_data(self, f): \n self.sampling = True\n self.reads = np.load(f)\n self.total = self.reads.shape[0]", "def load_data():\n\n training_files_dir = \"digits/trainingDigits\"\n training_files = os.listdir(training_files_dir)\n file_num = len(training_files)\n hw_labels = []\n\n training_mat = zeros((file_num, 32 * 32))\n for i in xrange(file_num):\n filename = training_files[i]\n file_label = int((filename.split(\".\")[0]).split(\"_\")[0])\n hw_labels.append(file_label)\n training_mat[i, :] = img2vector(training_files_dir + '/' + filename)\n\n return training_mat, hw_labels", "def image_decoder(rawbytes):\n img = Image.open(BytesIO(rawbytes))\n array = np.asarray(img, dtype=np.uint8)\n return array", "def get_rawimage(self, raw_file, det):\n # Check for file; allow for extra .gz, etc. suffix\n fil = glob.glob(raw_file + '*')\n if len(fil) != 1:\n msgs.error(\"Found {:d} files matching {:s}\".format(len(fil)))\n\n # Read\n msgs.info(\"Reading MMIRS file: {:s}\".format(fil[0]))\n hdu = fits.open(fil[0])\n head1 = fits.getheader(fil[0],1)\n\n detector_par = self.get_detector_par(hdu, det if det is None else 1)\n\n # get the x and y binning factors...\n binning = head1['CCDSUM']\n xbin, ybin = [int(ibin) for ibin in binning.split(' ')]\n\n # First read over the header info to determine the size of the output array...\n datasec = head1['DATASEC']\n x1, x2, y1, y2 = np.array(parse.load_sections(datasec, fmt_iraf=False)).flatten()\n\n # ToDo: I am currently using the standard double correlated frame, that is a difference between\n # the first and final read-outs. In the future need to explore up-the-ramp fitting.\n if len(hdu)>2:\n data = mmirs_read_amp(hdu[1].data.astype('float64')) - mmirs_read_amp(hdu[2].data.astype('float64'))\n else:\n data = mmirs_read_amp(hdu[1].data.astype('float64'))\n array = data[x1-1:x2,y1-1:y2]\n\n ## ToDo: This is a hack. Need to solve this issue. I cut at 998 due to the HK zero order contaminating\n ## the blue part of the zJ+HK spectrum. 
For other setup, you do not need to cut the detector.\n if (head1['FILTER']=='zJ') and (head1['DISPERSE']=='HK'):\n array = array[:int(998/ybin),:]\n rawdatasec_img = np.ones_like(array,dtype='int')\n oscansec_img = np.ones_like(array,dtype='int')\n\n # Need the exposure time\n exptime = hdu[self.meta['exptime']['ext']].header[self.meta['exptime']['card']]\n # Return, transposing array back to orient the overscan properly\n return detector_par, np.flipud(array), hdu, exptime, np.flipud(rawdatasec_img),\\\n np.flipud(np.flipud(oscansec_img))", "def load_CIFAR_batch(filename):\n with open(filename, 'rb')as f:\n datadict = p.load(f, encoding='iso-8859-1')\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000, 3, 32, 32)\n Y = np.array(Y)\n return X, Y", "def load_CIFAR_batch(filename):\n with open(filename, 'rb') as f:\n datadict = load_pickle(f)\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000,3072)\n Y = np.array(Y)\n return X, Y", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb') as f:\r\n datadict = load_pickle(f)\r\n X = datadict['data']\r\n Y = datadict['labels']\r\n X = X.reshape(10000,3072)\r\n Y = np.array(Y)\r\n return X, Y", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb')as f:\r\n datadict = p.load(f)\r\n \r\n X = datadict['data']\r\n Y = datadict['labels']\r\n \r\n print X.shape\r\n X = X.reshape(X.shape[0], SHAPE[0], SHAPE[1], SHAPE[2])\r\n Y = np.array(Y)\r\n return X, Y", "def read_npy_chunk(filename, start_row, num_rows):\n assert start_row >= 0 and num_rows > 0\n with open(filename, 'rb') as fhandle:\n major, minor = np.lib.format.read_magic(fhandle)\n shape, fortran, dtype = np.lib.format.read_array_header_1_0(fhandle)\n assert not fortran, \"Fortran order arrays not supported\"\n # Make sure the offsets aren't invalid.\n assert start_row < shape[0], (\n 'start_row is beyond end of file'\n )\n assert start_row + num_rows <= shape[0], (\n 'start_row + num_rows > shape[0]'\n )\n # Get the number of elements in one 'row' by taking\n # a product over all other dimensions.\n row_size = np.prod(shape[1:])\n start_byte = start_row * row_size * dtype.itemsize\n fhandle.seek(start_byte, 1)\n n_items = row_size * num_rows\n flat = np.fromfile(fhandle, count=n_items, dtype=dtype)\n return flat.reshape((-1,) + shape[1:])", "def loadIRframe(f):\n import numpy as np\n from StringIO import StringIO\n frame = np.zeros((240,320),dtype=float)\n for i in range(240):\n s=StringIO(f.readline())\n if s:\n frame[i]=np.genfromtxt(s,delimiter=';')\n else:\n print('Did not load the line.\\n')\n return 0\n return frame#[35:195,80:260] ", "def imread(filename):\n return np.asarray(Image.open(filename), dtype=np.uint8)[..., :3]", "def ImportDataMRI(picType=-1, batchNumber=1):\n data = np.load('mri_scans_shaped.npy', encoding='bytes')\n picsAll = np.asarray(data[0:374:1,:])\n test_pic = np.asarray(data[374:400:1,:])\n# picsAll = np.asarray(data[b'data'])\n# if picType < 0 or picType > 9:\n# picIndex = np.arange(picsAll.shape[0])\n# else:\n# picIndex = np.where(np.asarray(data[b'labels'])==picType)[0]\n #pics = picsAll[picIndex,:]\n# lenpic = len(pics[0,:])//3\n# return np.floor(0.299*pics[:, :lenpic]+0.587*pics[:, lenpic:2*lenpic]+0.114*pics[:, 2*lenpic:3*lenpic])\n return picsAll,test_pic", "def load_data(self, from_idx):\n length = len(self.filenames)\n # we assume all images have the same dimensions\n shape = cv2.imread(filenames[0], int(self.color)).shape\n if not self.color:\n shape += (1,) # add additionnal channel for black and 
white\n X = []\n for f in tqdm(self.filenames[:5000]):\n if psutil.virtual_memory()[2] >= 60.0:\n break # preserve memory\n img = cv2.imread(f, int(self.color))\n if img is not None:\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n # change range of image to [-1, 1]\n # TODO : different procedure for colored images\n if not self.color:\n img = img.astype('float32')\n mx = np.max(img)\n mn = np.min(img)\n m = mx/2 + mn/2\n r = mx/2 - mn/2\n else:\n mx = np.amax(np.amax(img, axis=0), axis=0)\n mn = np.amin(np.amin(img, axis=0), axis=0)\n m = mx/2 + mn/2\n r = mx/2 - mn/2\n if np.all(r):\n img = (img - m)/r # works in both cases\n # add to dataset\n X.append(img)\n self.X = np.array(X)", "def extract_data(filename, num_images):\n filepath = os.path.join(WORK_DIRECTORY, filename)\n print('Extracting', filepath)\n with open(filepath, mode='rb') as bytestream:\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)\n data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)\n data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\n data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)\n return data", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,'%s-labels-idx1-ubyte.gz'% kind)\n\n images_path = os.path.join(path,'%s-images-idx3-ubyte.gz'% kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,offset=16).reshape(len(labels), 784)\n\n print(\"Dataset Loaded\")\n \n return images, labels", "def loadImages(loadPath):\n img_array = []\n for filename in glob.glob(loadPath):\n img = cv2.imread(filename)\n height, width, layers = img.shape\n size = (width, height)\n img_array.append(img)\n\n return img_array", "def extract_images(filename,lx):\n print('Extracting', filename,'aaaaaa')\n \n data=numpy.loadtxt(filename,dtype='int64')\n dim=data.shape[0]\n data=data.reshape(dim, lx, lx, 1) \n # Convert shape from [num examples, rows, columns, depth]\n # to [num examples, rows*columns] (assuming depth == 1)\n data = data.reshape(data.shape[0],\n data.shape[1] * data.shape[2])\n # Convert from [0, 255] -> [0.0, 1.0].\n data = data.astype(numpy.float64)\n # images = numpy.multiply(images, 1.0 / 255.0) # commented since it is ising variables\n data = numpy.multiply(data, 1.0 ) # multiply by one, instead\n print(data.shape)\n return data", "def nb_read_data(data_chunk):\n\t#ensure that the data_chunk has the right length\n\tprint(data_chunk.shape)\n\tassert np.mod(data_chunk.shape[0],3)==0\n\n\tout=np.empty(data_chunk.shape[0]//3*2,dtype=np.uint16)\n\timage1 = np.empty((2048,2048),dtype=np.uint16)\n\timage2 = np.empty((2048,2048),dtype=np.uint16)\n\n\tfor i in nb.prange(data_chunk.shape[0]//3):\n\t\tfst_uint8=np.uint16(data_chunk[i*3])\n\t\tmid_uint8=np.uint16(data_chunk[i*3+1])\n\t\tlst_uint8=np.uint16(data_chunk[i*3+2])\n\n\t\tout[i*2] = (fst_uint8 << 4) + (mid_uint8 >> 4)\n\t\tout[i*2+1] = ((mid_uint8 % 16) << 8) + lst_uint8\n\n\treturn out", "def extract_labels(filename, num_images):\n filepath = os.path.join(WORK_DIRECTORY, filename)\n print('Extracting', filepath)\n with open(filepath, mode='rb') as bytestream:\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def nb_read_data(data_chunk):\n\t#ensure that the data_chunk has the right length\n\n\tassert 
np.mod(data_chunk.shape[0],3)==0\n\n\tout=np.empty(data_chunk.shape[0]//3*2,dtype=np.uint16)\n\timage1 = np.empty((2048,2048),dtype=np.uint16)\n\timage2 = np.empty((2048,2048),dtype=np.uint16)\n\n\tfor i in nb.prange(data_chunk.shape[0]//3):\n\t\tfst_uint8=np.uint16(data_chunk[i*3])\n\t\tmid_uint8=np.uint16(data_chunk[i*3+1])\n\t\tlst_uint8=np.uint16(data_chunk[i*3+2])\n\n\t\tout[i*2] = (fst_uint8 << 4) + (mid_uint8 >> 4)\n\t\tout[i*2+1] = ((mid_uint8 % 16) << 8) + lst_uint8\n\n\treturn out", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb') as f:\r\n datadict = pickle.load(f, encoding='latin1')\r\n X = datadict['data']\r\n Y = datadict['labels']\r\n X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\r\n Y = np.array(Y)\r\n return X, Y", "def load_images(filenames):\n h,w,c = SIZE\n images = np.empty((len(filenames),h,w,c))\n for i,img_path in enumerate(filenames):\n # Base64\n with open(img_path,'rb') as image_file:\n img_base64_encode = base64.b64encode(image_file.read())\n img_base64 = base64.b64decode(img_base64_encode)\n images[i] = np.array(Image.open(io.BytesIO(img_base64)))/255.0 # Reducción en distancia\n return images", "def load_full_im(self, im_name):\n # return np.genfromtxt(im_name, delimiter=self.delim)#[:,1:] # first column gives column number\n try: \n return np.loadtxt(im_name, delimiter=self.delim,\n usecols=range(1,self.pic_width+1))\n except IndexError as e:\n error('Image analysis failed to load image '+im_name+'\\n'+str(e))\n return np.zeros((self.pic_width, self.pic_height))", "def read_in():\n list_images_out = []\n index = 0\n current_image_points = []\n with open(\"./darknet/output.txt\") as results:\n lines = results.readlines()\n for line in lines:\n try:\n i = int(line[-(4+6+1):][:6]) # ex of file 000000.jpg 6 + 4 characters\n if i != index:\n print(\"PROBLEM\")\n print(i, index)\n exit()\n \n list_images_out.append(parse_current_images(current_image_points, index))\n # Reset the current image\n current_image_points = []\n index += 1\n except ValueError:\n # Real line\n left, top, right, bottom = line.split(\" \")\n left, right, top, bottom = int(left), int(right), int(top), int(bottom)\n current_image_points.append([0, left, right, bottom, top])\n\n list_images_out.append(parse_current_images(current_image_points, index))\n\n # We remove the first image due to line 36 test\n list_images_out.pop(0)\n print(len(list_images_out))\n return list_images_out", "def read_mhd_and_raw(path, numpyFlag=True):\n img = sitk.ReadImage(path)\n if not numpyFlag:\n return img\n\n nda = sitk.GetArrayFromImage(img) # (img(x,y,z)->numpyArray(z,y,x))\n return nda", "def batch_to_ndarray(file):\n file_batch = os.listdir(file)\n file_batch.sort(key=lambda x: int(re.findall('\\d+', x)[0]))\n posterior_sample = np.load(file + \"\\\\\" + file_batch[0])\n for batch in file_batch[1:]:\n #print(batch)\n new_batch = np.load(file + \"\\\\\" + batch)\n if new_batch.ndim > 1:\n posterior_sample = np.concatenate((posterior_sample, new_batch),\n axis = posterior_sample.ndim - 1)\n posterior_sample = pd.DataFrame(posterior_sample).dropna(axis=1)\n return posterior_sample", "def load_dataset_into_numpy_array(img_path, mode=\"int32\"):\n files = os.listdir(img_path)\n result = np.asarray([])\n for file in files:\n result = np.concatenate(result, load_image_into_numpy_array(img_path + \"/\" + file, mode).reshape((-1, 1)))\n return result", "def read_gz(images,labels):\n\t# Open the images with gzip in read binary mode\n\t# images = 
gzip.open('../MNIST-data/train-images-idx3-ubyte.gz', 'rb')\n\t# labels = gzip.open('../MNIST-data/train-labels-idx1-ubyte.gz', 'rb')\n\n\t# Read the binary data\n\n\t# We have to get big endian unsigned int. So we need '>I'\n\n\t# Get metadata for images\n\timages.read(4) # skip the magic_number\n\tnumber_of_images = images.read(4)\n\tnumber_of_images = unpack('>I', number_of_images)[0]\n\trows = images.read(4)\n\trows = unpack('>I', rows)[0]#28\n\tcols = images.read(4)\n\tcols = unpack('>I', cols)[0]#28\n\n\t# Get metadata for labels\n\tlabels.read(4) # skip the magic_number\n\tN = labels.read(4)\n\tN = unpack('>I', N)[0] #60000\n\t# print(number_of_images);\n\n\tif number_of_images != N:\n\t raise Exception('number of labels did not match the number of images')\n\n\t# Get the data\n\tx = zeros((N, rows, cols), dtype=float32) # Initialize numpy array #60000X28X28\n\ty = zeros((N, 1), dtype=uint8) # Initialize numpy array\n\tfor i in range(N):\n\t if i % 1000 == 0:\n\t print(\"i: %i\" % i)\n\t for row in range(rows):\n\t for col in range(cols):\n\t tmp_pixel = images.read(1) # Just a single byte\n\t tmp_pixel = unpack('>B', tmp_pixel)[0]\n\t x[i][row][col] = tmp_pixel\n\t tmp_label = labels.read(1)\n\t y[i] = unpack('>B', tmp_label)[0]\n\t # print(y.shape)#60000X1\n\treturn (x, y)", "def read_image(self, ifd):\n ifd.img_data = np.array([], dtype='uint8')\n strips = ifd.get_strips() # [(strip_offset, strip_byte_count)]\n for strip in strips:\n ifd.img_data = np.append(ifd.img_data, self.tif_file.read(size=strip[1], location=strip[0]))", "def load_mnist(kind='train'):\r\n with open('%s-labels.idx1-ubyte' % kind, 'rb') as lbpath:\r\n magic, n = struct.unpack('>II', lbpath.read(8))\r\n labels = np.fromfile(lbpath, dtype=np.uint8)\r\n\r\n with open('%s-images.idx3-ubyte' % kind, 'rb') as imgpath:\r\n magic, num, rows, cols = struct.unpack('>IIII', imgpath.read(16))\r\n images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)\r\n\r\n return images, labels", "def load_CIFAR_batch(filename):\n with open(filename, 'rb') as f:\n datadict = pickle.load(f, encoding='latin1')\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype(\"float64\")\n Y = np.array(Y)\n return X, Y", "def process_image(self, image_path):\n\n img = load_img(image_path, target_size=IMAGE_SIZE)\n img_array = img_to_array(img)\n # Create a batch by increase dimensions\n img_array = expand_dims(img_array, 0)\n print(img_array.shape)\n return img_array", "def load_images(self, image_path):\n X_train = []\n\n # Load all files from the image path using Image.open.\n for i in recursive_list(image_path):\n # Open images as ???\n img = Image.open(i)\n # Convert to NP array.\n img = np.asarray(img)\n # Append them into higher order array.\n if img.shape == (128, 128, 3):\n X_train.append(img)\n\n # return all the images concatenated as a 4D array\n return np.asarray(X_train)", "def load_nifty_volume_as_array(filename, with_header = False):\n img = nibabel.load(filename)\n data = img.get_data()\n data = np.transpose(data, [2,1,0])\n if(with_header):\n return data, img.affine, img.header\n else:\n return data", "def load_nifty_volume_as_array(filename, with_header = False):\n img = nibabel.load(filename)\n data = img.get_data()\n data = np.transpose(data, [2,1,0])\n if(with_header):\n return data, img.affine, img.header\n else:\n return data", "def read_raw(rawfile, shape, dtype=np.uint16, kind='middleton'):\n\n # -- alert\n print(\"READ_RAW: reading 
{0}...\".format(rawfile))\n\n\n # -- read file\n if kind=='middleton':\n return np.fromfile(open(rawfile),dtype) \\\n .reshape(shape[2],shape[0],shape[1])[:,:,::-1] \\\n .transpose(1,2,0) \\\n .astype(float)" ]
[ "0.6541903", "0.63630474", "0.6321544", "0.63167053", "0.6276605", "0.6192698", "0.6192698", "0.6186343", "0.61686295", "0.61686295", "0.61686295", "0.61686295", "0.61686295", "0.61686295", "0.61289364", "0.61266387", "0.61094004", "0.6033004", "0.60216844", "0.59940916", "0.5986096", "0.5975577", "0.5949083", "0.5943144", "0.59400517", "0.5939802", "0.5937374", "0.59363717", "0.5912498", "0.59087104", "0.58986497", "0.5861063", "0.58597296", "0.58529305", "0.5850779", "0.5835343", "0.5828976", "0.5819258", "0.5815122", "0.58127975", "0.5806744", "0.5794674", "0.5789154", "0.57888186", "0.5781721", "0.57664555", "0.57543045", "0.5743541", "0.57403535", "0.57345235", "0.57276636", "0.5717725", "0.5717635", "0.5716327", "0.57129353", "0.57043624", "0.5702856", "0.5699917", "0.56978524", "0.56974643", "0.56970596", "0.56836236", "0.5682257", "0.5680149", "0.5671515", "0.56712705", "0.56681186", "0.5662532", "0.5658413", "0.56546867", "0.56533265", "0.56501997", "0.5648958", "0.56457216", "0.5640165", "0.56382024", "0.5635738", "0.562736", "0.5623861", "0.5615451", "0.561456", "0.5611476", "0.56088704", "0.5598444", "0.55887127", "0.5584482", "0.5579976", "0.5567715", "0.5559041", "0.5556145", "0.55439466", "0.55335575", "0.55258036", "0.55143505", "0.55013484", "0.54994065", "0.5494839", "0.54935634", "0.54935634", "0.548954" ]
0.68674904
0
Loads and returns images and a cih info dict.
def load_video(cih_file):
    cih = get_cih(cih_file)
    mraw_file = path.splitext(cih_file)[0] + '.mraw'
    N = cih['Total Frame']
    h = cih['Image Height']
    w = cih['Image Width']
    bit = cih['Color Bit']
    images = load_images(mraw_file, h, w, N, bit, roll_axis=False)
    return images, cih
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_metadata(self):\n\n cub_dir = self.root / \"CUB_200_2011\"\n images_list: Dict[int, List] = OrderedDict()\n\n with open(str(cub_dir / \"train_test_split.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n is_train_instance = int(row[1]) == 1\n if is_train_instance == self.train:\n images_list[img_id] = []\n\n with open(str(cub_dir / \"images.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n images_list[img_id].append(row[1])\n\n with open(str(cub_dir / \"image_class_labels.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n # CUB starts counting classes from 1 ...\n images_list[img_id].append(int(row[1]) - 1)\n\n with open(str(cub_dir / \"bounding_boxes.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n box_cub = [int(float(x)) for x in row[1:]]\n box_avl = [box_cub[1], box_cub[0], box_cub[3], box_cub[2]]\n # PathsDataset accepts (top, left, height, width)\n images_list[img_id].append(box_avl)\n\n images_tuples = []\n for _, img_tuple in images_list.items():\n images_tuples.append(tuple(img_tuple))\n self._images = images_tuples # type: ignore\n\n # Integrity check\n for row_check in self._images:\n filepath = self.root / CUB200.images_folder / row_check[0]\n if not filepath.is_file():\n if self.verbose:\n print(\"[CUB200] Error checking integrity of:\", filepath)\n return False\n\n return True", "def _GetImageInfo(self,path):\n hd = Header(path, scan=True)\n hdr = hd.hdr\n self.hdr = hdr\n if hdr is None:\n# Either a ref.dat file or it isn't an imaging file.\n if 'ref' in path and 'dat' in path:\n self.refdats[os.path.realpath(path)] = True\n info = {'type':'refdat'}\n return info\n else:\n return None\n elif hdr['filetype'] == 'dicom' and not path.endswith('.yaml'):\n# Write a yaml file to the raw data directory if possible.\n dirname, outfile = self._yaml_filename(path)\n yaml_name = '%s/%s' % (dirname, outfile)\n if not os.path.exists(yaml_name):\n# Create yaml file using dirname,\n# e.g., ../anatomicals/S2_EFGRE3D/s2_efgre3d.yaml\n try:\n hd.write_hdr_to_yaml('%s/%s' % (dirname,outfile))\n except IOError:\n# This is a nonessential function, so ignore exceptions\n# such as access violations.\n pass\n elif hdr['filetype'] == 'dicom' or hdr['filetype'] == 'ge_ifile':\n if not os.path.isdir(path):\n path = os.path.dirname(path)\n shdr = hdr['subhdr']\n nhdr = hdr['native_header']\n self.shdr = shdr\n if 'dti' in shdr.get('PulseSequenceName','').lower() \\\n or 'dti' in nhdr.get('PulseSequenceFile',''):\n psdname = 'dti'\n else:\n psdname = os.path.basename((shdr.get('PulseSequenceName','').strip()).lower())\n info = {'psdname':psdname, \\\n 'acqtime':shdr['AcqTime'], \\\n 'series':int(shdr['SeriesNumber']), \\\n 'plane':hdr['plane'].strip(), \\\n 'type':self.imgtype.get(psdname,None), \\\n 'plane':hdr['plane'], \\\n 'acqtime':shdr['SeriesTime'], \\\n# 'fmapdir':None, \\\n 'refdat':None, \\\n 'imgfile':None, \\\n 'base':None, \\\n 'tdim':int(hdr['tdim']), \\\n 'echo_spacing':None, \\\n 'filetype':'brik', \\\n 'suffix':self.suffix.get(hdr['filetype'], 'brik'), \\\n 'data_filetype':hdr['filetype']}\n if info['type'] == 'localizer':\n# Don't process the localizer.\n return info\n if isinstance(info['acqtime'], 
int):\n info['acquisition_time'] = time.ctime(info['acqtime'])\n if nhdr.get('ImageFormat',('unknown'))[0] == 'DERIVED' and info['type'] == 'epi':\n# Sometimes screenshots are defined as epis.\n info['type'] = None\n\n# Call the method appropriate to the type of scan in this series.\n stat = apply( self.GetInfoMethods.get(info['type'], self._NoInfo), \\\n [info, path])\n if stat:\n info = {'type':'break'}\n return info\n info['suffix'] = self.suffix.get(info['filetype'], 'brik')\n return info", "def _load_components(self):\n compsf = self._fetch_components_file()\n comps_img = niimg.load_img(compsf)\n return comps_img", "def images(self) -> dict:\n raise NotImplementedError", "def loadImagesAvatar(self): \n dictionary = {}\n dictionary[\"body\"] = None\n dictionary[\"shoes\"] = None\n dictionary[\"shirt\"] = None\n dictionary[\"trousers\"] = None\n dictionary[\"skirt\"] = None\n dictionary[\"head\"] = None\n dictionary[\"hair\"] = None\n dictionary[\"mask\"] = None\n return dictionary", "def load_data():\n # Dictionary mapping image names to labels\n image_name_to_label = dict()\n\n # Store labels associated with image names\n notifier.send(\" Reading metadata...\")\n with open(\"data/metadata.csv\") as file: # Original dataset\n # Use images for normal, virus (unknown type), COVID-19, SARS\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n if row[\"Label\"].lower() == \"normal\":\n label = 2\n elif row[\"Label_2_Virus_category\"].lower() == \"covid-19\":\n label = 0\n elif row[\"Label_1_Virus_category\"].lower() == \"virus\":\n label = 1\n else:\n continue\n image_name_to_label[row[\"X_ray_image_name\"]] = label\n with open(\"data/metadata2.csv\") as file: # GitHub dataset\n # Use COVID-19, SARS\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n if row[\"filename\"] in image_name_to_label: # Image already added\n continue\n if \"covid-19\" in row[\"finding\"].lower():\n label = 0\n elif row[\"finding\"].lower() == \"sars\":\n label = 1\n else:\n continue\n image_name_to_label[row[\"filename\"]] = label\n with open(\"data/metadata_COVID-19.csv\") as file: # Additional COVID-19 images\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n name = \"COVID-19/\" + row[\"FILE NAME\"] + \".\" + row[\"FORMAT\"]\n image_name_to_label[name.lower().replace(\" \", \"\")] = 0\n with open(\"data/metadata_ViralPneumonia.csv\") as file: # Additional virus images\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n name = \"ViralPneumonia/\" + row[\"FILE NAME\"].replace(\"-\", \"(\") + \").\" + row[\"FORMAT\"]\n image_name_to_label[name.lower().replace(\" \", \"\")] = 1\n with open(\"data/metadata_Normal.csv\") as file: # Additional normal images\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n name = \"Normal/\" + row[\"FILE NAME\"].replace(\"-\", \"(\") + \").\" + row[\"FORMAT\"]\n image_name_to_label[name.lower().replace(\" \", \"\")] = 2\n\n notifier.send(\" Loading images...\")\n images, labels = load_images(image_name_to_label)\n\n notifier.send(\" Splitting data...\")\n return split_data(images, labels)", "def get_data(self):\n data_str = get_cls_img(root=self.root, suffix=self.suffix)\n\n if not self.load_images:\n return data_str\n\n cls_img_data = dict.fromkeys(data_str.keys())\n for cls_ in data_str:\n temp = [0] * len(data_str[cls_])\n for i, img_name in enumerate(data_str[cls_]):\n img = _load_image(\n img_url=os.path.join(self.root, cls_, img_name),\n 
expand_dim=self.expand_dim\n )\n temp[i] = img\n cls_img_data[cls_] = list(temp)\n\n return cls_img_data", "def getimgs():", "def load_image_data():\n print(\"Loading image data...\")\n label_dict = get_label_vectors()\n categories = [c for c in os.listdir('images/') if c[0] != '.'] # ignore\n labels = [] # instantiate list for image labels\n data = [] # instantiate list for image data\n for i in categories:\n path = 'images/{}/'.format(i) # define path to category folder\n for j in os.listdir(path): # get images from category folder\n labels.append(label_dict[i]) # append label vector\n data.append(cv2.imread(path + j).flatten()) # append flattened image data\n\n labels = np.array(labels) # convert lists to array\n data = np.array(data)\n print(\"Done.\")\n\n return labels, data", "def load_test_images(images):\n loaded = {}\n for description, _ in images.items():\n loaded[description] = load_from_netcdf(description)\n return loaded", "def load_images(filename):\n images = _load(filename)\n #_info_image(image, title=os.path.basename(filename))\n return images", "def load_image(self, image_id):\n \n # load image infos\n \n info = self.image_info[image_id]\n patch_path = info['path']\n width = info['width']\n height = info['height']\n impath = os.path.join(patch_path,\"images\")\n file_list = os.listdir(impath) \n channels = info['channels']\n \n image = []\n \n # stack channels to be loaded.\n \n for channel in channels:\n \n if channel == \"none\":\n channel_image = skimage.img_as_ubyte(np.zeros( (height,width) ) )\n \n else:\n channel_image_name = [x for x in file_list if channel in x][0] \n channel_image_path = os.path.join(impath, channel_image_name)\n channel_image = skimage.io.imread(channel_image_path)\n channel_image = skimage.img_as_ubyte(channel_image)\n image.append(channel_image)\n \n image = np.stack(image, axis=2)\n \n return image", "def get_images_and_labels(tampered_path, authentic_path):\n tampered_dir = tampered_path\n authentic_dir = authentic_path\n images = {}\n for im in glob.glob(authentic_dir):\n images[im] = {}\n images[im]['mat'] = cv2.imread(im)\n images[im]['label'] = 0\n for im in glob.glob(tampered_dir):\n images[im] = {}\n images[im]['mat'] = cv2.imread(im)\n images[im]['label'] = 1\n return images", "def loadImages(self):\n for map_name, img in self.maps.items():\n if img is None or map_name not in __class__.input_tr:\n continue\n getCyclesImage(img)", "def load_images(self):\r\n self.standing_frame = [load_image(\"cat1.png\")]\r\n self.walk_frames_r = [load_image(\"cat2.png\"), load_image(\"cat3.png\"),\r\n load_image(\"cat4.png\")]", "def global_metadata(paths):\n\n # Weakly group images to partition image set size- crucial optimization step\n if os.path.exists(paths.image_preprocess):\n clumped_paths = json.loads(open(paths.image_preprocess).read())\n else:\n clumped_paths = network.alpha_categorize(paths)\n print(\"Hashed source images\")\n\n with open(paths.image_preprocess, 'w') as json_file:\n json.dump(clumped_paths, json_file)\n\n # Combinatorial image grouping to graph\n image_graph = network.load_graph(paths.image_network_path)\n\n total = len(list(chain(*clumped_paths.values())))\n counter = 0.\n\n for image_paths in clumped_paths.values():\n counter += len(image_paths)\n print(str(int(counter / float(total) * 100)) + \"% complete\")\n\n if len(image_paths) > 1:\n image_grouping = images.load_paths(paths.default_patches, image_paths)\n image_graph = metadata.network.network_images(\n image_grouping, threshold=0, network=image_graph)\n else:\n 
image_graph.add_node(image_paths[0])\n\n metadata.network.save_graph(paths.image_network_path, image_graph)\n print(\"Updated image graph.\")\n\n # Create informational json files for templates and files\n templates.build(paths, image_graph)\n mappings.build(paths, image_graph)\n print(\"Created JSON metadata files.\")", "def _get_image_info(\n image_id: int,\n width: int,\n height: int,\n file_name: str,\n license_id=1,\n flickr_url=\"\",\n coco_url=\"\",\n date_captured=datetime.datetime.utcnow().isoformat(' ')):\n image_info = {\n \"id\": image_id,\n \"width\": width,\n \"height\": height,\n \"file_name\": file_name,\n \"license\": license_id,\n \"flickr_url\": flickr_url,\n \"coco_url\": coco_url,\n \"date_captured\": date_captured,\n }\n\n return image_info", "def load_images():\n\n def load_image(img_file_name):\n \"\"\"Return the loaded pygame image with the specified file name.\n\n This function looks for images in the game's images folder\n (./images/). All images are converted before being returned to\n speed up blitting.\n\n Arguments:\n img_file_name: The file name (including its extension, e.g.\n '.png') of the required image, without a file path.\n \"\"\"\n file_name = os.path.join('.', 'images', img_file_name)\n img = pygame.image.load(file_name)\n img.convert()\n return img\n\n return {'background': load_image('background.png'),\n 'pipe-end': load_image('pipe_end.png'),\n 'pipe-body': load_image('pipe_body.png'),\n # images for animating the flapping bird -- animated GIFs are\n # not supported in pygame\n 'bird-wingup': load_image('bird_wing_up.png'),\n 'bird-wingdown': load_image('bird_wing_down.png')}", "def _get_imagenet_as_dict(self):\n real_file_path = os.path.realpath(self.map_file)\n if not os.path.exists(real_file_path):\n raise IOError(\"map file {} not exists\".format(self.map_file))\n\n label_dict = {}\n with open(real_file_path) as fp:\n line = fp.readline()\n while line:\n labels = line.split(\" \")\n label_dict[labels[1]] = labels[0]\n line = fp.readline()\n\n # get all the dir which are n02087046, n02094114, n02109525\n dir_paths = {}\n for item in label_dict:\n real_path = os.path.join(self.image_dir, label_dict[item])\n if not os.path.isdir(real_path):\n logger.warning(\"{} dir is not exist\".format(real_path))\n continue\n dir_paths[item] = real_path\n\n if not dir_paths:\n raise PathNotExistsError(\"not valid image dir in {}\".format(self.image_dir))\n\n # get the filename, label and image binary as a dict\n for label in dir_paths:\n for item in os.listdir(dir_paths[label]):\n file_name = os.path.join(dir_paths[label], item)\n if not item.endswith(\"JPEG\") and not item.endswith(\"jpg\"):\n logger.warning(\"{} file is not suffix with JPEG/jpg, skip it.\".format(file_name))\n continue\n data = {}\n data[\"file_name\"] = str(file_name)\n data[\"label\"] = int(label)\n\n # get the image data\n real_file_path = os.path.realpath(file_name)\n image_file = open(real_file_path, \"rb\")\n image_bytes = image_file.read()\n image_file.close()\n if not image_bytes:\n logger.warning(\"The image file: {} is invalid.\".format(file_name))\n continue\n data[\"image\"] = image_bytes\n yield data", "def Build( self, image_paths, output_path, idType ):\n global log_mess\n \n ico_data, partial_log = [ '' for _ in range(2) ]\n img_data = b''\n ## Define header of ICO file.\n num_images = len(image_paths)\n ico_data = self.HeaderIcondir( num_images, idType )\n\n ## Size of all the headers (image headers + file header)\n ## (1byte)bWidth - (1byte)bHeight - (1byte)bColorCount - 
(1byte)bReserved -\n ## -(2bytes)wPlanes - (2bytes)wBitCount - (4bytes)dwBytesInRes - (4bytes)dwImageOffset.\n dataoffset = calcsize('4B2H2I') * num_images + calcsize('HHH')\n\n ## Create ICO.\n for ii, image in enumerate(image_paths):\n values_or_err = self.IcondirEntry( image, dataoffset, num_images )\n try:\n icondirentry, imgdata, dataoffset = values_or_err\n ico_data += icondirentry\n img_data += imgdata\n partial_log += (log_mess + '; ' if num_images - ii > 1 else log_mess)\n except ValueError:\n return values_or_err\n \n ## Save ICO.\n with open(output_path, 'wb') as f_ico: \n f_ico.write(ico_data)\n f_ico.write(img_data)\n\n log_mess = partial_log\n log_mess += ' --> Successfully wrote icon to %s.' %output_path\n return log_mess", "def loadImagesTag(self): \n dictionary = {}\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(GENDER_FRONT)\n dictionary[\"gender\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SKIN_BACK)\n dictionary[\"skin\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(HEAD_BACK)\n dictionary[\"head\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(BODY_BACK)\n dictionary[\"body\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(MASK_BACK)\n dictionary[\"mask\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(HAIR_BACK)\n dictionary[\"hair\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n if self.avatarConfiguration[\"gender\"] == \"boy\":\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SHIRT_BACK)\n dictionary[\"shirt\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(TROUSERS_BACK)\n dictionary[\"trousers\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SKIRT_BACK)\n dictionary[\"skirt\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n else:\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SHIRT_DISABLED)\n dictionary[\"shirt\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(TROUSERS_DISABLED)\n dictionary[\"trousers\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SKIRT_BACK)\n dictionary[\"skirt\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SHOES_BACK)\n dictionary[\"shoes\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n return dictionary", "def _load_images_and_labels(image_dir):\n\n print('Extracting images from: ', image_dir)\n\n image_paths = _load_image_paths(image_dir)\n images = _extract_images(image_paths)\n num_images = len(image_paths)\n labels = np.ones(num_images, dtype=np.int64)\n\n return images, labels", "def initImages(self):\n pass", "def initImages(self):\n pass", "def initImages(self):\n pass", "def load():\r\n\r\n data = dict()\r\n global IMAGES_FILE_PATH\r\n chdir(IMAGES_FILE_PATH)\r\n try:\r\n with open('Descriptions_File.txt', 'r') as f:\r\n reader = csv.DictReader(f, delimiter=',')\r\n for row in reader:\r\n url = row['url']\r\n data[url] = row['description']\r\n f.close()\r\n except Exception: # If no Descriptions found\r\n 
return data\r\n return data", "def pics_dict(self):\n\n img_dict = {}\n\n for name, path in zip(ICON_NAMES,ICON_PATHS):\n\n if name == \"main_icon\":\n tk_pic = cGUIf.get_TkImage(path,32,32)\n\n else:\n tk_pic = cGUIf.get_TkImage(path,64,64)\n \n img_dict.update({name : tk_pic})\n\n return img_dict", "def image_info(self):\n\n if not self._image_info:\n path_image_info = os.path.join(\n self._path, f\"ImageSet_{self._image['ImageSetID']}.ImageInfo\"\n )\n\n # Make sure the ImageInfo file really exists\n if not os.path.exists(path_image_info):\n self.logger.warning(\"ImageInfo path doesn't exist: %s\", path_image_info)\n return None\n\n self.logger.debug(\"Reading image data from: %s\", path_image_info)\n self._image_info = pinn_to_dict(path_image_info)\n\n return self._image_info", "def image_classes():\n\n image_data_path = PROJECT_ROOT + \"/data/CUB_200_2011/\"\n\n # <class_id> <class_name>\n classes = open(image_data_path + \"classes.txt\").readlines()\n classes = [i.strip().split() for i in classes]\n\n # <image_id> <class_id>\n labels = open(image_data_path + \"image_class_labels.txt\").readlines()\n labels = [i.strip().split() for i in labels]\n\n class_ids = {}\n for i in classes:\n class_ids[i[1]] = int(i[0])\n\n label_ids = {}\n for i in labels:\n label_ids[int(i[0])] = int(i[1])\n\n return class_ids, label_ids", "def get_imgs_from_json(self):\n # instantiate COCO specifying the annotations json path\n # Specify a list of category names of interest\n catIds = self.coco.getCatIds(catNms=[self.categ])\n print(\"catIds: \", catIds)\n # Get the corresponding image ids and images using loadImgs\n imgIds = self.coco.getImgIds(catIds=catIds)\n images = self.coco.loadImgs(imgIds)\n print(f\"{len(images)} images in '{self.json_path}' with '{self.categ}' instances\")\n self.catIds = catIds # list\n return images", "def prep_image_data(arg_dict):\n cat_df = pd.read_csv(arg_dict['category_file'],\n skiprows=1,\n sep='\\s+')\n bbox_df = pd.read_csv(arg_dict['bbox_file'],\n skiprows=1,\n sep='\\s+')\n img_dir = arg_dict['image_dir']\n\n combo_df = pd.merge(cat_df, bbox_df, how='outer', on='image_name')\n combo_df['image_name'] = combo_df['image_name'].apply(\n lambda x: x[len('img'):-len('.jpg')])\n labels = Labels(combo_df, img_dir, n_images_loaded=-1)\n labels.set_data_target('raw_image', chunksize=3000)\n return labels", "def get_images():\n images = {}\n for k, v in DB.IMAGES.iteritems():\n images[k] = v.__dict__\n return images", "def load_image(self, image_id):\n# logger.info(\"image {}\".format(image_id))\n info = self.image_info[image_id]\n if info[\"image\"] is None:\n im = self.gen_imgs[info[\"path\"]][\"input_images\"][info[\"image_index\"]]\n image = np.ones([info['height'], info['width'], 1], dtype=np.uint8)\n image[:,:,0] = self.gen_imgs[info[\"path\"]][\"input_images\"][info[\"image_index\"]]\n# image[:,:,1] = self.gen_imgs[info[\"path\"]][\"input_images\"][info[\"image_index\"]]\n# image[:,:,2] = self.gen_imgs[info[\"path\"]][\"input_images\"][info[\"image_index\"]]\n self.image_info[image_id][\"image\"] = image\n# logger.info(\"cached {}\".format(image_id))\n else:\n image = self.image_info[image_id][\"image\"]\n# logger.info(\"missed {}\".format(image_id))\n\n return image", "def load_images():\n print(\"[+] UPDATE - Begin loading images\")\n\n colors = [\"w\", \"b\"]\n piece_types = [\"p\", \"R\", \"N\", \"B\", \"K\", \"Q\"]\n for color in colors:\n for type in piece_types:\n piece = color + type\n IMAGES[piece] = p.transform.scale(p.image.load(\"images/\" + piece + 
\".png\"), (SQ_SIZE, SQ_SIZE))\n\n print(\"[+] UPDATE - Images loaded\")", "def get_photos_info():\n photos_info = np.loadtxt(PHOTOS_INFO, delimiter=',', unpack=True, dtype=str, usecols=(0, 1))\n photos_info_dict = dict(zip(photos_info[0], photos_info[1]))\n return photos_info_dict", "def build_img_info(img_root):\n imgs = []\n feats = []\n K = []\n for i, name in enumerate(os.listdir(img_root)):\n if '.jpg' in name or '.JPG' in name:\n path = os.path.join(img_root, name)\n img = cv2.imread(path)\n imgs.append(img)\n feature_process = FeatureProcess(img)\n kpt, des = feature_process.extract_features()\n photo_info = PhotoExifInfo(path)\n photo_info.get_tags()\n K.append(photo_info.get_intrinsic_matrix())\n A = photo_info.get_area()\n D = photo_info.get_diam()\n feats.append({'kpt': kpt, 'des': des, 'A': A, 'D': D})\n return imgs, feats, K", "def load(cls):\n\n cls.images[\"Wall\"] = pygame.image.load(\n \"ressources/images/wall.png\").convert()\n cls.images[\"MacGyver\"] = pygame.image.load(\n \"ressources/images/Mac.png\").convert()\n cls.images[\"Guardian\"] = pygame.image.load(\n \"ressources/images/Guardian.png\").convert()\n cls.images[\"Path\"] = pygame.image.load(\n \"ressources/images/path.png\").convert()\n cls.images[\"Tube\"] = pygame.image.load(\n \"ressources/images/tube.png\").convert()\n cls.images[\"Ether\"] = pygame.image.load(\n \"ressources/images/ether.png\").convert()\n cls.images[\"Needle\"] = pygame.image.load(\n \"ressources/images/needle.png\").convert()\n cls.images[\"gr\"] = pygame.image.load(\n \"ressources/images/but_du_jeu.png\").convert()", "def read_image_data(self):\n\n for sequence_name in self.sequence_name_list:\n sequence = self.sequences[sequence_name]\n for image_id in sequence.image_id_list:\n sequence.image_dict[image_id].image_path = '{}{}/{}'.format(self.root_dir, self.name, sequence.image_dict[image_id].filename)", "def load_from_images(self):\n logging.debug(\"load_from_images called\")\n return True", "def populate_images(self):\n print \"Populating images info...\"\n images = self.get_all_images()\n for i in images:\n\n associated_snapshots = self.get_snapshots_of(i)\n\n self.spreadsheet[i.id] = dict(name=i.name, Name_tag=self.get_name_tag(i), id=i.id,\n KEEP_tag=self.get_keep_tag(i), PROD_tag=self.is_production(i),\n region=i.region.name,\n created=i.creationDate,\n associated_snapshots=associated_snapshots,\n description=i.description)", "def load_base_images(base_img):\n if base_img is not None:\n if not os.path.exists(base_img):\n base_img = os.path.join(LIGHTHOUSES_DIR, base_img)\n return (\n Image.open(os.path.join(base_img, 'on.gif')).convert('RGBA'),\n Image.open(os.path.join(base_img, 'off.gif'))\n )\n return None, None", "def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))", "def load(self, dirname):\n loaded_filenames = set()\n ini_filename = os.path.join(dirname, \"xpresser.ini\")\n if os.path.exists(ini_filename):\n config = ConfigParser.ConfigParser()\n config.read(ini_filename)\n for section_name in config.sections():\n if section_name.startswith(\"image \"):\n image_name = section_name.split(None, 1)[1]\n try:\n image_filename = config.get(section_name, \"filename\")\n except ConfigParser.NoOptionError:\n raise ImageDirError(\"Image %s missing filename option\"\n % image_name)\n image_filename = os.path.join(dirname, image_filename)\n if not os.path.exists(image_filename):\n raise ImageDirError(\"Image %s file not found: %s\" %\n (image_name, image_filename))\n try:\n 
image_similarity = config.getfloat(section_name,\n \"similarity\")\n except ConfigParser.NoOptionError:\n image_similarity = None\n except ValueError:\n value = config.get(section_name, \"similarity\")\n raise ImageDirError(\"Image %s has bad similarity: %s\"\n % (image_name, value))\n \n try:\n value = config.get(section_name, \"focus_delta\")\n match = CLICK_POSITION_RE.match(value)\n if not match:\n raise ImageDirError(\"Image %s has invalid click \"\n \"position: %s\" %\n (image_name, value))\n image_focus_delta = (int(match.group(\"x\")),\n int(match.group(\"y\")))\n except ConfigParser.NoOptionError:\n image_focus_delta = None\n image = Image(name=image_name,\n filename=image_filename,\n similarity=image_similarity,\n focus_delta=image_focus_delta)\n self._images[image_name] = image\n loaded_filenames.add(image_filename)\n\n # Load any other images implicitly with the default arguments.\n for basename in os.listdir(dirname):\n filename = os.path.join(dirname, basename)\n if filename not in loaded_filenames:\n ftype, fencoding = mimetypes.guess_type(filename)\n if ftype and ftype.startswith(\"image/\"):\n image_name = os.path.splitext(basename)[0]\n self._images[image_name] = Image(name=image_name,\n filename=filename)", "def __initDataFromImages(self):\n #Check if the local_db exist\n initial_dirs = os.listdir(os.getcwd())\n is_db_empty = False\n if len(os.listdir(self.base_dir)) == 1: #Empty here means no person data\n [images_dir] = os.listdir(self.base_dir)\n is_db_empty = images_dir == cfg.local[\"IMG_DIR\"]\n if cfg.local[\"DEFAULT_IMGS_DIR\"] in initial_dirs and is_db_empty:\n default_path = os.path.join(os.getcwd(), cfg.local[\"DEFAULT_IMGS_DIR\"])\n self.X, self.y = loadDataFromImagesPath(self.detector, default_path)\n self.le = LabelEncoder()\n #Nothing relate to mapping name to dir here, we don't care about\n #This data because of the user doesn't exist in the database\n self.__savePreProcessedData()", "def load_image(self, **kwargs):\n ...", "def get_image_info(path):\n try:\n image = Image.open(path)\n except IOError:\n logger.error(f\"'{path}' is not an image\")\n return\n\n if image.format != \"JPEG\":\n logger.error(f\"'{path}' is not a JPEG\")\n return\n\n info = {\n \"filename\": path,\n \"width\": image.width,\n \"height\": image.height,\n \"fileSize\": os.path.getsize(path),\n \"md5\": md5sum_file(path),\n }\n return info", "def read_data(case_dir):\n dict_images = dict()\n list_files = ['MR_512.nii.gz', 'landmarks_512.csv', ]\n # In fact, there is no Mask during inference, so we cannot load it.\n\n for file_name in list_files:\n file_path = case_dir + '/' + file_name\n assert os.path.exists(file_path), case_dir + ' does not exist!'\n\n if file_name.split('.')[-1] == 'csv':\n landmarks = pd.read_csv(file_path)\n dict_images['list_landmarks'] = landmark_extractor(landmarks)\n elif file_name.split('.')[0].split('_')[0] == 'MR':\n dict_images['MR'] = sitk.ReadImage(file_path, sitk.sitkFloat32)\n dict_images['MR'] = sitk.GetArrayFromImage(dict_images['MR'])[np.newaxis, :, :, :]\n elif file_name.split('.')[0].split('_')[0] == 'Mask':\n dict_images['Mask'] = sitk.ReadImage(file_path, sitk.sitkInt16)\n dict_images['Mask'] = sitk.GetArrayFromImage(dict_images['Mask'])[np.newaxis, :, :, :]\n\n return dict_images", "def load_sample_images():\n # Try to import imread from scipy. 
We do this lazily here to prevent\n # this module from depending on PIL.\n try:\n try:\n from scipy.misc import imread\n except ImportError:\n from scipy.misc.pilutil import imread\n except ImportError:\n raise ImportError(\"The Python Imaging Library (PIL) \"\n \"is required to load data from jpeg files\")\n ROOT_Dir = os.getcwd()\n module_path = os.path.join(ROOT_Dir, \"images\")\n with open(os.path.join(module_path, 'README.txt')) as f:\n descr = f.read()\n filenames = [os.path.join(module_path, filename)\n for filename in os.listdir(module_path)\n if filename.endswith(\".jpg\")]\n # Load image data for each image in the source folder.\n images = [imread(filename) for filename in filenames]\n\n return Bunch(images=images,\n filenames=filenames,\n DESCR=descr)", "def image_process(image_info):\n path = os.path.join(cfg.IMAGESET, image_info.get(\"index\") + \".jpg\")\n if not os.path.exists(path):\n raise IOError(\"please check your file is not exists: \" + path)\n def load_image(path):\n image = Image.open(path)\n return image\n return load_image(path)", "def load_isolated_images(Args):\n # load first galaxy images\n name = 'first_gal_band_wldeb_noise.fits'\n filename = os.path.join(out_dir, Args.model, name)\n Y1 = load_images(filename, ['i'], Args)\n # load second galaxy images\n name = 'second_gal_band_wldeb_noise.fits'\n filename = os.path.join(out_dir, Args.model, name)\n Y2 = load_images(filename, ['i'], Args)\n Y = {'Y1': Y1,\n 'Y2': Y2}\n return Y", "def __init__(self, images, loader):\n super().__init__()\n self._images = images\n self._loader = loader", "def hload_pil(filepath):\n img = Image.open(filepath)\n return img", "def load_data(self):\n self.tif_file = self._find_tif_file()\n if self.with_labeling is not None:\n self.colabel_file = self._find_colabeled_file()\n self.colabel_stack = self._load_colabeled_img()\n self.dff, self.indices = self._populate_dff_data()\n self.loaded = True", "def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels", "def load_images(pool, entries):\n start = time.perf_counter()\n images = pool.map(ski.io.imread, [x.path for x in entries])\n logger.info(\"Loaded %i images:\", len(images))\n util.pprint_log([x.name for x in entries], logger.info)\n logger.info(util.elapsed(start))\n logger.info(\"\\n\")\n return images", "def image_info(path):\n global working_img\n working_img = Image.open(path)\n print('=======================================')\n print(f'이미지 파일 이름:{working_img.filename}')\n print(f'이미지 파일 파일 형식:{working_img.format}')\n print(f'이미지 용량:{working_img.size}')\n print(f'이미지 색상모드:{working_img.mode}')\n print(f'이미지 
크기:{working_img.width}x{working_img.height}')", "def facts(url_file_stream_or_string):\n source = imagefacts.open_resource._open_resource(url_file_stream_or_string, _handle_url)\n data = source.read()\n return imagefacts.getimageinfo.getImageInfo(data)", "def get_image_data(imagedir, model_kwds=dict(layer='fc2'),\n img_kwds=dict(size=(224,224)), timestamps_kwds=dict(source='auto'),\n pca_kwds=None):\n fingerprints_fn = pj(imagedir, ic_base_dir, 'fingerprints.pk')\n images_fn = pj(imagedir, ic_base_dir, 'images.pk')\n if os.path.exists(images_fn):\n print(f\"reading image arrays {images_fn} ...\")\n images = read_pk(images_fn)\n else:\n print(f\"create image arrays {images_fn}\")\n images = read_images(imagedir, **img_kwds)\n write_pk(images, images_fn)\n if os.path.exists(fingerprints_fn):\n print(f\"reading fingerprints {fingerprints_fn} ...\")\n fingerprints = read_pk(fingerprints_fn)\n else:\n print(f\"create fingerprints {fingerprints_fn}\")\n fingerprints = ic.fingerprints(images, ic.get_model(**model_kwds))\n if pca_kwds is not None:\n fingerprints = ic.pca(fingerprints, **pca_kwds)\n write_pk(fingerprints, fingerprints_fn)\n print(f\"reading timestamps ...\")\n if timestamps_kwds is not None:\n timestamps = read_timestamps(imagedir, **timestamps_kwds)\n return images, fingerprints, timestamps", "def load_image(nom):\n print(\"load_image : [\", nom, \"]\")\n fic = gdal.Open(nom)\n print(fic)\n return fic.ReadAsArray(), fic.GetGeoTransform()", "def loadRes(self, resFile):\n res = COCO()\n res.dataset['images'] = [img for img in self.dataset['images']]\n\n print('Loading and preparing results...')\n tic = time.time()\n if type(resFile) == str: #or type(resFile) == unicode:\n anns = json.load(open(resFile))\n elif type(resFile) == np.ndarray:\n anns = self.loadNumpyAnnotations(resFile)\n else:\n anns = resFile\n assert type(anns) == list, 'results in not an array of objects'\n annsImgIds = [ann['image_id'] for ann in anns]\n assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \\\n 'Results do not correspond to current coco set'\n if 'caption' in anns[0]:\n imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])\n res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]\n for id, ann in enumerate(anns):\n ann['id'] = id+1\n elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n bb = ann['bbox']\n x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]\n if not 'segmentation' in ann:\n ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]\n ann['area'] = bb[2]*bb[3]\n ann['id'] = id+1\n ann['iscrowd'] = 0\n elif 'segmentation' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n # now only support compressed RLE format as segmentation results\n ann['area'] = maskUtils.area(ann['segmentation'])\n if not 'bbox' in ann:\n ann['bbox'] = maskUtils.toBbox(ann['segmentation'])\n ann['id'] = id+1\n ann['iscrowd'] = 0\n elif 'keypoints' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n s = ann['keypoints']\n x = s[0::3]\n y = s[1::3]\n x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)\n ann['area'] = (x1-x0)*(y1-y0)\n ann['id'] = id + 1\n ann['bbox'] = [x0,y0,x1-x0,y1-y0]\n print('DONE (t={:0.2f}s)'.format(time.time()- tic))\n\n res.dataset['annotations'] = 
anns\n res.createIndex()\n return res", "def main():\n labels, data = load_image_data()\n print(labels.shape, data.shape)", "def read_cliffs(self):\n cliff_list = Cliff.list()\n rtn = {}\n\n for clf in cliff_list:\n rtn[clf] = self.read_cliff(clf)\n\n return rtn", "def load_cifar_images(filename):\n\n from load_cifar import load_file\n from load_cifar import label_dict\n\n data,labels = load_file(filename)\n\n # two classes to keep\n class0 = label_dict['airplane']\n class1 = label_dict['bird']\n # remove all but two classes\n keep = np.logical_or(labels==class0,labels==class1)\n data = data[keep,...]\n labels = labels[keep]\n # set labels to 0 or 1\n labels[labels==class0]=0\n labels[labels==class1]=1\n\n # rgb -> grayscale\n gray_data = rgb2gray(data)\n return data,gray_data,labels", "def _get_im(self, idx):\n # load images\n path = self.uids[idx]\n img = self._load_im(path)\n\n # get information of each instance (e.g., tree) in a given image.\n # Each instance has its own row in the csv file,\n # so they need to be regrouped according to their path.\n groups = self.df.groupby('rgb_path')\n instances = groups.get_group(path) # contains all instances in given image\n\n num_objs = len(instances)\n boxes = [0.0] * num_objs\n labels = torch.zeros((num_objs,), dtype=torch.int64)\n #extras: cannot take string\n# uid = [''] * num_objs\n# sci_name = [''] * num_objs\n# nlcd_class = [''] * num_objs\n for i in range(num_objs):\n# import pdb; pdb.set_trace()\n boxes[i] = [instances.xmin.iloc[i], instances.ymin.iloc[i],\n instances.xmax.iloc[i], instances.ymax.iloc[i]]\n# uid[i] = self.df.uid.iloc[idx]\n# sci_name[i] = instances.scientific_name.iloc[i]\n# nlcd_class[i] = instances.nlcd_class.iloc[i]\n if self.object_rec == False:\n labels[i] = float(instances.class_id.iloc[i])\n\n if self.object_rec == True: # overwrite labels for object recognition task\n labels = torch.ones((num_objs,), dtype=torch.int64)\n\n boxes = torch.as_tensor(boxes, dtype=torch.float32)\n image_id = torch.tensor([idx])\n # for pycocotools MAP evaluation metric\n area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])\n iscrowd = torch.zeros((num_objs,), dtype=torch.int64)\n\n target = {}\n target[\"boxes\"] = boxes\n target[\"labels\"] = labels\n target[\"image_id\"] = image_id\n target[\"area\"] = area\n target[\"iscrowd\"] = iscrowd\n #extras: cannot take string\n# target[\"site_id\"] = instances.site_id.iloc[0]\n# target[\"uid\"] = uid\n# target[\"sci_name\"] = sci_name\n# target[\"nlcd_class\"] = nlcd_class\n \n if self.transforms is not None:\n img, target = self.transforms(img, target)\n\n return img, target", "def load_image(self, image_index):\n\t\t\timage_info = self.coco.loadImgs(self.image_ids[image_index])[0]\n\t\t\tpath = os.path.join(self.data_dir, 'images', self.set_name, image_info['file_name'])\n\t\t\treturn read_image_bgr(path)", "def get_images_and_labels_nc():\n refs = get_ref_df()\n images = {}\n for _, data in refs.iterrows():\n if data['ProbeFileName'] in images:\n continue\n im = data['ProbeFileName']\n images[im] = 1 if data['IsTarget'] == 'Y' else 0\n return images", "def _load_images(self, resolutions=None):\n images = {}\n\n for block, url in self.image_declarations:\n file_name = normalize_filename(url)\n if file_name not in images:\n img_resolutions = {}\n img = Image.open(file_name)\n img_resolutions[1] = img\n width, height = img.size\n\n if resolutions:\n for resolution in resolutions:\n # Get the correct filename for this resolution\n if resolution != 1:\n root, ext = 
os.path.splitext(file_name)\n res_file_name = '{root}-{resolution}x{ext}'.format(\n root=root, resolution=resolution, ext=ext)\n\n img = Image.open(res_file_name)\n if img.size[0] / resolution != width:\n raise ValueError('Invalid width for {0}'.format(\n res_file_name))\n if img.size[1] / resolution != height:\n raise ValueError('Invalid height for {0}'.format(\n res_file_name))\n img_resolutions[resolution] = img\n\n images[file_name] = img_resolutions\n\n return images", "def get_image_data():\n #mac\n #user_images = [i.replace('static/img/', \"\") for i in glob.glob('static/img/*.png')]\n #pc\n #user_images = [i.replace('static\\\\img\\\\', \"\") for i in glob.glob('static\\\\img\\\\*.png')]\n user_images = [i.replace('static/img/', \"\") for i in glob.glob('static/img/*.png')]\n sports = [inflection.titleize(i.replace('.png', \"\").capitalize().replace(\"_\", \" \")) + \"!\" for i in user_images]\n data = list(zip(sports, user_images))\n return data", "def load(self, file, lazy=True):\n # individual files for each slice\n # we got one file, nice!\n \n if not lazy:\n\n if file in self.imagedict.keys():\n return self.imagedict[file]\n else:\n self.imagedict[file] = self.load(file, True)\n self.imagedict[file] *= 1\n return self.imagedict[file]\n \n else:\n \n ending = splitext(file)[-1].lower()\n if ending in ['.nii', '.hdr', '.nii.gz', '.gz']:\n if self.correct_orientation:\n vol = ni.open_image(file, verbose=False)\n self.affine = vol.get_aligned_transformation(\"RAS\")\n data = vol.aligned_volume\n else:\n f = nib.load(file)\n self.affine = f.affine\n self.pixdim = np.asarray(f.header['pixdim'][1:])\n data = f.get_data()\n return data\n # elif ending in ['.nrrd', '.nhdr']:\n # if self.correct_orientation:\n # vol = nr.open_image(file, verbose=False)\n # self.affine = vol.get_aligned_transformation(\"RAS\")\n # f = vol.aligned_volume\n # else:\n # try:\n # f, h = nrrd.read(file)\n # except:\n # print('could not read file {}'.format(file))\n # logging.getLogger('data').error('could not read file {}'.format(file))\n # raise Exception('could not read file {}'.format(file))\n # self.affine = np.eye(4)\n # return f\n # elif ending in ['.dcm']:\n # f = pydicom.dcmread(file).pixel_array\n # return f\n # elif ending in ['.mha', '.mhd']:\n # f = skio.imread(file, plugin='simpleitk')\n # self.affine = np.eye(4)\n # return f\n elif ending in ['.png', '.pgm', '.pnm']:\n data = imread(file)\n if len(data.shape) > 2:\n return np.transpose(data, [2, 0, 1])\n else:\n return data\n return imread(file)\n else:\n raise Exception('{} not known'.format(ending))", "def image_data_info(page):\n xObject = page['/Resources']['/XObject'].getObject()\n\n for obj_key in xObject:\n obj = xObject[obj_key]\n if obj['/Subtype'] == '/Image':\n width, height = (obj['/Width'], obj['/Height'])\n num_bytes = len(obj._data)\n density = num_bytes * 1.0 / (width * height)\n return {'width': width, 'height': height, 'size': num_bytes, 'density': density}\n\n return None", "def get_data(input_path):\n all_imgs = []\n classes_count = {}\n class_mapping = {}\n\n # parsing Flag\n visualise = False\n\n # MSCOCO directory\n data_path = input_path\n\n print('Parsing annotation files')\n annot_path = os.path.join(data_path, 'annotations_bbox')\n imgs_path = os.path.join(data_path, 'images')\n\n # images directory (train, val, trainval, test)\n imgsets_path_trainval = os.path.join(data_path, 'images', 'trainval.txt')\n imgsets_path_train = os.path.join(data_path, 'images', 'train.txt')\n imgsets_path_val = os.path.join(data_path, 
'images', 'val.txt')\n imgsets_path_test = os.path.join(data_path, 'images', 'test.txt')\n\n trainval_files = []\n train_files = []\n val_files = []\n test_files = []\n\n with open(imgsets_path_trainval) as f:\n for line in f:\n trainval_files.append(line.strip())\n\n with open(imgsets_path_train) as f:\n for line in f:\n train_files.append(line.strip())\n\n with open(imgsets_path_val) as f:\n for line in f:\n val_files.append(line.strip())\n\n # test-set (default) not included in MSCOCO\n if os.path.isfile(imgsets_path_test):\n with open(imgsets_path_test) as f:\n for line in f:\n test_files.append(line.strip())\n\n # annotation read\n annots_train = json.load(open(os.path.join(annot_path, 'bbox_train2017.json'), 'r'))\n annots_val = json.load(open(os.path.join(annot_path, 'bbox_val2017.json'), 'r'))\n annots = dict()\n annots['train'] = annots_train\n annots['val'] = annots_val\n\n for part in ['train', 'val']:\n annots_keys = tqdm(annots[part].keys())\n for img_name in annots_keys:\n annots_keys.set_description(\"Processing %s\" % img_name)\n for bbox in annots[part][img_name]:\n class_name = bbox['label'].replace(' ', '')\n all_imgs.append({\n \"filepath\": os.path.join(data_path, 'images', '%s2017' % part, \"%s.jpg\" % img_name),\n \"width\": None,\n \"height\": None,\n \"bboxes\": [{\n \"class\": class_name,\n \"x1\": bbox['bbox']['x1'],\n \"y1\": bbox['bbox']['x2'],\n \"x2\": bbox['bbox']['y1'],\n \"y2\": bbox['bbox']['y2'],\n \"difficult\": False\n }],\n \"image_id\": img_name,\n \"imageset\": part\n })\n if class_name not in classes_count:\n classes_count[class_name] = 1\n else:\n classes_count[class_name] += 1\n if class_name not in class_mapping:\n class_mapping[class_name] = len(class_mapping)\n\n # visualise bounding boxes\n if visualise:\n img = cv2.imread(annotation_data['filepath'])\n for bbox in annotation_data['bboxes']:\n cv2.rectangle(img, (bbox['x1'], bbox['y1']), (bbox['x2'], bbox['y2']), (0, 0, 255))\n cv2.imshow('img', img)\n print(annotation_data['imageset'])\n cv2.waitKey(0)\n\n return all_imgs, classes_count, class_mapping", "def readImages(self):\r\n\r\n #Read the file camera.csv for the image file name\r\n lines = [line.strip() for line in open(self.cameraFile)]\r\n i = 0;\r\n\tself.centers = []\r\n\tself.lefts = []\r\n\tself.rights = []\r\n\r\n for line in lines:\r\n info = line.split(',')\r\n \r\n\r\n if info[0] == 'seq':\r\n i += 1\r\n continue\r\n \r\n if info[4] == 'left_camera':\r\n self.lefts.append(info)\r\n if info[4] == 'center_camera':\r\n self.centers.append(info)\r\n if info[4] == 'right_camera':\r\n self.rights.append(info)\r\n i += 1\r\n\r\n print \"Total Frames: %d \" % (len(self.centers))", "def load_dataset():\n # Get the start time\n start_time = time.time()\n\n # Load dataset YAML file\n # This contains all of our image labels, as well as locations of the images themself\n print(\"Reading dataset/dataset.yaml... 
\", end=\"\")\n with open(\"dataset/dataset.yaml\", \"r\") as file:\n dataset = yaml.safe_load(file)\n\n # Get paths, labels\n paths = []\n labels = []\n for sample in dataset:\n # Assign a \"1\" label if we're looking at the ground\n # 0 for everything else: trees, buildings, cars, etc\n label_semantic = max(sample[\"labels\"].keys(), key=sample[\"labels\"].get)\n if max(sample[\"labels\"].values()) < 0.80:\n # Samples that are not obviously in any one category: unsafe\n label=0\n elif label_semantic == \"GROUND\":\n # Safe if >80% ground\n label = 1\n else:\n # Unsafe otherwise, this is usually water\n label = 0\n\n paths.append(sample[\"path\"])\n labels.append(label)\n print(\"done!\", flush=True)\n\n print(\"Loading images\", end=\"\")\n # Get images\n images = np.zeros((len(paths), 128, 128, 3), dtype=np.float32)\n progress = 0.0\n for i, path in enumerate(paths):\n images[i] = np.array(PIL.Image.open(path).resize((128, 128))) / 255.0\n if i / len(paths) > progress:\n progress += 1.0 / 20.0\n print(\".\", end=\"\", flush=True)\n print(\" done!\")\n labels = np.array(labels, dtype=np.int)\n\n # Return\n print(f\"Loaded {len(images)} images in {time.time() - start_time} seconds!\")\n return images, labels", "def load_infos(self):\n xml = self.api.photos_getInfo(photo_id=self.id)\n xml = xml.find(\"photo\")\n out = xml.attrib\n out[\"title\"] = xml.find(\"title\").text\n out[\"description\"] = xml.find(\"description\").text\n out[\"dates\"] = xml.find(\"dates\").attrib\n\n # Load urls\n out[\"urls\"] = {}\n for url_xml in xml.find(\"urls\").findall(\"url\"):\n out[\"urls\"][url_xml.attrib[\"type\"]] = url_xml.text\n\n # Load tags\n out[\"tags\"] = []\n for tag_xml in xml.find(\"tags\").findall(\"tag\"):\n tag = tag_xml.attrib\n tag[\"tag\"] = tag_xml.text\n out[\"tags\"].append(tag)\n\n return out", "def _load_colabeled_img(self) -> np.ndarray:\n return tifffile.imread(str(self.colabel_img))", "def iiif_info_json(images):\n return json.dumps([image[\"image\"].info() for image in images])", "def load_cifar(hparams):\n all_labels = []\n\n total_batches_to_load = 5\n assert hparams.train_size + hparams.validation_size <= 50000\n if hparams.eval_test:\n total_batches_to_load += 1\n # Determine how many images we have loaded\n total_dataset_size = 50000\n train_dataset_size = total_dataset_size\n if hparams.eval_test:\n total_dataset_size += 10000\n\n if hparams.dataset == 'cifar10':\n all_images = []\n elif hparams.dataset == 'cifar100':\n all_images = np.empty((1, 50000, 3072), dtype=np.uint8)\n if hparams.eval_test:\n test_data = np.empty((1, 10000, 3072), dtype=np.uint8)\n if hparams.dataset == 'cifar10':\n datafiles = [\n 'data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4',\n 'data_batch_5']\n\n if hparams.eval_test:\n datafiles.append('test_batch')\n num_classes = 10\n elif hparams.dataset == 'cifar100':\n datafiles = ['train']\n if hparams.eval_test:\n datafiles.append('test')\n num_classes = 100\n else:\n raise NotImplementedError('Unimplemented dataset: ', hparams.dataset)\n if hparams.dataset != 'test':\n for file_num, f in enumerate(datafiles):\n d = unpickle(os.path.join(hparams.data_path, f))\n if hparams.dataset == 'cifar10':\n labels = np.array(d['labels'])\n else:\n labels = np.array(d['fine_labels'])\n if f == 'test':\n test_data[0] = copy.deepcopy(d['data'])\n if hparams.dataset == 'cifar10':\n all_images.append(test_data)\n else:\n all_images = np.concatenate([all_images, test_data], axis=1)\n else:\n if hparams.dataset == 'cifar10':\n 
all_images.append(copy.deepcopy(d['data']))\n else:\n all_images[file_num] = copy.deepcopy(d['data'])\n nsamples = len(labels)\n for idx in range(nsamples):\n all_labels.append(labels[idx])\n if hparams.dataset == 'cifar10':\n all_images = np.concatenate(all_images, axis=0)\n all_images = all_images.reshape(-1, 3072)\n all_images = all_images.reshape(-1, 3, 32, 32) # pylint: disable=too-many-function-args\n all_images = all_images.transpose(0, 2, 3, 1).copy()\n all_images = all_images / 255.0\n mean = augmentation_transforms.MEANS\n std = augmentation_transforms.STDS\n tf.logging.info('mean:{} std: {}'.format(mean, std))\n all_images = (all_images - mean) / std\n all_labels = np.eye(num_classes)[np.array(all_labels, dtype=np.int32)]\n\n assert len(all_images) == len(all_labels)\n tf.logging.info(\n 'In CIFAR10 loader, number of images: {}'.format(len(all_images)))\n\n extra_test_images = None\n extra_test_labels = None\n if hparams.extra_dataset == 'cifar10_1':\n extra_test_ds = tfds.as_numpy(\n tfds.load('cifar10_1', split='test', batch_size=-1))\n extra_test_images = ((extra_test_ds['image'] / 255.0) - mean) / std\n extra_test_labels = np.eye(num_classes)[np.array(\n extra_test_ds['label'], dtype=np.int32)]\n\n # Break off test data\n if hparams.eval_test:\n test_images = all_images[train_dataset_size:]\n test_labels = all_labels[train_dataset_size:]\n else:\n test_images = []\n test_labels = []\n all_images = all_images[:train_dataset_size]\n all_labels = all_labels[:train_dataset_size]\n return all_images, all_labels, test_images, test_labels, extra_test_images, extra_test_labels", "def _load_metadata_from_asset():\n\n with rasterio.Env(AWS_NO_SIGN_REQUEST='YES',\n GDAL_DISABLE_READDIR_ON_OPEN='EMPTY_DIR'):\n with rasterio.open(href) as src:\n # Retrieve metadata stored in COG file\n metadata = src.profile\n metadata.update(src.tags())\n metadata['shape'] = src.shape\n\n # Retrieve COG CRS. Note: these COGs do not appear to have CRS info that can be\n # accessed via the .crs method. 
If this occurs assume it is in WGS84.\n # All COGs in AWS appear to be projected in WGS84.\n if src.crs is None:\n metadata['crs'] = rasterio.crs.CRS.from_epsg(4326)\n else:\n metadata['crs'] = src.crs\n\n # Compute bounding box, image footprint, and gsd\n bbox, footprint, metadata = _get_geometries(src, metadata)\n\n # Derive some additional metadata from the filename\n fname = os.path.basename(href)\n metadata = _parse_filename(fname, metadata)\n\n return metadata, bbox, footprint", "def load_defects(self, val_dir):\n \n img_list_1 = os.listdir(val_dir+'/'+'1')\n img_list_2 = os.listdir(val_dir+'/'+'2')\n img_list_3 = os.listdir(val_dir+'/'+'3')\n img_list_4 = os.listdir(val_dir+'/'+'4')\n\n\n\n img_list_1 = self.make_imgs_list(val_dir + '/' + '1', img_list_1)\n img_list_2 = self.make_imgs_list(val_dir + '/' + '2', img_list_2)\n img_list_3 = self.make_imgs_list(val_dir + '/' + '3', img_list_3)\n img_list_4 = self.make_imgs_list(val_dir + '/' + '4', img_list_4)\n\n\n img_list_1 = self.load_imgsLabels(img_list_1)\n img_list_2 = self.load_imgsLabels(img_list_2)\n img_list_3 = self.load_imgsLabels(img_list_3)\n img_list_4 = self.load_imgsLabels(img_list_4)\n\n\n img_list_1 = self.features_to_np_array(img_list_1)\n img_list_2 = self.features_to_np_array(img_list_2)\n img_list_3 = self.features_to_np_array(img_list_3)\n img_list_4 = self.features_to_np_array(img_list_4)\n\n lbl_list_1 = img_list_1.shape[0]*[1]\n lbl_list_2 = img_list_2.shape[0]*[2]\n lbl_list_3 = img_list_3.shape[0]*[3]\n lbl_list_4 = img_list_4.shape[0]*[4]\n\n\n imgs = np.concatenate((img_list_1, img_list_2, img_list_3, img_list_4))\n lbls = lbl_list_1 + lbl_list_2 + lbl_list_3 + lbl_list_4\n\n\n lbls = np.array(lbls)\n \n lbls = lbls - 1\n \n lbls = to_categorical(lbls)\n \n return imgs, lbls", "def load_images(tags_pict):\n img_data_list = []\n for p in tags_pict.index :\n img_path = tags_pict.full_path[p]\n img = load_img(img_path, target_size= inputShape)\n x = img_to_array(img)\n x = np.expand_dims(img, axis=0)\n # pre-process the image using the appropriate function based on the\n # model that has been loaded (i.e., mean subtraction, scaling, etc.)\n x = preprocess_input(x)\n img_data_list.append(x)\n img_data = np.array(img_data_list)\n img_data=np.rollaxis(img_data,1,0)\n img_data=img_data[0]\n return(img_data)", "def __loadKeys(self):\n key_image_file_names = os.listdir(self.key_image_full_path)\n\n self.maple_logger.info(\"Loading {0} keys.\", len(key_image_file_names))\n\n for key_image_file_name in key_image_file_names:\n self.__loadKey(key_image_file_name)", "def load_images(image_filename):\n\n # Write code here to loop over image data and populate DB.", "def load_image(self, image_id):\n info = self.image_info[image_id]\n label_path = info['path']\n\n # 读取json文件\n with open(os.path.join(self.DATA_ROOT_DIR, label_path), encoding='utf-8') as json_file:\n labelmeJson = json.load(json_file)\n # height = labelmeJson['imageHeight']\n # width = labelmeJson['imageWidth']\n # shape_list = labelmeJson['shapes']\n image = self.img_b64_to_arr(labelmeJson['imageData'])\n # bg_color = np.array(info['bg_color']).reshape([1, 1, 3])\n # image = np.ones([labelmeJson['height'], labelmeJson['width'], 3], dtype=np.uint8)\n # image = image * bg_color.astype(np.uint8)\n #\n # for shape, color, dims in info['shapes']:\n # image = self.draw_shape(image, shape, dims, color)\n\n return image", "def make_image_dict(self):\n sprite_sheet = setup.GFX['treasurechest']\n image_dict = {'closed': self.get_image(0, 0, 32, 32, sprite_sheet),\n 
'opened': self.get_image(32, 0, 32, 32, sprite_sheet)}\n\n return image_dict", "def __make_processing(self, img_name, abspath_dir_img, id_foot):\n data = {}\n data['data'] = ImageInfo.get_date(abspath_dir_img)\n data['total_part'] = TOTAL_PART\n data['nuvens'] = ImageInfo.get_cloud(abspath_dir_img)\n self.__make_tms(abspath_dir_img)\n data['geom'] = self.__make_footprint(abspath_dir_img, shp_out=id_foot)\n abspath_rgb, img_name_rgb = ImageInfo.get_image_rgb(\n abspath_dir_img, img_name\n )\n data['tms'] = ImageInfo.get_xml_tms(img_name_rgb)\n data['image'] = img_name_rgb\n data['quicklook'] = self.__make_png(abspath_rgb)\n data['path'] = ImageInfo.get_path(img_name)\n return data", "def load_info_file(info):\n with open(info, \"r\") as info_file:\n info = json.load(info_file)\n input_height = info['input_height']\n input_width = info['input_width']\n input_layer = info['input_layer']\n output_layer = info['output_layer']\n labels = info['labels']\n return input_height, input_width, input_layer, output_layer, labels", "def getAllForImages(self):\n imageDict = {}\n for id, name in self.getAll().items():\n imageDict[id] = {}\n imageDict[id][\"name\"] = name\n imageDict[id][\"filename\"] = \"The_Steamer_Great_Western_small.jpg\"\n\n return imageDict", "def load_labeled_data():\n\n images = []\n labels = []\n\n for i in range(1, 10):\n path = (\"selflabeled\", str(i), \"*.jpg\")\n filenames = glob.glob(\"/\".join(path))\n images_one_type = [cv2.imread(img) for img in filenames]\n labels_one_type = [i] * len(images_one_type)\n images += images_one_type\n labels += labels_one_type\n\n return images, labels", "def load(image_path):\n out = None\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n # Use skimage io.imread\n out = io.imread(image_path)\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return out", "def load_image(self, image_id):\n # Load image\n# print(self.image_info[image_id]['path'])\n image = cv2.imread(self.image_info[image_id]['path'],cv2.IMREAD_GRAYSCALE) \n image = image[:,:, np.newaxis] #Add 1 dimension for grayscale images\n return image", "def load(self):\n\n # get files in folder\n files = [f for f in listdir(self.data_path)]\n print(\"loading images from folder: %s\" % self.data_path)\n\n images = []\n image_targets = []\n for f in files:\n filepath = path.join(self.data_path, f)\n images.append(io.imread(filepath, as_grey=True))\n image_targets.append(self.target)\n\n # define new size and resize images\n new_size = (2 ** self.size_exponent, 2 ** self.size_exponent)\n for i in range(0, len(images)):\n # images[i] = transform.resize(images[i], new_size)\n images[i] = misc.imresize(images[i], new_size) / 16\n\n self.images = images\n self.targets = image_targets", "def _load_data_worker(self,img_dir,lbl_dir):\n data = []\n\n for img,lbl in zip(glob(img_dir+\"/*.jpg\"),glob(lbl_dir+\"/*.txt\")):\n im = np.array(Image.open(img))\n im = make_square_image_with_padding(im, self.core_config.num_colors)\n lbl_fh = open(lbl,encoding='utf-8')\n\n objects = self._get_objects(lbl_fh)\n sorted_objects = sort_object_list(objects)\n object_class = self._get_object_classes(sorted_objects)\n \n image_with_objects = {\n 'img':im,\n 'objects':sorted_objects,\n 'object_class': object_class\n }\n\n image_with_mask = convert_to_mask(image_with_objects, self.core_config)\n\n data.append(image_with_mask)\n lbl_fh.close()\n\n return data", "def _getImage(self, img):\n\n # lazily 
fill in some attributes\n if not 'local_file_path' in img:\n img['local_file_path'] = os.path.join(self.image_root, img['filename'])\n if not 'feat' in img: # also fill in the features\n # NOTE: imgid is an integer, and it indexes into features\n fn = os.path.basename(img['filename'])\n return img", "def get_imgid_dict(ann):\n return {item[1][\"file_name\"]: item[0] for item in ann.imgs.items()}", "def _load_image(self, filename):\n\n path = filename.split(\"/\")\n image_id = path[len(self.directory.split(\"/\")) - 1]\n\n try:\n img = imread(filename)[:, :, :self.num_channels]\n except IndexError:\n tmp = imread(filename)\n img = np.stack([tmp] * 3).transpose(1, 2, 0)\n orig_shape = img.shape[:2]\n img = self._process(img)\n\n masks = np.zeros(self.imsize)\n\n # Load training labels if we're loading a training dataset\n if self.train:\n masks = self._load_mask(image_id)\n\n return (img, masks, image_id, orig_shape)", "def load_image(self, image_id):\n # Load image\n path = self.image_info[image_id]['path']\n if path.endswith(\".png\" or \".jpg\"):\n image = skimage.io.imread(path)\n elif path.endswith(\".dcm\"):\n ds = pydicom.read_file(path)\n image = ds.pixel_array\n # If grayscale. Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image", "def main():\n images = Images()\n #print images.create_image_urls()\n print images.get_image_random()\n print images.get_image(12)", "def _getAllMeta(self):\n try:\n metadata = pyexiv2.ImageMetadata(self.imagePath)\n metadata.read()\n return metadata\n except:\n print 'error reading meta data'\n return None", "def load_image(self):\n\n if self.image_file is None:\n raise IOError(\"Set image_file before calling this method\")\n self.hdu = fits.open(self.image_file)[0]\n self.wcs = astropy_wcs.WCS(self.hdu.header)\n self.header = self.hdu.header", "def load_imgsLabels(self, image_paths):\n \n# label = image_paths[-1]\n \n images = self.load_images(image_paths)\n \n images = self.resize_images(images)\n \n images_list = self.greyscale_images(images)\n\n return images_list", "def get_icons():\n ICONS = {\n \"http://files.heuritech.com/raw_files/surfrider/bottle.png\" : \".mot/resources/bottle.png\",\n \"http://files.heuritech.com/raw_files/surfrider/fragment.png\" : \".mot/resources/fragment.png\",\n \"http://files.heuritech.com/raw_files/surfrider/other.png\" : \".mot/resources/other.png\"\n }\n\n home = os.path.expanduser(\"~\")\n if not os.path.isdir(os.path.join(home, \".mot/\")):\n os.mkdir(os.path.join(home, \".mot/\"))\n if not os.path.isdir(os.path.join(home, \".mot/resources\")):\n os.mkdir(os.path.join(home, \".mot/resources\"))\n\n for k,v in ICONS.items():\n path = os.path.join(home, v)\n if not os.path.isfile(path):\n wget.download(k, path)\n print(\"\\ndownloaded to \", path)\n return [cv2.imread(filename,-1) for filename in [os.path.join(home, \".mot/resources/bottle.png\"),\n os.path.join(home, \".mot/resources/fragment.png\"),\n os.path.join(home, \".mot/resources/other.png\")]]" ]
[ "0.69167143", "0.67644393", "0.65785575", "0.64346397", "0.64260566", "0.6405014", "0.6335255", "0.630171", "0.63005936", "0.62530017", "0.6173102", "0.61003965", "0.6039576", "0.60270804", "0.6020482", "0.6004541", "0.59933895", "0.59829694", "0.59729624", "0.5952639", "0.59442675", "0.59319264", "0.59317786", "0.59317786", "0.59317786", "0.5917408", "0.5902042", "0.5900744", "0.5887995", "0.5884901", "0.5875651", "0.5874125", "0.5860211", "0.5858661", "0.5830783", "0.5825329", "0.58082646", "0.5802549", "0.5800711", "0.5798126", "0.5754451", "0.57519823", "0.57476753", "0.5741282", "0.57412374", "0.5738697", "0.5715557", "0.5704954", "0.56971705", "0.5685742", "0.5685667", "0.5664711", "0.56642723", "0.5639404", "0.56378955", "0.56308293", "0.5625614", "0.5606042", "0.55953866", "0.5587613", "0.5582818", "0.55827636", "0.5565751", "0.5555637", "0.55534893", "0.555097", "0.55481297", "0.55441856", "0.55406165", "0.5537988", "0.5537821", "0.5534475", "0.5534211", "0.5531367", "0.55300695", "0.5529096", "0.5528847", "0.55243814", "0.552384", "0.55182487", "0.551801", "0.55171615", "0.5515738", "0.55032736", "0.5499111", "0.5494204", "0.54886574", "0.5481941", "0.5475751", "0.54703575", "0.54691255", "0.5466949", "0.5458597", "0.5452913", "0.54432523", "0.544146", "0.54344517", "0.54337496", "0.54286975", "0.5427741", "0.54265106" ]
0.0
-1
Saves given sequence of images into .mraw file.
def save_mraw(images, save_path, bit_depth=16, ext='mraw', info_dict={}): filename, extension = path.splitext(save_path) mraw_path = '{:s}.{:s}'.format(filename, ext) cih_path = '{:s}.{:s}'.format(filename, '.cih') directory_path = path.split(save_path)[0] if not path.exists(directory_path): os.makedirs(directory_path) bit_depth_dtype_map = { 8: np.uint8, 16: np.uint16 } if bit_depth not in bit_depth_dtype_map.keys(): raise ValueError('Currently supported bit depths are 8 and 16.') if bit_depth < 16: effective_bit = bit_depth else: effective_bit = 12 if np.max(images) > 2**bit_depth-1: raise ValueError( 'The input image data does not match the selected bit depth. ' + 'Consider normalizing the image data before saving.') # Generate .mraw file with open(mraw_path, 'wb') as file: for image in images: image = image.astype(bit_depth_dtype_map[bit_depth]) image.tofile(file) file_shape = (int(len(images)), image.shape[0], image.shape[1]) file_format = 'MRaw' image_info = {'Record Rate(fps)': '{:d}'.format(1), 'Shutter Speed(s)': '{:.6f}'.format(1), 'Total Frame': '{:d}'.format(file_shape[0]), 'Original Total Frame': '{:d}'.format(file_shape[0]), 'Start Frame': '{:d}'.format(0), 'Image Width': '{:d}'.format(file_shape[2]), 'Image Height': '{:d}'.format(file_shape[1]), 'Color Type': 'Mono', 'Color Bit': bit_depth, 'File Format' : file_format, 'EffectiveBit Depth': effective_bit, 'Comment Text': 'Generated sequence. Modify measurement info in created .cih file if necessary.', 'EffectiveBit Side': 'Lower'} image_info.update(info_dict) cih_path = '{:s}.{:s}'.format(filename, 'cih') with open(cih_path, 'w') as file: file.write('#Camera Information Header\n') for key in image_info.keys(): file.write('{:s} : {:s}\n'.format(key, str(image_info[key]))) return mraw_path, cih_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(images, output):\n for image, frame in images:\n image.save(output(frame))", "def save_step_1(imgs, output_path='./output/step1'):\n # ... your code here ...\n i=0\n for each in imgs:\n i+=1\n cv2.imwrite(output_path+\"/output\"+str(i)+\".jpg\", each)", "def saveFrames(filepath, frames):\n\n for i, frame in enumerate(frames):\n image = Image.fromarray(frame)\n image.save(filepath + str(i).zfill(8) + '.png')", "def save_images(images, filenames, output_dir):\n for i, filename in enumerate(filenames):\n # Images for inception classifier are normalized to be in [-1, 1] interval,\n # so rescale them back to [0, 1].\n with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:\n imsave(f, (images[i] + 1.0) * 0.5, format='png')", "def save_images(images, filenames, output_dir):\n for i, filename in enumerate(filenames):\n # Images for inception classifier are normalized to be in [-1, 1] interval,\n # so rescale them back to [0, 1].\n with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:\n imsave(f, (images[i, :, :, :] + 1.0) * 0.5, format='png')", "def save_images(images, filenames, output_dir):\n for i, filename in enumerate(filenames):\n # Images for inception classifier are normalized to be in [-1, 1] interval,\n # so rescale them back to [0, 1].\n with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:\n imsave(f, (images[i, :, :, :] + 1.0) * 0.5, format='png')", "def save_images(images, filenames, output_dir):\n for i, filename in enumerate(filenames):\n # Images for inception classifier are normalized to be in [-1, 1] interval,\n # so rescale them back to [0, 1].\n with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:\n img = np.round(((images[i] + 1.0) * 0.5) * 255.0).astype(np.uint8)\n Image.fromarray(img).save(f, format='PNG')", "def save_images(images, filenames, output_dir):\n for i, filename in enumerate(filenames):\n\n with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:\n image = images[i, :, :, :]\n image[:, :, 0] += _R_MEAN\n image[:, :, 1] += _G_MEAN\n image[:, :, 2] += _B_MEAN\n image = imresize(image, [299, 299])\n imsave(f, image, format='png')", "def test_save_images(self):\n save_file(self.quart.save_images, to_single_file=False)", "def save_step_4(imgs, output_path=\"./output/step4\"):\n # ... your code here ...\n cv2.imwrite(output_path+\"/output.jpg\", imgs)", "def save_images(images, filenames, output_dir):\n for i, filename in enumerate(filenames):\n # Images for inception classifier are normalized to be in [-1, 1] interval,\n # so rescale them back to [0, 1].\n with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:\n img = (((images[i, :, :, :] + 1.0) * 0.5) * 255.0).astype(np.uint8)\n Image.fromarray(img).save(f, format='PNG')", "def save_images(img_list, img_saving_path, label_list, label_saving_path):\n img_index = SAVING_INDEX\n label_index=SAVING_INDEX\n for img in img_list:\n img.save(img_saving_path + str(img_index) + '.png', 'PNG')\n img_index+=1\n for label in label_list:\n label.save(label_saving_path + str(label_index) + '.png', 'PNG')\n label_index += 1", "def write_frames(self, images):\n for img in images:\n self.write_frame(img)", "def save_step_2(imgs, match_list, output_path=\"./output/step2\"):\n # ... 
your code here ...\n for i in range(len(imgs)):\n name1,tail1 = str.split(filenames[match_list[i][0]],\".\")\n name2,tail2 = str.split(filenames[match_list[i][2]],\".\")\n cv2.imwrite(output_path+\"/\"+name1+\"_\"+str(match_list[i][1])+\"_\"+name2+\"_\"+str(match_list[i][3])+\"_\"+str(match_list[i][4])+\".jpg\", imgs[i])", "def save_images(self, step, images):\n\n # Save\n with self.summary_writer.as_default():\n for name, batch in images.items():\n image = batch[0]\n image = tf.expand_dims(image, axis=0)\n tf.summary.image(name, image, step)", "def save_images(out_dir, names, pred_trimaps_softmax, pred_mattes_u, gt_trimap_3, logger=logging.getLogger('utils')):\n matte_path = os.path.join(out_dir, 'matte')\n matte_u_path = os.path.join(out_dir, 'matte_u')\n trimap_path = os.path.join(out_dir, 'trimap')\n\n os.makedirs(matte_path, exist_ok=True)\n os.makedirs(matte_u_path, exist_ok=True)\n os.makedirs(trimap_path, exist_ok=True)\n\n # logger.debug(f'Saving {len(names)} images to {out_dir}')\n\n for idx, name in enumerate(names):\n if pred_mattes_u is not None:\n matte_u = pred_mattes_u[idx]\n save_path = os.path.join(matte_u_path, name)\n torchvision.utils.save_image(matte_u, save_path)\n\n if pred_trimaps_softmax is not None:\n trimap = pred_trimaps_softmax[idx]\n trimap = trimap.argmax(dim=0)\n trimap = trimap / 2.\n save_path = os.path.join(trimap_path, name)\n torchvision.utils.save_image(trimap, save_path)\n\n if pred_mattes_u is not None:\n if pred_trimaps_softmax is None:\n trimap = gt_trimap_3[idx].argmax(dim=0)\n trimap = trimap / 2.\n\n matte = matte_u\n matte[(trimap == 1.).unsqueeze(0)] = 1.\n matte[(trimap == 0.).unsqueeze(0)] = 0.\n\n save_path = os.path.join(matte_path, name)\n torchvision.utils.save_image(matte, save_path)", "def save_test_images(images):\n for description, img in images.items():\n save_to_image(img, description)\n save_to_netcdf(img, description)", "def save_images(images, filenames, output_dir):\n for i, filename in enumerate(filenames):\n dirname = os.path.dirname(filename)\n\n if dirname!='':\n dirpath = os.path.join(output_dir, dirname)\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n\n # Images for inception classifier are normalized to be in [-1, 1] interval,\n # so rescale them back to [0, 1].\n with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:\n img = (((images[i, :, :, :] + 1.0) * 0.5) * 255.0).astype(np.uint8)\n Image.fromarray(img).save(f)", "def saveImages(image_list, name_list, path):\n\ti = 0\n\tfor image in image_list:\n\t\tname = name_list[i]\n\t\tio.imsave(path + \"/\" + name + \".jpg\", image)\n\t\ti += 1", "def saveanimation(frames,address=\"./movie.gif\"):\n imageio.mimsave(address, frames)", "def save_result(save_path, npyfile):\n for i, item in enumerate(npyfile):\n img = item[:, :, 0]\n io.imsave(os.path.join(save_path, '%d_pred.png' % i), img)", "def save_minibatch(self, examples, labels):\n # First make both examples and labels into numpy arrays\n examples = examples.cpu().numpy()\n labels = labels.cpu().numpy()\n\n # Make a name for the files\n random_string = str(random.random())[2:] # DO THIS BETTER WHEN I HAVE INTERNET\n\n # Save both files\n example_file = '%s.examples.npy' % random_string\n example_path = os.path.join(OUTPUT_IMAGE_DIR, self.image_subdirectory,\n example_file)\n np.save(example_path, examples)\n\n label_file = '%s.labels.npy' % random_string\n label_path = os.path.join(OUTPUT_IMAGE_DIR, self.image_subdirectory,\n label_file)\n np.save(label_path, labels)", "def 
save_reconstructed_images(self, filename, rows, cols):\n # print(filename)\n num_images = self.reconstructed.shape[0]\n images = np.zeros((rows, cols*num_images))\n for i in range(num_images):\n start = int(i * cols)\n end = int((i+1) * cols)\n images[0:rows, start:end] = self.reconstructed[i].reshape((rows, cols))\n\n plt.imshow(images, cmap='gray', aspect='equal', interpolation='none')\n #plt.show()\n plt.savefig(filename)\n return", "def maybe_save_images(images, filenames):\n\n if FLAGS.output_dir is not None:\n batch_size = images.shape[0]\n for i in xrange(batch_size):\n image_array = images[i, :, :]\n file_path = os.path.join(FLAGS.output_dir, filenames[i])\n image = Image.fromarray(np.uint8(image_array))\n image.save(file_path)", "def imageSaveOutput(image,name,number):\n FileName = name +\" \"+number\n mpimg.imsave(\"test_images_output\"+'//'+FileName,image)\n return 0;", "def save_images(images, save_dir, image_type):\n for image in images:\n raw_img = urllib2.urlopen(image).read()\n count = len([i for i in os.listdir(save_dir) if image_type in i]) + 1\n f = open(save_dir + '/' + image_type + '_' + str(count), 'wb')\n f.write(raw_img)\n f.close()", "def save(self, x, y, names, path=\"\", zoom=False):\n for i in range(len(x)):\n image = self.generate(x[i], label=np.argmax(y[i]), zoom=zoom)\n image = Image.fromarray((image*255).astype(\"uint8\"))\n image.save(path + names[i] + \".png\", \"PNG\")", "def save_images(PATH, show_img, datasets, from_dataset):\n dataset = datasets[from_dataset]\n imgModels = dataset['models']\n for modelname, model in imgModels.items():\n print('save', modelname)\n plt.imshow(model[70])\n plt.set_cmap(\"gray\")\n plt.axis('off')\n plt.savefig(PATH + '/' + from_dataset + '_' + modelname + '.png', dpi=400)\n\n if show_img == True:\n plt.show()", "def save_smiles(smiles, filename):\n with open(filename, 'w') as f:\n for smi in smiles:\n f.write(smi + '\\n')", "def saveImgs(img, filename=None):\n\tif filename is None:\n\t\tdate = time.strftime(\"%Y%m%d\")\n\t\tfilename = \"T\" + str(date)\n\t\tjpg = \".jpg\"\n\t\tcount = 0\n\t\tfor item in img:\n\t\t\tname = filename + str(count) + jpg\n\t\t\tcv2.imwrite(name, item)\n\t\t\tcount += 1\n\telse:\n\t\tfor i in range(0, len(img)):\n\t\t\tcv2.imwrite(filename[i], img[i])", "def save_skimage_stack_as_mp4(filepaths, savepath, **kwargs):\n # Save as .mp4\n cv2_frames = [cv2.imread(filepath) for filepath in filepaths]\n height, width, layers = cv2_frames[0].shape\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n fps = 5\n video = cv2.VideoWriter(savepath, fourcc, fps,\n (width, height))\n for img in cv2_frames:\n video.write(img)\n print(f'mp4 saved at\\n{savepath}')\n # Release file from memory\n video.release()", "def save_imgs(self, epoch):\n row, column = 5, 5\n\n # Generates r*c images from the model, saves them individually and as a gallery\n images_generated = self.generate_images(row * column)\n\n # ???\n images_generated = 0.5 * images_generated + 0.5\n\n for index, np_array_image in enumerate(images_generated):\n path = f\"{self.output_directory}/generated_{self.img_size[0]}x{self.img_size[1]}\"\n if not os.path.exists(path):\n os.makedirs(path)\n imsave(path + f\"/{unique_name()}_{epoch}_{index}.png\", np_array_image)\n\n # 4D array:\n nindex, height, width, intensity = images_generated.shape\n\n nrows = nindex // column\n\n assert nindex == nrows * column\n\n # Form the gallery by combining the data at pixel levels (may not be the best approach)\n # want result.shape = (height*n-rows, width*n-cols, intensity)\n 
gallery = (\n images_generated.reshape(nrows, column, height, width, intensity)\n .swapaxes(1, 2)\n .reshape(height * nrows, width * column, intensity)\n )\n\n path = f\"{self.output_directory}/gallery_generated_{self.img_size[0]}x{self.img_size[1]}\"\n if not os.path.exists(path):\n os.makedirs(path)\n imsave(path + f\"/{unique_name()}_{epoch}.png\", gallery)", "def write_data(data_to_write_train, data_to_write_test, train_path, eval_path ,test_path):\n aligned_lists_train = data_to_write_train[0]\n raw_lists_train = data_to_write_train[2]\n\n aligned_lists_eval = data_to_write_test[0]\n raw_lists_eval = data_to_write_test[2]\n\n aligned_lists_test = data_to_write_test[1]\n raw_lists_test = data_to_write_test[3]\n\n filelist = list([train_path, eval_path, test_path])\n\n for file in filelist:\n aligned_path = os.path.join(file, 'aligned_image')\n raw_path = os.path.join(file, 'raw_image')\n os.mkdir(aligned_path)\n os.mkdir(raw_path)\n\n # raw image data\n for Idx, train_raw in enumerate(raw_lists_train):\n img = Image.open(train_raw)\n img.save(train_path+'/raw_image/img_'+f'{Idx:04d}.jpg')\n if Idx%100 == 0:\n print('\\t%d images are saved'% Idx); \n print('\\tTrain raw images saved! ')\n\n for Idx, eval_raw in enumerate(raw_lists_eval):\n img = Image.open(eval_raw)\n img.save(eval_path+'/raw_image/img_'+f'{Idx:04d}.jpg')\n if Idx%100 == 0:\n print('\\t%d images are saved'% Idx); \n print('\\tEval raw images saved! ')\n\n for Idx, test_raw in enumerate(raw_lists_test):\n img = Image.open(test_raw)\n img.save(test_path+'/raw_image/img_'+f'{Idx:04d}.jpg')\n if Idx%100 == 0:\n print('\\t%d images are saved'% Idx); \n print('\\tTest raw images saved! ')\n\n # aligned image data\n for Idx, train_aligned in enumerate(aligned_lists_train):\n img = Image.open(train_aligned)\n img.save(train_path+'/aligned_image/img_'+f'{Idx:04d}.jpg')\n if Idx%100 == 0:\n print('\\t%d images are saved'% Idx); \n print('\\tTrain aligned images saved! ')\n\n for Idx, eval_aligned in enumerate(aligned_lists_eval):\n img = Image.open(eval_aligned)\n img.save(eval_path+'/aligned_image/img_'+f'{Idx:04d}.jpg')\n if Idx%100 == 0:\n print('\\t%d images are saved'% Idx); \n print('\\tEval aligned images saved! ')\n\n for Idx, test_aligned in enumerate(aligned_lists_test):\n img = Image.open(test_aligned)\n img.save(test_path+'/aligned_image/img_'+f'{Idx:04d}.jpg')\n if Idx%100 == 0:\n print('\\t%d images are saved'% Idx); \n print('\\tTest aligned images saved! 
')", "def save_image(start, stop, imgcount, label):\n text = \"\"\n imgfile = select_file(label)\n for p in range(imgcount):\n pxcnt = randint(start, stop)\n imgcurrent = create_image(imgfile, pxcnt)\n filename = \"img_train_\" + str(label) + \"_\" + str(p) + \"_\" + str(pxcnt) + \".png\"\n text += \"ctq/dataset/train/\" + filename + \" \" + str(label) + \"\\n\"\n imgcurrent.save(filename)\n text_file = open(imgfile + \"_train_label.txt\", \"w\")\n text_file.write(text)\n text_file.close()", "def save_images(self):\n for q in range(self.N_itr):\n plt.clf()\n self.plot_EM_estimate(q)\n plt.savefig('img%d.png' % (100 + q))", "def save_sequence(seq_dir, seq_data, frm_idx_lst=None, to_bgr=False):\n\n if to_bgr:\n seq_data = seq_data[..., ::-1] # rgb2bgr\n\n # use default frm_idx_lst is not specified\n tot_frm = len(seq_data)\n if frm_idx_lst is None:\n frm_idx_lst = ['{:04d}.png'.format(i) for i in range(tot_frm)]\n\n # save for each frame\n os.makedirs(seq_dir, exist_ok=True)\n for i in range(tot_frm):\n cv2.imwrite(osp.join(seq_dir, frm_idx_lst[i]), seq_data[i])", "def save_step_3(img_pairs, match_list, output_path=\"./output/step3\"):\n # ... your code here ...\n for i in range(len(img_pairs)):\n name1,tail1 = str.split(filenames[match_list[i][0]],\".\")\n name2,tail2 = str.split(filenames[match_list[i][1]],\".\")\n cv2.imwrite(output_path+\"/\"+name1+\"_\"+name2+\".jpg\", img_pairs[i][0])\n cv2.imwrite(output_path+\"/\"+name2+\"_\"+name1+\".jpg\", img_pairs[i][1])", "def saveImgWithSegmentations(segmented_volume, volume_name, model_name, save_dir):\n # Convert from RGB to grayscale\n gray3d = []\n for i in range(len(segmented_volume)):\n gray3d.append(color.rgb2gray(segmented_volume[i]))\n gray3d = np.asarray(gray3d)\n gray3d = np.transpose(gray3d, (2, 1, 0))\n\n filename = volume_name + '_' + model_name\n\n if not os.path.isdir(save_dir): \n os.mkdir(save_dir)\n\n # Save as nrrd\n #nrrd.write(filename=save_dir+filename+'.nrrd', data=gray3d)\n #print('Successfully save results as nrrd.')\n\n # Save as gif (fps=5 is recommended)\n img_seq = gray3d.transpose((2, 1, 0)) * 255.0\n img_seq = img_seq.astype(np.uint8)\n if gif(save_dir+filename, img_seq, fps=5):\n print('Successfully save results as gif.')", "def save_images(images, db, path):\n images = [int(image) for image in images]\n files = get_img_files(images, db)\n copy_files(files, path)", "def save_images(path, images, filenames):\n if not os.path.exists(path):\n return False\n for i in range(len(images)):\n img_rgb = cv2.cvtColor(images[i], cv2.COLOR_BGR2RGB)\n cv2.imwrite(os.path.join(path, filenames[i]), img_rgb)\n return True", "def save_images(images, file_path, nickname, num_cameras):\r\n print(\"Saving images...\")\r\n # determine the expected length of the image name\r\n # MultiDIC requires leading zeros if there are more than 9 images in the run\r\n num_rounds = len(images)//num_cameras\r\n name_length = len(str(num_rounds))\r\n for image_name in images.keys():\r\n # determine where to save the images\r\n # adjust the image name as needed\r\n underscore = image_name.index(\"_\")\r\n number = image_name[underscore+1:]\r\n if len(str(number)) < name_length:\r\n zeros = \"0\"*(name_length - len(str(number)))\r\n save_name = image_name[:underscore+1] + zeros + image_name[underscore+1:]\r\n else:\r\n save_name = image_name\r\n if nickname:\r\n # pull the camera number from the image name - assumes less than 10 cameras in a setup\r\n cam_number = image_name[3]\r\n path = file_path + \"\\\\Camera_\" + str(cam_number) + \"\\\\\" + 
save_name + \".png\"\r\n else:\r\n # pull the serial number from the image name\r\n cam_serial = image_name[3:image_name.index(\"i\")]\r\n path = file_path + \"\\\\Camera_\" + str(cam_serial) + \"\\\\\" + save_name + \".png\"\r\n images[image_name].Save(path)\r\n # rotate the image for better viewing\r\n im = Image.open(path)\r\n im = im.transpose(Image.ROTATE_270)\r\n im.save(path)\r\n \r\n print(\"Images saved\")", "def save_images(self, sess, epoch):\n if not os.path.exists(self._images_dir):\n os.makedirs(self._images_dir)\n\n names = ['inputA_', 'inputB_', 'fakeA_',\n 'fakeB_', 'cycA_', 'cycB_']\n\n with open(os.path.join(\n self._output_dir, 'epoch_' + str(epoch) + '.html'\n ), 'w') as v_html:\n for i in range(0, self._num_imgs_to_save):\n print(\"Saving image {}/{}\".format(i, self._num_imgs_to_save))\n inputs = sess.run(self.inputs)\n fake_A_temp, fake_B_temp, cyc_A_temp, cyc_B_temp = sess.run([\n self.fake_images_a,\n self.fake_images_b,\n self.cycle_images_a,\n self.cycle_images_b\n ], feed_dict={\n self.input_a: inputs['images_i'],\n self.input_b: inputs['images_j']\n })\n\n tensors = [inputs['images_i'], inputs['images_j'],\n fake_B_temp, fake_A_temp, cyc_A_temp, cyc_B_temp]\n\n for name, tensor in zip(names, tensors):\n image_name = name + str(epoch) + \"_\" + str(i) + \".jpg\"\n imsave(os.path.join(self._images_dir, image_name),\n ((tensor[0] + 1) * 127.5).astype(np.uint8)\n )\n v_html.write(\n \"<img src=\\\"\" +\n os.path.join('imgs', image_name) + \"\\\">\"\n )\n v_html.write(\"<br>\")", "def save2file(self):\n ids_input = []\n labels_input = []\n ids_path = os.path.join(self.path, 'ids')\n if not os.path.exists(ids_path):\n os.makedirs(ids_path)\n labels_path = os.path.join(self.path, 'labels')\n if not os.path.exists(labels_path):\n os.makedirs(labels_path)\n ids_total = len(self.test)\n for i in range(ids_total):\n ids_input = self.test[i][0]\n labels_input = self.test[i][1]\n file_name = \"ids/\" + str(i) + \".bin\"\n file_path = os.path.join(self.path, file_name)\n np.array(ids_input, dtype=np.int32).tofile(file_path)\n file_name = \"labels/\" + str(i) + \".bin\"\n file_path = os.path.join(self.path, file_name)\n np.array(labels_input, dtype=np.int32).tofile(file_path)\n print(\"\\n ****** Success! 
******\\n \")", "def saveImages(saveImagePath,dataForSaving,enumeratedList):\n \n for i in range(len(dataForSaving[0])):\n singleChar = dataForSaving[0][i]\n singleImage = dataForSaving[1][i]\n \n if singleChar not in enumeratedList:\n enumeratedList.append(singleChar)\n \n dimension = int(singleImage.shape[0]**0.5)\n singleImage = Image.fromarray(np.resize(singleImage,(dimension,dimension)), 'L')\n \n copyVal = 0\n while os.path.exists('{}\\\\{}_copy{}.png'.format(saveImagePath,\\\n enumeratedList.index(singleChar),copyVal)):\n copyVal += 1\n \n singleImage.save('{}\\\\{}_copy{}.png'.format(saveImagePath,\\\n enumeratedList.index(singleChar),copyVal))", "def save_frames(frames, out_dir, as_row=True, as_gif=False):\n os.makedirs(out_dir, exist_ok=True)\n if frames.dtype == torch.uint8: # save_image needs float value in [0, 1]\n frames = frames.float()\n frames = frames / 255.\n if as_gif:\n gif_dir = 'gif_images'\n os.makedirs(os.path.join(out_dir, gif_dir), exist_ok=True)\n for i, frames_i in enumerate(frames):\n if as_row:\n out_file = os.path.join(out_dir, f'img_{i:04d}.png')\n save_image(frames_i.clone(), out_file, nrow=frames_i.shape[0])\n if as_gif:\n for j, frame in enumerate(frames_i):\n out_file = os.path.join(out_dir, gif_dir, f'img_{i:04d}_{j:04d}.png')\n save_image(frame.unsqueeze(0), out_file)\n \n out_file = os.path.join(out_dir, f'img_{i:04d}.gif')\n make_gif(os.path.join(out_dir, gif_dir), out_file, pattern=f'img_{i:04d}_*', fps=10)\n \n print(f'Saved images to {out_dir}')", "def save_predictions(predictions, img_paths, output_dir='predictions'):\n\n print(f'\\nSaving prediction to {output_dir} ...')\n\n if not osp.exists(output_dir):\n os.mkdir(output_dir)\n\n for pred, img_path in tqdm(zip(predictions, img_paths), total=len(predictions)):\n img_name = osp.basename(img_path)\n pred = pred.astype('uint8')\n Image.fromarray(pred * 255).save(osp.join(output_dir, img_name))", "def save_gif(frames):\n print(\"Saving gif images!\")\n for i in range(len(frames)):\n im_out_path = \"gif/gif_emilie_will_\" + str(i) + \".png\"\n plt.imsave(im_out_path, frames[i])", "def save_as_png(path):\r\n for _, _, filename in walk(path):\r\n for f in filename:\r\n medical_image = pydicom.dcmread(path + f)\r\n shape = medical_image.pixel_array.shape\r\n # Convert to float to avoid overflow or underflow losses\r\n brain_image = medical_image.pixel_array.astype(float)\r\n # Rescaling grey scale between 0-255\r\n scaled_image = (np.maximum(brain_image, 0) / brain_image.max()) * 255.0\r\n # Convert to uint\r\n scaled_image = np.uint8(scaled_image)\r\n # Write the PNG file\r\n with open(f'{path}png/{f.strip(\".dcm\")}.png', 'wb') as png_file:\r\n w = png.Writer(shape[1], shape[0], greyscale=True)\r\n w.write(png_file, scaled_image)", "def img_save(name,img):\n cv2.imwrite(name,img)", "def save_predictions(self, preds_all, save_dir, scale_pred=False):\n for idx, fname in enumerate(self.test_files):\n fh = open(fname, 'rb')\n img = pil.open(fh)\n orig_h, orig_w = self.gt_depths[idx].shape\n pred_resize = cv2.resize(preds_all[idx], (orig_w, orig_h), interpolation=cv2.INTER_LINEAR)\n if scale_pred:\n scaled_disp, _ = self.scale_depth_disp(pred_resize)\n disp_img = self.generate_disparity_img(scaled_disp)\n else:\n disp_img = self.generate_disparity_img(1./pred_resize)\n\n imgname = \"{0:04d}\".format(idx)\n name_img = os.path.join(save_dir, imgname+\".jpeg\")\n img.save(name_img)\n name_disp = os.path.join(save_dir, imgname+\"_disp.jpeg\")\n disp_img.save(name_disp)", "def saveGIFBatch(directory, path, 
name=''):\n # for each frame in batch\n images = []\n for filename in directory:\n print(filename)\n images.append(imageio.imread(filename))\n\n name_gif = path + '/' + name + '.gif'\n imageio.mimsave(name_gif, images)", "def save_images(self, sess, epoch):\n if not os.path.exists(self._images_dir):\n os.makedirs(self._images_dir)\n\n if not os.path.exists(os.path.join(self._images_dir, 'imgs')):\n os.makedirs(os.path.join(self._images_dir, 'imgs'))\n \n names = ['inputB_', 'fakeB_depth_' , 'cycB_']\n\n with open(os.path.join(\n self._output_dir, 'epoch_' + str(epoch) + '.html'), 'w') as v_html:\n for i in range(0, self._num_imgs_to_save):\n print(\"Saving image {}/{}\".format(i, self._num_imgs_to_save))\n x1_t, name1 = self.dataset.next_batch()\n count = 0\n fake_A_temp, cyc_B_temp = sess.run([\n self.fake_images_a,\n self.cycle_images_b], \n feed_dict={self.input_b: x1_t})\n \n fakedepth = fake_A_temp[:,:,:,-1]\n tensors = [x1_t, fakedepth, cyc_B_temp]\n\n for name, tensor in zip(names, tensors):\n #print(name)\n # if name == 'inputB_' or name == 'fakeB_depth_':\n # image_name = name1[count] + '_' + name + str(epoch) + \"_\" + str(i) + \".jpg\"\n # imsave(os.path.join(self._images_dir, 'imgs', image_name), ((tensor[0] + 1) * 127.5).astype(np.uint8))\n # else:\n image_name = name + str(epoch) + \"_\" + str(i) + \".jpg\"\n imsave(os.path.join(self._images_dir, image_name), ((tensor[0] + 1) * 127.5).astype(np.uint8))\n v_html.write(\n \"<img src=\\\"\" +\n os.path.join('imgs', image_name) + \"\\\">\"\n )\n v_html.write(\"<br>\")\n count += 1", "def write_file(self):\n if self.it_num % 5 == 0:\n #plt.imshow(self.grid)\n #plt.savefig(\"output%.4d.png\" % self.it_num, bbox_inches='tight')\n io.savemat(\"MLOutput%.4d\" % self.it_num, { \"Grid\":self.grid})", "def save_matrices(music_matrix, dance_matrix, duration):\n d = int(duration) + 1\n\n plt.tick_params(labelsize=22)\n plt.xticks(np.arange(0, music_matrix.shape[0], music_matrix.shape[0] / duration), np.arange(0, d, 1))\n plt.yticks(np.arange(0, music_matrix.shape[0], music_matrix.shape[0] / duration), np.arange(0, d, 1))\n plt.imshow(music_matrix, cmap='gray')\n plt.savefig('music.png', bbox_inches='tight')\n plt.close()\n\n plt.tick_params(labelsize=22)\n plt.xticks(np.arange(0, music_matrix.shape[0], music_matrix.shape[0] / duration), np.arange(0, d, 1))\n plt.yticks(np.arange(0, music_matrix.shape[0], music_matrix.shape[0] / duration), np.arange(0, d, 1))\n plt.imshow(dance_matrix, cmap='gray')\n plt.savefig('dance.png', bbox_inches='tight')\n plt.close()", "def __save__(self, loops, location):\n\n if not os.path.isdir(location):\n os.makedirs(location)\n\n try:\n mlab = matlab.Matlab(self.config['locations']['fr3d_root'])\n [status, err_msg] = mlab.aSaveLoops(loops, location, nout=2)\n except Exception as err:\n self.logger.exception(err)\n raise err\n\n if status != 0:\n self.logger.error(mlab.last_stdout)\n raise matlab.MatlabFailed(\"Could not save all loop mat files\")\n\n self.logger.debug(\"Saved loop mat files\")", "def img_save(self):\n file_name, extension = return_folder_file_extension(self.img_name)[1:]\n image_name_save = \"%s_D=%s_Rs=%s_size=%s_offset=%i%s\" % (file_name, self.D, self.Rs, self.axe_X, self.offset_X+self.offset_X2, extension)\n\n if self.img2 is not None:\n self.img2.save(image_name_save)\n print(\"Saved \"+image_name_save)\n else:\n print(\"No image to save\")", "def save_images(pool, dst, images, entries):\n start = time.perf_counter()\n fnames = [os.path.splitext(x.name)[0] + \".png\" for x in entries]\n 
fpaths = [os.path.join(dst, x) for x in fnames]\n pool.starmap(ski.io.imsave, zip(fpaths, images))\n logger.info(\"Saved %i images:\", len(fpaths))\n util.pprint_log(fnames, logger.info)\n logger.info(util.elapsed(start))\n logger.info(\"\\n\")\n return fpaths", "def save(self, filename):\n print(\"Saving...\", end=\"\\r\")\n canvas = self.canvas[self.N:self.S,self.W:self.E]\n cv2.imwrite(\"./Output/\"+filename, canvas)\n print(\"Saved:\",filename)", "async def save_url_images(images):\n for source, image in images:\n name = source.split('/')[-1]\n async with aiofiles.open(f'{OUTPUT_FOLDER}/{name}', 'wb') as f:\n await f.write(image)", "def save_images(args,path,images, epoch, nrow=None):\n if nrow == None:\n nrow = int(np.floor(np.sqrt(images.size(0)\n )))\n\n img = torchvision.utils.make_grid(images, nrow=nrow, normalize=True).numpy()\n img = np.transpose(img, (1,2,0))\n\n plt.figure()\n plt.imshow(img)\n plt.savefig(path+\"/epoch{:04d}\".format(epoch))\n plt.close()", "def save(im, output_dir: Path):\n if not hasattr(save, \"counter\"):\n save.counter = 0 # type: ignore\n fname = f\"{save.counter:05d}.jpg\" # type: ignore\n cv2.imwrite(str(output_dir / fname), im)\n print(\"Saved\", fname)\n save.counter += 1 # type: ignore", "def __save_to_dir(self, imagelist, prefix, PATH):\n for pair in imagelist:\n directory = os.path.join(PATH, pair[1])\n if not os.path.exists(directory):\n os.mkdir(directory)\n filename = prefix + pair[2]\n pair[0].save(os.path.join(directory, filename))\n print(\"Saved \" + os.path.join(directory, filename))", "def save_images(self, samples, label=None, dir=\"\"):\n if label is None:\n label = self.global_step_\n fig = plt.figure()\n self.net_.eval()\n self.dist.visualize(fig, samples, self.energy)\n plot_fn = os.path.join(dir, f\"samples_{label}.png\")\n fig.savefig(plot_fn)\n plt.close(fig)", "def imwrite(image, path):\n\n if image.ndim == 3 and image.shape[2] == 1: # for gray image\n image = np.array(image, copy=True)\n image.shape = image.shape[0:2]\n\n imgarray=((image+1.0)*127.5).astype(np.uint8)\n img=Image.fromarray(imgarray)\n img.save(path)", "def _save_object_stack(self, folder, basename, img_stack, slices, labels=None):\n if labels is None:\n labels = range(slices)\n for lab, sl in zip(labels, slices):\n if sl is None:\n pass\n x = sl[0].start\n y = sl[1].start\n\n exsl = tuple([np.s_[:]] + [s for s in sl])\n\n fn = os.path.join(\n folder,\n basename\n + \"_l\"\n + str(lab + 1)\n + \"_x\"\n + str(x)\n + \"_y\"\n + str(y)\n + \".tiff\",\n )\n timg = img_stack[exsl]\n skimage.io.imsave(fn, timg, plugin=\"tifffile\", imagej=True)", "def save_reconstructions(reconstructions, out_dir):\n out_dir.mkdir(exist_ok=True)\n for fname, recons in reconstructions.items():\n file_path = out_dir/fname\n np.save(file_path,recons)", "def save(self, output_folder: str, show_confidence: bool = True) -> None:\n if output_folder:\n os.makedirs(output_folder, exist_ok=True)\n\n for i, prediction in enumerate(self._images_prediction_lst):\n image_output_path = os.path.join(output_folder, f\"pred_{i}.jpg\")\n prediction.save(output_path=image_output_path, show_confidence=show_confidence)", "def save_lod_files(files, filename, path=None, start_index=0):\n path = path_formatter(path)\n for i, target in enumerate(files):\n with open(\"{}{}_{}.mtxt\".format(path, filename, i + start_index),\n \"w\") as f:\n f.write(str(target))", "def test_save_materials(temp_dir):\n image1 = [[[0, 0, 0], [0, 0, 0]], [[255, 255, 255], [255, 255, 255]]]\n image2 = [[[0, 0, 0], [255, 255, 255]], 
[[255, 255, 255], [0, 0, 0]]]\n image3 = [[[255, 255, 255], [255, 255, 255]], [[0, 0, 0], [0, 0, 0]]]\n\n data = [\n (\"image1.png\", Image.fromarray(np.array(image1, dtype=np.uint8))),\n (\"image2.png\", Image.fromarray(np.array(image2, dtype=np.uint8))),\n (\"image3.png\", Image.fromarray(np.array(image3, dtype=np.uint8))),\n ]\n save_materials(temp_dir, data, step=1)\n\n assert os.path.exists(os.path.join(temp_dir, \"images\", \"1\", \"image1.png\"))\n assert os.path.exists(os.path.join(temp_dir, \"images\", \"1\", \"image2.png\"))\n assert os.path.exists(os.path.join(temp_dir, \"images\", \"1\", \"image3.png\"))", "def write_to_train_file(files: List, train_file_path: str) -> None:\n f = open(train_file_path, \"w\")\n text_to_save = \"\"\n for i, img_path in enumerate(files):\n img_path_stripped = img_path.replace(\"/darknet\", \"\")\n if i == len(files) - 1:\n text_to_save += img_path_stripped\n else:\n text_to_save += img_path_stripped + \"\\n\"\n\n f.write(text_to_save)\n f.close()", "def save(self):\n from settings import PROCESSORS\n from .filesystem import makedirs\n\n if self.im is None:\n # If we got here something very strange is going on that I can't even\n # predict.\n return # pragma: no cover\n makedirs(self.output_path)\n for action, arg in self.actions:\n action = PROCESSORS[action]\n if self.frames:\n new_frames = []\n for frame in self.frames:\n new_frames.append(action.process(frame, arg))\n self.frames = new_frames\n else:\n self.im = action.process(self.im, arg)\n\n self.im = optimize.optimize(self.im, fmt=self.format, quality=self.quality)\n\n kwargs = {\n 'format': self.format,\n 'optimize': True,\n 'quality': self.quality,\n }\n if self.format == 'jpeg':\n kwargs['progressive'] = True\n\n if self.filename.startswith('s3://'):\n import cStringIO\n from filesystem import s3\n output = cStringIO.StringIO()\n if self.frames:\n images2gif.write_gif(output, self.frames)\n else:\n self.im.save(output, **kwargs)\n output.reset()\n s3.put_file(output, self.filename)\n else:\n if self.frames:\n images2gif.write_gif(self.filename, self.frames)\n else:\n self.im.save(self.filename, **kwargs)", "def write_estimations(dir_path, images, labels, annotated_original_image=None, suffix=\"\"):\n assert len(images) == len(labels)\n\n estimation_dir = create_estimation_dir(dir_path, suffix)\n if not estimation_dir:\n return False\n\n for i, (image, label) in enumerate(zip(images, labels)):\n filename = os.path.join(estimation_dir, \"%s_%d.jpg\" % (label, i))\n cv2.imwrite(filename, image)\n\n if annotated_original_image is not None:\n filename = os.path.join(estimation_dir, \"annotated_original_image.jpg\")\n cv2.imwrite(filename, annotated_original_image)\n\n return True", "def save_output(self,\n image_save_fcn,\n label_save_fcn=None\n ):\n ctr = 0\n for i in range(len(self)):\n try:\n X, y, _ = self[i]\n except ValueError:\n X, y = self[i]\n\n for j in range(X.shape[0]):\n im_name = str(ctr).zfill(5)\n image_save_fcn(im_name, X[j, ...])\n\n if y is not None and label_save_fcn is not None:\n label_save_fcn(im_name, y[j, ...])\n\n ctr += 1", "def save_reconstructions(reconstructions, out_dir):\n print(\"save reconstruction\")\n out_dir.mkdir(exist_ok=True)\n for fname, recons in reconstructions.items():\n print(\"fname\",fname)\n with h5py.File(out_dir / fname, 'w') as f:\n f.create_dataset('reconstruction', data=recons)", "def save_tiles(self, tiles, output_dir):\n save_path = f\"{output_dir}/tiles.npy\"\n tiles_np = np.asarray(tiles)\n np.save(save_path, tiles_np)\n print(\"done 
saving .npy!\")", "def save_images(unique_class_names, root_folder_to_save_images, img_names, y, original_images, perturbed_images):\n original_images = original_images / np.max(original_images)\n perturbed_images = perturbed_images / np.max(perturbed_images)\n\n if not os.path.isdir(root_folder_to_save_images):\n os.makedirs(root_folder_to_save_images, exist_ok=True)\n for class_names in unique_class_names:\n perturbed_images_save_path = os.path.join(root_folder_to_save_images, class_names, 'perturbed')\n original_images_save_path = os.path.join(root_folder_to_save_images, class_names, 'original')\n if not os.path.isdir(perturbed_images_save_path):\n os.makedirs(perturbed_images_save_path, exist_ok=True)\n if not os.path.isdir(original_images_save_path):\n os.makedirs(original_images_save_path, exist_ok=True)\n\n for name_of_image, label, original_image, adversarial_image in zip(img_names, y, original_images, perturbed_images):\n absolute_path_perturbed_image = os.path.join(root_folder_to_save_images, label, 'perturbed', name_of_image)\n absolute_path_orig_image = os.path.join(root_folder_to_save_images, label, 'original', name_of_image)\n perturbed_image = adversarial_image.copy()\n mp_img.imsave(absolute_path_orig_image, original_image)\n mp_img.imsave(absolute_path_perturbed_image, perturbed_image)", "def print_images(images,output_dir,image_num=0,pair=False,synth_images=None):\n for i in xrange(images.shape[0]):\n to_print = fix_image(images[i])\n\n if pair and synth_images is not None:\n synth_to_print = fix_image(synth_images[i])\n to_print = np.hstack((to_print,synth_to_print))\n\n #What is the name of the image?\n imsave(os.path.join(output_dir,str(image_num + i) + \".png\"), to_print)", "def save_tiles(tiles, prefix=\"\", directory=os.getcwd(), format=\"png\"):\n for tile in tiles:\n tile.save(\n filename=tile.generate_filename(\n prefix=prefix, directory=directory, format=format\n ),\n format=format,\n )\n return tuple(tiles)", "def save_processed_images(exp_dir, img_dict):\n # save them into a directory called \"processed\"\n img_fname = os.path.join(exp_dir, str(experiment) + '_processed.jpg')", "def save_maps(self, output_dir='.', prefix='', prefix_sep='_',\n names=None):\n if prefix == '':\n prefix_sep = ''\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n names = names or list(self.maps.keys())\n maps = {k: self.get_map(k) for k in names}\n\n for imgtype, img in maps.items():\n filename = prefix + prefix_sep + imgtype + '.nii.gz'\n outpath = os.path.join(output_dir, filename)\n img.to_filename(outpath)", "def save_annotations(self):\n for fp in self.ris_widget.flipbook_pages:\n if len(fp) == 0:\n # skip empty flipbook pages\n continue\n annotations = getattr(fp, 'annotations', {})\n pose = annotations.get('pose', (None, None))\n if pose is not None:\n center_tck, width_tck = pose\n if center_tck is not None:\n path = pathlib.Path(fp[0].name)\n with path.with_suffix('.pickle').open('wb') as f:\n pickle.dump(dict(pose=pose), f)\n\n # warp and save images from all flipbook pages\n for lab_frame in fp:\n lab_frame_image = lab_frame.data\n path = pathlib.Path(lab_frame.name)\n warp = worm_spline.to_worm_frame(lab_frame_image, center_tck, width_tck)\n warp_save_path = path.parent / (path.stem + '-straight.png')\n freeimage.write(warp, warp_save_path)\n\n # If the widths are drawn, then create a mask that allows the user to make an alpha channel later.\n # We create one mask for each flipbook page, in case the images were saved in different places.\n # If we 
wind up redundantly writing the same mask a few times, so be it.\n if width_tck is not None:\n mask = worm_spline.worm_frame_mask(width_tck, warp.shape)\n mask_save_path = path.parent / (path.stem + '-mask.png')\n freeimage.write(mask, mask_save_path)", "def write_images(band,skypos,tranges,skyrange,write_cnt=False,write_int=False,write_rr=False,framesz=0,width=False,height=False,verbose=0,tscale=1000.,memlight=False,coadd=False,response=False,calpath='../cal/',clobber=False,retries=20):\n\t# No files were requested, so don't bother doing anything.\n\tif not (write_cnt or write_int or write_rr):\n\t\treturn\n\tcount,rr,intensity=create_images(band,skypos,tranges,skyrange,framesz=framesz,width=width,height=height,verbose=verbose,tscale=tscale,memlight=memlight,coadd=coadd,response=response,calpath=calpath,retries=retries)\n\n\t# Add a conditional so that this is only created for multi-frame images\n\ttbl = movie_tbl(band,tranges,framesz=framesz,verbose=verbose,retries=retries)\n\n\tif write_cnt:\n\t\thdu = pyfits.PrimaryHDU(count)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing count image to '+str(write_cnt)\n\t\thdulist.writeto(write_cnt,clobber=clobber)\n\tif write_rr:\n\t\thdu = pyfits.PrimaryHDU(rr)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing response image to '+str(write_rr)\n hdulist.writeto(write_rr,clobber=clobber)\n\tif write_int:\n\t\thdu = pyfits.PrimaryHDU(intensity)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing intensity image to '+str(write_int)\n\t\thdulist.writeto(write_int,clobber=clobber)\n\n\treturn", "def save_batch(dataset, steps, outdir, batch_nbr):\n\twith h5py.File(outdir+\"/batch%d.h5\" % (batch_nbr), 'w') as f:\n\t\tf.create_dataset('left_camera_image', data=np.array(dataset['left_camera']))\n\t\tf.create_dataset('left_camera_steering_angle', data=np.array(dataset['left_camera/steering_angle']))\n\t\tf.create_dataset('center_camera_image', data=np.array(dataset['center_camera']))\n\t\tf.create_dataset('center_camera_steering_angle', data=np.array(dataset['center_camera/steering_angle']))\n\t\tf.create_dataset('right_camera_image', data=np.array(dataset['right_camera']))\n\t\tf.create_dataset('right_camera_steering_angle', data=np.array(dataset['right_camera/steering_angle']))\n\treturn batch_nbr + 1", "def saveFrame(filepath, frame):\n if not filepath.lower().endswith('.png'):\n filepath += '.png'\n image = Image.fromarray(frame)\n image.save(filepath)", "def imsave(file_name, img):\n assert(type(img) == torch.FloatTensor,\n 'img must be a torch.FloatTensor')\n ndim = len(img.size())\n assert(ndim == 2 or ndim == 3,\n 'img must be a 2 or 3 dimensional tensor')\n\n img = img.numpy()\n\n if ndim == 3:\n plt.imsave(file_name, np.transpose(img, (1, 2, 0)))\n else:\n plt.imsave(file_name, img, cmap='gray')", "def save(img, path, file_name):\n\n name = os.path.join(path,file_name).replace('/', os.sep)\n\n io.imsave(name,img)", "def save_to_disk(x_data, y_data, usage, output_dir='cifar10_images'):\n assert usage in ['train', 'val', 'test']\n\n # Set paths\n if not 
os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n for label in np.unique(y_data):\n label_path = os.path.join(output_dir, usage, str(label))\n if not os.path.exists(label_path):\n os.makedirs(label_path)\n\n for idx, img in enumerate(x_data):\n bgr_img = img[..., ::-1] # RGB -> BGR\n # label = y_data[idx][0]\n label = y_data[idx]\n img_path = os.path.join(\n output_dir, usage, str(label), 'img_{}.jpg'.format(idx)\n )\n retval = cv2.imwrite(img_path, bgr_img)\n\n assert retval, 'Problem saving image at index: {}'.format(idx)", "def save(self, filename):\n self.image.save(filename, self.options.img_format)", "def save_material(filename, mat):\n out = np.array([mat.wav, mat.eps.real, mat.eps.imag,\n mat.mu.real, mat.mu.imag]).T\n header = \"Wavelength\\teps_real\\teps_imag\\tmu_real\\tmu_imag\"\n miepy.array_io.save(filename, out, header=header)", "def save_to_file(filename: str, sequence: List[Sample]):\n\n with open(get_path() + \"/sequence/\" + filename, \"ab+\") as file:\n for sample in sequence:\n pickle.dump(sample, file, pickle.HIGHEST_PROTOCOL)", "def _dump_image(self):\n if not self._current_id == len(self._img_ids):\n warnings.warn(\n 'Recorded {} out of {} validation images, incomplete results'.format(\n self._current_id, len(self._img_ids)))\n try:\n for im_name, im in self._panoptic_images.items():\n cv2.imwrite(osp.join(self._save_imgpath, im_name), im)\n except IOError as e:\n raise RuntimeError(\"Unable to dump images, ignored. What(): {}\".format(str(e)))", "def save_images(d, image_dir='images/'):\n for (lkey, rkey) in zip(d.left, d.right):\n cv2.imwrite(image_dir+lkey+\"_left.png\", d.left[lkey])\n cv2.imwrite(image_dir+rkey+\"_right.png\", d.right[rkey])", "def save_data(data_dir):\r\n for k in range(1,11):\r\n fold_name = 'fold' + str(k)\r\n print \"Saving\" + fold_name\r\n features, labels = process_audio(parent_path, [fold_name])\r\n labels = encode(labels)\r\n print \"Features of\", fold_name , \" = \", features.shape\r\n print \"Labels of\", fold_name , \" = \", labels.shape\r\n feature_file = os.path.join(data_dir, fold_name + '_x.npy')\r\n labels_file = os.path.join(data_dir, fold_name + '_y.npy')\r\n np.save(feature_file, features)\r\n print \"Saved \" + feature_file\r\n np.save(labels_file, labels)\r\n print \"Saved \" + labels_file", "def write(self):\n f, ds = self.opendset()\n #\n # Now add the images\n #\n start_time = time.clock() # time this\n nframes = 0 # number completed\n print_every = 1; marker = \" .\";\n print('Frames written (of %s):' % self.ntowrite, end=\"\")\n for i in range(self.nfiles):\n if nframes >= self.ntowrite: break\n\n logging.debug('processing file %d of %d' % (i+1, self.nfiles))\n img_i = fabio.open(self.files[i])\n nfi = img_i.nframes\n for j in range(nfi):\n msg = '... file %d/image %d' % (i, j)\n logging.debug(msg)\n if j < self.nempty:\n logging.debug('... empty frame ... skipping')\n else:\n ds[nframes, :, :] = img_i.data\n nframes += 1\n if numpy.mod(nframes, print_every) == 0:\n print(marker, nframes, end=\"\")\n print_every *= 2\n sys.stdout.flush()\n logging.debug('... 
wrote image %s of %s' %\\\n (nframes, self.ntowrite))\n if nframes >= self.ntowrite:\n logging.debug('wrote last frame: stopping')\n break\n if j < nfi - 1:\n # on last frame in file, fabio will look for next file\n img_i = img_i.next()\n\n f.close()\n print(\"\\nTime to write: %f seconds \" %(time.clock()-start_time))", "def SaveAll():\n\tfor file in files:\n\t\tfile.SaveFile()", "def save(self, simulation_results: list):\n\n if self._dir_path is None or len(self._dir_path) < 1 or not os.path.isdir(self._dir_path):\n raise ValueError('Invalid dir path')\n\n best_sol = min(simulation_results, key=lambda x: x.result.best_fitness)\n\n self._file_name = best_sol.result.algorithm_title\n full_path = os.path.join(self._dir_path, self._file_name + '.gif')\n\n if os.path.isfile(full_path):\n raise ValueError('File already exists')\n\n img_arr = self._simulate_route(best_sol)\n imageio.mimsave(full_path, img_arr, fps=55, loop=0, duration=4)", "def pickle_examples(paths, save_path,train_mark):\n with open(save_path, 'wb') as ft:\n for p in paths:\n label = int(os.path.basename(p).split(\"_\")[0])\n with open(p, 'rb') as f:\n if train_mark == True:\n print(\"Train: img2bny %s\" % p, label)\n else:\n print(\"Val: img2bny %s\" % p, label)\n img_bytes = f.read()\n r = random.random()\n example = (label, img_bytes)\n pickle.dump(example, ft)", "def save_images(all_patients, contour_type='i_contour',\n main_dir='final_data/images/'):\n\n # create folder for contour_type\n dirname = main_dir + f'{contour_type}/'\n os.makedirs(dirname, exist_ok=True)\n\n for patient in all_patients:\n # create patient folders for saving\n dirname = main_dir + f'{contour_type}/{patient.dicom_id}/'\n os.makedirs(dirname, exist_ok=True)\n\n # create numpy arrays for the patient\n patient.create_numpy_arrays()\n\n # loop over slices in numpy array dict\n for slice_no in patient.all_numpy_dict:\n slice_dict = patient.all_numpy_dict[slice_no]\n\n # only show image for given contour type\n if slice_dict[f'{contour_type}_array'] is not None:\n\n img_array = slice_dict['dicom_array']\n msk_array = slice_dict[f'{contour_type}_array']\n\n show_img_msk_fromarray(img_array,\n msk_array,\n cmap='Wistia',\n sz=10, alpha=0.7,\n save_path=dirname +f'slice_{slice_no}.png')", "def write_all_patients():\n\n data_dir = sys.argv[1]\n output_dir = sys.argv[2]\n\n imgs, i_msks, o_msks = load_all_patients(data_dir=data_dir)\n\n for idx, array in enumerate(imgs):\n np.save(output_dir+'/img_'+str(idx), array)\n for idx, array in enumerate(i_msks):\n np.save(output_dir+'/i_msk_'+str(idx), array)\n for idx, array in enumerate(o_msks):\n np.save(output_dir + '/o_msk_' + str(idx), array)\n\n return None", "def write_npy(uri, gen):\n imdir = os.path.join(uri, 'image_volumes')\n if not os.path.exists(imdir):\n os.makedirs(imdir)\n\n griddir = os.path.join(uri, 'grid_volumes')\n if not os.path.exists(griddir):\n os.makedirs(griddir)\n\n # Make sure rotation and shuffle are turned off\n gen.channel_combo = None\n gen.shuffle = False\n gen.rotation = False\n gen.expval = True\n\n # Turn normalization off so that we can save as uint8\n gen.norm_im = False\n\n bs = gen.batch_size\n for i in range(len(gen)):\n if i % 1000 == 0:\n print(i)\n # Generate batch\n bch = gen.__getitem__(i)\n # loop over all examples in batch and save volume\n for j in range(bs):\n #get the frame name / unique ID\n fname = gen.list_IDs[gen.indexes[i*bs + j]]\n\n #and save\n print(fname)\n np.save(os.path.join(imdir, fname + '.npy'), bch[0][0][j].astype('uint8'))\n 
np.save(os.path.join(griddir, fname + '.npy'), bch[0][1][j])" ]
[ "0.711835", "0.70112777", "0.6918421", "0.6822654", "0.6764862", "0.6758189", "0.67128646", "0.67098147", "0.6670487", "0.665418", "0.66379416", "0.6584189", "0.6560411", "0.651001", "0.6488647", "0.6457336", "0.6429019", "0.6422496", "0.6414986", "0.638881", "0.6349937", "0.63433266", "0.6336029", "0.6316999", "0.63029647", "0.62871224", "0.62520385", "0.62495214", "0.62443113", "0.6213891", "0.6188822", "0.6183601", "0.6145531", "0.6140206", "0.61362743", "0.61135113", "0.6111349", "0.60966957", "0.60746735", "0.60444707", "0.6016554", "0.59949553", "0.5985973", "0.5985362", "0.5978382", "0.5950467", "0.59332484", "0.5923628", "0.5912371", "0.59119546", "0.5896677", "0.5892437", "0.5890171", "0.5887658", "0.5887635", "0.58865154", "0.58849335", "0.5866631", "0.5847086", "0.58463037", "0.5835385", "0.58279204", "0.5811946", "0.58040494", "0.5779163", "0.5775254", "0.5774162", "0.5771088", "0.5769197", "0.5767639", "0.5763244", "0.57590145", "0.5758385", "0.57582235", "0.5744884", "0.57436556", "0.5742217", "0.5732287", "0.57291985", "0.5719924", "0.5715934", "0.5709093", "0.5701404", "0.5701316", "0.5701156", "0.5698049", "0.5696529", "0.5693305", "0.5689642", "0.568835", "0.56838226", "0.56805396", "0.5679273", "0.5678258", "0.56753445", "0.56624615", "0.5650246", "0.5648545", "0.5648037", "0.5645484" ]
0.67297393
6
Utility function to read 12bit packed mraw files into uint16 array Will store entire array in memory!
def _read_uint12_video(data, shape): data = np.memmap(data, dtype=np.uint8, mode="r") fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T fst_uint12 = (fst_uint8 << 4) + (mid_uint8 >> 4) snd_uint12 = ((mid_uint8 % 16) << 8) + lst_uint8 return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), shape)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unpack_mraw_frame_12bit(file,n_pixels,start_frame=0):\n \n start_byte = start_frame*n_pixels*12/8\n file.seek(start_byte)\n image = []\n \n n_bytes = n_pixels*12/8\n \n int_array = np.fromfile(file,count=n_bytes,dtype=np.uint8)\n \n bytes_1 = int_array[::3]\n bytes_2 = int_array[1::3] \n bytes_3 = int_array[2::3]\n\n \n # Here 2 pixels from the image are shared between three bytes of data like\n #\n # | byte 1 | byte 2 | byte 3 |\n # |o o o o o o o o|o o o o | o o o o|o o o o o o o o|\n # | Pixel 1 | Pixel 2 |\n #\n # byte 2 is shared between pixel and we need only the right-most bits for pixel 2 and\n # only the left most bits for pixel 1. \n \n # right-most bits of byte 2 = Most significant bits of Pixel 2\n # left-most bits of byte 2 = Least significant bits of Pixel 1\n \n pix_1 = np.array(16.0*bytes_1 + np.right_shift(bytes_2,4),dtype=np.uint16)\n pix_2 = np.array(256.0*np.bitwise_and(bytes_2,0b1111) + bytes_3,dtype=np.uint16)\n \n try:\n image = (np.dstack([pix_1,pix_2])).reshape((1,n_pixels))[0]\n except:\n image = np.zeros(n_pixels)\n return image", "def nb_read_uint12(data_chunk):\n \n #ensure that the data_chunk has the right length\n assert np.mod(data_chunk.shape[0],3)==0\n out = np.empty(data_chunk.size//3*2, dtype=np.uint16)\n\n for i in nb.prange(data_chunk.shape[0]//3):\n fst_uint8=np.uint16(data_chunk[i*3])\n mid_uint8=np.uint16(data_chunk[i*3+1])\n lst_uint8=np.uint16(data_chunk[i*3+2])\n \n out[i*2] = (fst_uint8 << 4) + (mid_uint8 >> 4)\n out[i*2+1] = ((mid_uint8 % 16) << 8) + lst_uint8\n \n return out", "def _read_uint12_video_prec(data, shape):\n data = np.memmap(data, dtype=np.uint8, mode=\"r\")\n return nb_read_uint12(data).reshape(shape)", "def raw_to_tif(file, channel=None ):\n \n def read_uint12(data_chunk):\n data = np.frombuffer(data_chunk, dtype=np.uint8)\n fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n # fst_uint12 = (fst_uint8 << 4) + (mid_uint8 >> 4)\n # snd_uint12 = (lst_uint8 << 4) + (np.bitwise_and(15, mid_uint8))\n fst_uint12 = (fst_uint8 << 4) + (np.bitwise_and(15, mid_uint8))\n snd_uint12 = (lst_uint8 << 4) + (mid_uint8 >> 4)\n return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), 2 * fst_uint12.shape[0])\n\n# def read_uint12(data_chunk):\n# data = np.frombuffer(data_chunk, dtype=np.uint8)\n# fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n# fst_uint12 = (fst_uint8 << 4) + (mid_uint8 >> 4)\n# snd_uint12 = ((mid_uint8 % 16) << 8) + lst_uint8\n# return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), 2 * fst_uint12.shape[0])\n\n# def read_uint12(data_chunk):\n# data = np.frombuffer(data_chunk, dtype=np.uint8)\n# fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n# fst_uint12 = ((mid_uint8 & 0x0F) << 8) | fst_uint8\n# snd_uint12 = (lst_uint8 << 4) | ((mid_uint8 & 0xF0) >> 4)\n# return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), 2 * fst_uint12.shape[0])\n \n # infile = 'd:\\\\Projekti\\\\Satelit\\\\CO\\\\Razpis\\\\Flat field images_new2020\\\\flatfield\\\\NHDBflat_1D'\n # infile = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Jure_naloga_banje_raw_pyt\\\\NHDRGoreMorje_3D'\n\n # in_path = 'p:\\\\NEMO\\Posnetki\\\\20201014_GoreMorje_data\\cele\\\\'\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Peking_PAN\\\\'\n # in_image_files = [filename for filename in os.listdir(in_path) if 
filename.lower().startswith(\"nhd\") and filename.lower().endswith(\"d\")]\n\n \n # infile = in_path + in_image_files[i]\n with open(file, 'rb', buffering=10) as f: # problem pri branju podatkov?\n byte = f.read()\n print(file)\n # # ar = open(infile, 'rb')\n # buffer = BytesIO()\n # byte = BytesIO(ar)\n \n img = read_uint12(byte)\n print(img)\n \n if channel==\"P\":\n img = img.reshape((2748, 3664)) # PAN\n else:\n img = img.reshape((2050, 2448)) # MS\n # img = img.reshape((2748, 3664)) # PAN\n\n size = img.shape\n \n \n out = file[:-4]+ \"_py.tif\"\n\n driver = gdal.GetDriverByName('GTiff')\n\n outRaster = driver.Create(out, size[1], size[0], 1, gdal.GDT_UInt16)\n\n outband = outRaster.GetRasterBand(1)\n outband.WriteArray(img)\n outband.FlushCache()", "def extract_data(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(28 * 28 * 10000 * 1)\n data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)\n data = (data - (255 / 2.0)) / 255\n data = data.reshape(10000, 28, 28, 1)\n return data", "def readFIBSEMdat(path, channel_index=-1, header=1024, magic_number=3555587570):\n ra = RandomAccessFile(path, 'r')\n try:\n # Check the magic number\n ra.seek(0)\n if ra.readInt() & 0xffffffff != magic_number:\n print \"Magic number mismatch\"\n return None\n # Read the number of channels\n ra.seek(32)\n numChannels = ra.readByte() & 0xff # a single byte as unsigned integer\n # Parse width and height\n ra.seek(100)\n width = ra.readInt()\n ra.seek(104)\n height = ra.readInt()\n print numChannels, width, height\n # Read the whole interleaved pixel array\n ra.seek(header)\n bytes = zeros(width * height * 2 * numChannels, 'b') # 2 for 16-bit\n ra.read(bytes)\n print \"read\", len(bytes), \"bytes\" # takes ~2 seconds\n # Parse as 16-bit array\n sb = ByteBuffer.wrap(bytes).order(ByteOrder.BIG_ENDIAN).asShortBuffer()\n shorts = zeros(width * height * numChannels, 'h')\n sb.get(shorts)\n # Deinterleave channels\n # With Weaver: fast\n channels = w.deinterleave(shorts, numChannels, channel_index)\n # With python array sampling: very slow, and not just from iterating whole array once per channel\n # seq = xrange(numChannels) if -1 == channel_index else [channel_index]\n #channels = [shorts[i::numChannels] for i in seq]\n # With clojure: extremely slow, may be using reflection unexpectedly\n #channels = deinterleave.invoke(shorts, numChannels)\n print len(channels)\n # Shockingly, these values are signed shorts, not unsigned!\n return [ArrayImgs.shorts(s, [width, height]) for s in channels]\n finally:\n ra.close()", "def load_spe(filename):\n def read_at(data, pos, size, ntype):\n raw.seek(pos)\n return np.fromfile(raw, ntype, size)\n raw = open(filename, 'rb')\n xdim = np.int64(read_at(raw, 42, 1, np.int16)[0])\n ydim = np.int64(read_at(raw, 656, 1, np.int16)[0])\n arr = read_at(raw, 4100, xdim*ydim, np.uint16)\n arr = arr.reshape((ydim, xdim))\n print('data shape: {}'.format(np.shape(arr)))\n if np.shape(arr)[0] == 1:\n arr = arr[0]\n print('data shape: {}'.format(np.shape(arr)))\n return arr", "def parse_to_numpy(audio):\n\treturn numpy.frombuffer(audio, dtype=numpy.int16)", "def readToMem(filePath, loggerInfo=None, cols=['X', 'Y', 'Z']):\n current_time = datetime.now().strftime(\"%H:%M:%S\")\n print(\"Read\", filePath, \"(\", current_time, \")\")\n\n headerOffset = 1024\n sectorSize = 512\n samplesPerSector = loggerInfo['first']['samplesPerSector']\n\n types = ['i2'] * len(cols) # The 2byte integer layout\n layout = 
np.dtype({'names': cols, 'formats': types}) # Name the columns of the array\n\n fp = open(filePath, \"rb\") # Open the file in read bytes mode\n memmap = memoryview(fp.read())\n fp.close()\n\n fileSize = len(memmap)\n sectors = (fileSize - headerOffset) // sectorSize\n samples = sectors * samplesPerSector\n\n masterArray = np.zeros((samples, ), dtype=layout)\n\n for i in range(sectors):\n #if i % (sectors // 4) == 0:\n #print(\"Read \", round(100.0 * i / sectors, 1), \"% complete\")\n #print(i/(sectors // 100))\n if i % (sectors // 100) == 0:\n pbar.printProgressBar(round((100.0 * i)/sectors, 0), 100, prefix=\"Read File\", printEnd=\" \")\n\n imp = np.frombuffer(memmap[headerOffset + i * sectorSize : headerOffset + (i+1) * sectorSize - 2], offset=30, dtype=layout)\n masterArray[i * samplesPerSector : (i+1) * samplesPerSector] = imp\n\n memmap.release()\n\n current_time = datetime.now().strftime(\"%H:%M:%S\")\n print(\"Read Complete. (\", current_time, \")\")\n return masterArray.view(np.int16).reshape(masterArray.shape + (-1,)).transpose().astype(np.float64, casting='safe')", "def tiffread(fname):\n from PIL import Image\n img = Image.open(fname)\n \n res = []\n offsets = []\n frame = 0\n try:\n for frame in itertools.count():\n img.seek(frame)\n aux = np.asarray(img)\n if aux.ndim == 0:\n if img.mode == 'I;16':\n aux = np.fromstring(img.tostring(), np.uint16)\n aux = np.reshape(aux, img.size[::-1])\n elif img.mode == 'I;16S':\n aux = np.fromstring(img.tostring(), np.int16)\n aux = np.reshape(aux, img.size[::-1])\n else:\n raise ValueError, \"unknown pixel mode\"\n res.append(aux)\n except EOFError:\n pass\n \n return np.asarray(res)", "def processing_data(raw_data):\n data = np.frombuffer(raw_data, np.uint8)\n data = np.reshape(data, [data.shape[0]//1029, -1])\n data = data[:, 5:]\n data = np.reshape(data, [1, -1])\n data = 256 * data[0, 0::2] + data[0, 1::2]\n data = 10 * (data / 65535)\n data = np.reshape(data, [-1, 8]).T\n return data", "def unpack_mraw_frame_10bit(file,n_pixels,start_frame=0):\n \n start_byte = start_frame*n_pixels*10/8\n file.seek(start_byte)\n image = []\n \n n_bytes = n_pixels*10/8\n \n int_array = np.fromfile(file,count=n_bytes,dtype=np.uint8)\n \n bytes_1 = int_array[::5]\n bytes_2 = int_array[1::5] \n bytes_3 = int_array[2::5]\n bytes_4 = int_array[3::5] \n bytes_5 = int_array[4::5]\n\n \n # Here 4 pixels from the image are shared between 5 bytes of data like\n #\n # | byte 1 | byte 2 | byte 3 | byte 4 | byte 5 |\n # |o o o o o o o o | o o | o o o o o o | o o o o | o o o o | o o o o o o | o o | o o o o o o o o|\n # | Pixel 1 | Pixel 2 | Pixel 3 | Pixel 4 |\n #\n # byte 2 is shared between pixel and we need only the right-most bits for pixel 2 and\n # only the left most bits for pixel 1. 
\n \n # right-most bits of byte 2 = Most significant bits of Pixel 2\n # left-most bits of byte 2 = Least significant bits of Pixel 1\n \n pix_1 = np.array(4.0*bytes_1 + np.right_shift(bytes_2,6),dtype=np.uint16)\n pix_2 = np.array(16.0*np.bitwise_and(bytes_2,0b111111) + np.right_shift(bytes_3,4),dtype=np.uint16)\n pix_3 = np.array(64.0*np.bitwise_and(bytes_3,0b1111) + np.right_shift(bytes_4,2),dtype=np.uint16)\n pix_4 = np.array(256.0*np.bitwise_and(bytes_4,0b11) + bytes_5,dtype=np.uint16)\n #try:\n image = (np.dstack([pix_1,pix_2,pix_3,pix_4])).reshape((1,n_pixels))[0]\n #except:\n # image = np.zeros(n_pixels)\n return image", "def m16i():\n\n global offset\n\n x = 0L\n for i in range(2):\n try:\n byte = midifile[offset]\n offset += 1\n except:\n error(\"Invalid MIDI file include (i16->int, offset=%s)\" % offset)\n x = (x << 8) + ord(byte)\n\n return int(x)", "def read_raw(rawfile, shape, dtype=np.uint16, kind='middleton'):\n\n # -- alert\n print(\"READ_RAW: reading {0}...\".format(rawfile))\n\n\n # -- read file\n if kind=='middleton':\n return np.fromfile(open(rawfile),dtype) \\\n .reshape(shape[2],shape[0],shape[1])[:,:,::-1] \\\n .transpose(1,2,0) \\\n .astype(float)", "def extract_data(filename, num_images, IMAGE_WIDTH):\n\n# this function definition has been taken from internet\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_WIDTH * IMAGE_WIDTH * num_images)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32) #Interpret a buffer as a 1-dimensional array\n data = data.reshape(num_images, IMAGE_WIDTH*IMAGE_WIDTH)\n return data", "def read_bin_file(file_name, config, mode=0, header=True, packet_num=1443):\n # Read file\n if header:\n data = remove_header(file_name, packet_num)\n else:\n data = np.fromfile(file_name, dtype=np.int16)\n\n frame = config[0]\n sample = config[1]\n chirp = config[2]\n tx_num = config[3]\n rx_num = config[4]\n\n if mode == 0:\n data = np.reshape(data, [-1, 8])\n data = data[:, 0:4:] + 1j * data[:, 4::]\n if rx_num == 4:\n cdata1 = np.reshape(data[:, 0], [frame, chirp, tx_num, sample])\n cdata1 = np.transpose(cdata1, [0, 1, 3, 2]) # frame, chirp, sample, channel\n cdata2 = np.reshape(data[:, 1], [frame, chirp, tx_num, sample])\n cdata2 = np.transpose(cdata2, [0, 1, 3, 2]) # frame, chirp, sample, channel\n cdata3 = np.reshape(data[:, 2], [frame, chirp, tx_num, sample])\n cdata3 = np.transpose(cdata3, [0, 1, 3, 2]) # frame, chirp, sample, channel\n cdata4 = np.reshape(data[:, 3], [frame, chirp, tx_num, sample])\n cdata4 = np.transpose(cdata4, [0, 1, 3, 2]) # frame, chirp, sample, channel\n\n if tx_num == 3:\n cdata = np.array([cdata1[:, :, :, 0], cdata2[:, :, :, 0], cdata3[:, :, :, 0], cdata4[:, :, :, 0],\n cdata1[:, :, :, 1], cdata2[:, :, :, 1], cdata3[:, :, :, 1], cdata4[:, :, :, 1],\n cdata1[:, :, :, 2], cdata2[:, :, :, 2], cdata3[:, :, :, 2], cdata4[:, :, :, 2]])\n cdata = np.transpose(cdata, [1, 2, 3, 0])\n # cdata = np.concatenate([cdata1, cdata2, cdata3, cdata4], axis=3)\n return cdata # frame, chirp, sample, channel(tx1,tx2,tx3)\n\n elif tx_num == 1:\n cdata = np.array([cdata1[:, :, :, 0], cdata2[:, :, :, 0], cdata3[:, :, :, 0], cdata4[:, :, :, 0]])\n cdata = np.transpose(cdata, [1, 2, 3, 0])\n return cdata # frame, chirp, sample, channel\n\n elif mode == 1: # testing\n data = np.reshape(data, [-1, 4])\n data = data[:, 0:2:] + 1j * data[:, 2::]\n data = np.reshape(data, [frame, chirp, tx_num, rx_num, sample])\n if rx_num == 4:\n cdata1 = data[:, :, :, 0, :]\n cdata1 
= np.transpose(cdata1, [0, 1, 3, 2])\n cdata2 = data[:, :, :, 1, :]\n cdata2 = np.transpose(cdata2, [0, 1, 3, 2])\n cdata3 = data[:, :, :, 2, :]\n cdata3 = np.transpose(cdata3, [0, 1, 3, 2])\n cdata4 = data[:, :, :, 3, :]\n cdata4 = np.transpose(cdata4, [0, 1, 3, 2])\n\n if tx_num == 3:\n cdata = np.concatenate((cdata1, cdata2, cdata3, cdata4), axis=3)\n return cdata # frame, chirp, sample, channel\n\n elif tx_num == 1:\n cdata = np.array([cdata1[:, :, :, 0], cdata2[:, :, :, 0], cdata3[:, :, :, 0], cdata4[:, :, :, 0]])\n cdata = np.transpose(cdata, [1, 2, 3, 0])\n return cdata # frame, chirp, sample, channel\n\n elif mode == 2:\n data = np.reshape(data, [-1, 4])\n data = data[:, 0:2:] + 1j * data[:, 2::]\n data = np.reshape(data, [frame, chirp * tx_num, rx_num, sample])\n return data\n\n else:\n raise ValueError", "def extract_data(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)\n data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)\n# data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\n data = data.reshape(num_images, IMAGE_SIZE * IMAGE_SIZE)\n return data", "def extract_data(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)\n data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\n data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)\n return data", "def unpack(file, legacy=False):\r\n with open(file, 'rb') as f:\r\n \r\n # Read configuration part of data\r\n config = read_config_data(f, legacy)\r\n \r\n # Compute range bins in datas\r\n scan_start_time = float(config['scan_start'])\r\n start_range = SPEED_OF_LIGHT * ((scan_start_time * 1e-12) - DT_0 * 1e-9) / 2\r\n \r\n # Read data\r\n data = dict()\r\n data= {'scan_data': [],\r\n 'time_stamp': [],\r\n 'packet_ind': [],\r\n 'packet_pulse_ind': [],\r\n 'range_bins': [],\r\n 'config': config}\r\n single_scan_data = []\r\n packet_count = 0\r\n pulse_count = 0\r\n \r\n while True:\r\n \r\n # Read a single data packet and break loop if not a complete packet\r\n # (in terms of size)\r\n packet = f.read(1452)\r\n if len(packet) < 1452:\r\n break \r\n \r\n # Get information from first packet about how scans are stored and \r\n # range bins collected\r\n if packet_count == 0:\r\n num_range_bins = np.frombuffer(packet[44:48], dtype='>u4')[0]\r\n num_packets_per_scan = np.frombuffer(packet[50:52], dtype='>u2')[0]\r\n drange_bins = SPEED_OF_LIGHT * T_BIN * 1e-9 / 2\r\n range_bins = start_range + drange_bins * np.arange(0, num_range_bins, 1)\r\n packet_count += 1\r\n \r\n # Number of samples in current packet and packet index\r\n num_samples = np.frombuffer(packet[42:44], dtype='>u2')[0]\r\n data['packet_ind'].append(np.frombuffer(packet[48:50], dtype='>u2')[0])\r\n \r\n # Extract radar data samples from current packet; process last \r\n # packet within a scan seperately to get all data\r\n packet_data = np.frombuffer(packet[52:(52 + 4 * num_samples)], \r\n dtype='>i4')\r\n single_scan_data.append(packet_data)\r\n \r\n if packet_count % num_packets_per_scan == 0:\r\n data['scan_data'].append(np.concatenate(single_scan_data))\r\n data['time_stamp'].append(np.frombuffer(packet[8:12], \r\n dtype='>u4')[0])\r\n single_scan_data = []\r\n pulse_count += 1\r\n \r\n # Add last 
partial scan if present\r\n if single_scan_data:\r\n single_scan_data = np.concatenate(single_scan_data)\r\n num_pad = data['scan_data'][0].size - single_scan_data.size\r\n single_scan_data = np.pad(single_scan_data, (0, num_pad), \r\n 'constant', constant_values=0)\r\n data['scan_data'].append(single_scan_data)\r\n \r\n # Stack scan data into 2-D array \r\n # (rows -> pulses, columns -> range bins)\r\n data['scan_data'] = np.stack(data['scan_data'])\r\n \r\n # Finalize entries in data\r\n data['time_stamp'] = np.asarray(data['time_stamp'])\r\n data['range_bins'] = range_bins\r\n \r\n with open('../Raw_Data/data.pkl', 'wb') as o:\r\n pickle.dump(data, o)\r\n return data", "def extract_data(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)\n data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\n data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1)\n return data", "def extract_data(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)\n data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)\n data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\n data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)\n return data", "def readData():\n\tN = 800\n\tD = 28*28\n\tX = np.zeros((N, D), dtype=np.uint8)\n\n\tf = open(\"data/a012_images.dat\", 'rb')\n\n\tfor i in range(0, N):\n\t\tX[i, :] = np.fromstring(f.read(D), dtype='uint8')\n\n\tf.close()\n\n\treturn X", "def _load_bt12_data(region):\n fname = os.path.join(\n os.path.dirname(__file__), 'data', region + '_bt12_trms4osc.pars')\n\n return np.rec.fromrecords(\n np.loadtxt(fname, skiprows=4, usecols=range(9)),\n names='mag,dist,c1,c2,c3,c4,c5,c6,c7')", "def le_binario_mgbq(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def readRawSamples(fname):\n\n d = numpy.fromfile(fname, dtype=numpy.float32)\n #d = d.astype(numpy.float64)\n #d = (d - 128) / 128.0\n\n return d[::2] + 1j * d[1::2]", "def _read_datafile(self,path):\n \tlabels, images = [], []\n \twith gzip.GzipFile(path) as f:\n \t for line in f:\n \t vals = line.strip().split()\n \t labels.append(float(vals[0]))\n \t images.append([float(val) for val in vals[1:]])\n \tlabels = np.array(labels, dtype=np.int32)\n \tlabels[labels == 10] = 0 # fix weird 0 labels\n \timages = np.array(images, dtype=np.float32).reshape(-1, 16, 16, 1)\n \timages = (images + 1) / 2\n \treturn images, labels", "def read_matrix_from_binary(file_name):\n with open(file_name, 'rb') as file:\n buffer = file.read()\n n_row = int.from_bytes(buffer[0:4], 'little')\n n_col = int.from_bytes(buffer[4:8], 'little')\n matrix = numpy.frombuffer(buffer[8:], dtype=float).reshape([n_row, n_col])\n return matrix", "def readH264(path,flag='mask'):\n # known size of the images\n rows = 128\n cols = 128\n\n # read in raw bytes as a 1D array\n arr = np.fromfile(path,dtype='uint16')\n\n if flag=='mask':\n ## update values based on code\n # get code\n code_array = np.bitwise_and(arr,0xF000)\n # CODE_VAL_SEUIL2\n arr[code_array==0xD000] = 0xF800\n # CODE_VAL_CONTOUR\n arr[code_array==0xB000] = 0xF81F\n # CODE_VAL_MAX\n arr[code_array==0xC000] = 0x0000\n # CODE_VAL_SEUIL1\n arr[code_array==0xE000] = 0x001F\n\n ## just lower 12-bits\n arr = 
np.bitwise_and(arr,0x0FFF)\n\n ## convert data to frames\n # break the data into chunks that are 1d frames\n frames_set = np.split(arr,int(arr.shape[0]/(rows*cols)))\n # combined frames together into a 3d array and interpret as float16 data type\n return np.dstack([np.reshape(f,(rows,cols)) for f in frames_set]).astype('float16')", "def read_ultrasound_file(ult_file):\n\n return np.fromfile(open(ult_file, \"rb\"), dtype=np.uint8)", "def read_int16(ucode_file):\n return int.from_bytes(ucode_file.read(2), 'little')", "def getdata(filename, rw=False, verbose=False):\n sh, dt, header = getheader(filename)\n if verbose:\n print(('Reading %s...\\n%s' % (filename, header)))\n mode = ['c', 'r+']\n return np.memmap(filename, mode=mode[rw], shape=sh, dtype=dt, order='F',\n offset=512)", "def list_to_uint16_array(data_list):\n data_array = _populate_array(data_list, driver.uint16_array)\n return data_array", "def load_images(mraw, h, w, N, bit=16, roll_axis=True):\n\n if int(bit) == 16:\n images = np.memmap(mraw, dtype=np.uint16, mode='r', shape=(N, h, w))\n elif int(bit) == 8:\n images = np.memmap(mraw, dtype=np.uint8, mode='r', shape=(N, h, w))\n elif int(bit) == 12:\n warnings.warn(\"12bit images will be loaded into memory!\")\n #images = _read_uint12_video(mraw, (N, h, w))\n images = _read_uint12_video_prec(mraw, (N, h, w))\n else:\n raise Exception(f\"Unsupported bit depth: {bit}\")\n\n\n #images=np.fromfile(mraw, dtype=np.uint16, count=h * w * N).reshape(N, h, w) # about a 1/3 slower than memmap when loading to RAM. Also memmap doesn't need to read to RAM but can read from disc when needed.\n if roll_axis:\n return np.rollaxis(images, 0, 3)\n else:\n return images", "def read_bin(filename):\n import sys\n import numpy as np\n\n with open(filename + '.flt', \"rb\") as f:\n raster_data = np.fromstring(f.read(), 'f')\n\n if sys.byteorder == 'big':\n raster_data = raster_data.byteswap() #ensures data is little endian\n\n return raster_data", "def read_sp2(file_name, debug=False, arm_convention=True):\n\n my_data = open(file_name, \"rb\").read()\n # Get file date from name\n if platform.system() == \"Windows\":\n split_file_name = file_name.split(\"\\\\\")\n else:\n split_file_name = file_name.split(\"/\")\n if arm_convention:\n next_split = split_file_name[-1].split(\".\")\n dt = datetime.strptime(next_split[2], \"%Y%m%d\")\n else:\n dt = datetime.strptime(split_file_name[-1][0:8], \"%Y%m%d\")\n\n if len(my_data) > 0:\n bytepos = 0\n numCols = struct.unpack(\">I\", my_data[bytepos:bytepos + 4])[0]\n bytepos += 4\n numChannels = struct.unpack(\">I\", my_data[bytepos:bytepos + 4])[0]\n if debug:\n print((\"Loaded file with numCols = {}, numChannels = {}\"\n .format(numCols, numChannels)))\n\n data_points_per_record = numChannels * numCols\n\n bytes_per_record = 2 * data_points_per_record\n bytes_not_data_array = 12 + 2 + 28 + 16\n bytes_per_record += bytes_not_data_array\n last_pos = int(bytes_per_record - 1)\n num_spare_cols = struct.unpack(\">I\", my_data[last_pos - 4:last_pos])[0]\n if debug:\n print(\"Number of spare columns = %d\" % num_spare_cols)\n\n if num_spare_cols != 0:\n bytes_per_record += num_spare_cols\n\n numRecords = int(len(my_data) / bytes_per_record)\n totalRows = numChannels * numRecords\n DataWave = np.zeros((totalRows, numCols), dtype='int16')\n Flag = np.zeros(int(totalRows / numChannels), dtype='int16')\n TimeWave = np.zeros(numRecords, dtype='float64')\n Res1 = np.zeros(numRecords, dtype='float32')\n EventIndex = np.zeros(numRecords, dtype='float32')\n TimeDiv10000 = 
np.zeros(numRecords, dtype='float64')\n TimeRemainder = np.zeros(numRecords, dtype='float64')\n Res5 = np.zeros(numRecords, dtype='float32')\n Res6 = np.zeros(numRecords, dtype='float32')\n Res7 = np.zeros(numRecords, dtype='float64')\n Res8 = np.zeros(numRecords, dtype='float64')\n if num_spare_cols != 0:\n SpareDataArray = np.zeros(numRecords, num_spare_cols)\n\n arrayFmt = \">\"\n for i in range(data_points_per_record):\n arrayFmt += \"h\"\n\n for record in range(numRecords):\n dataStartPoint = record * bytes_per_record + 8\n startRow = record * numChannels\n endRow = startRow + numChannels - 1\n the_row = np.array(struct.unpack(\n arrayFmt, my_data[dataStartPoint:dataStartPoint + int(data_points_per_record * 2)]))\n\n DataWave[startRow:endRow + 1, 0:numCols] = the_row.reshape(\n numCols, numChannels).T\n dataStartPoint += data_points_per_record * 2\n Flag[record] = struct.unpack(\">h\", my_data[dataStartPoint:dataStartPoint + 2])[0]\n next_floats = struct.unpack(\">ffffffff\", my_data[dataStartPoint + 2:dataStartPoint + 34])\n TimeWave[record] = next_floats[0]\n Res1[record] = next_floats[1]\n EventIndex[record] = next_floats[2]\n TimeDiv10000[record] = next_floats[3]\n TimeRemainder[record] = next_floats[4]\n Res5[record] = next_floats[5]\n Res6[record] = next_floats[6]\n next_doubles = struct.unpack(\">dd\", my_data[dataStartPoint + 34:dataStartPoint + 50])\n Res7[record] = next_doubles[0]\n Res8[record] = next_doubles[1]\n dataStartPoint += 50\n\n if num_spare_cols != 0:\n startRow = (2 * num_spare_cols) * record\n dataStartPoint += bytes_not_data_array - 4\n spareFmt = \">\"\n for i in range(num_spare_cols):\n spareFmt += \"f\"\n\n SpareDataArray[record] = np.array(\n struct.unpack(spareFmt, my_data[dataStartPoint:dataStartPoint+4*num_spare_cols]))\n\n UTCtime = TimeDiv10000 * 10000 + TimeRemainder\n diff_epoch_1904 = (\n datetime(1970, 1, 1) - datetime(1904, 1, 1)).total_seconds()\n UTCdatetime = np.array([\n datetime.utcfromtimestamp(x - diff_epoch_1904) for x in UTCtime])\n\n DateTimeWave = (dt - datetime(1904, 1, 1)).total_seconds() + TimeWave\n\n # Make an xarray dataset for SP2\n Flag = xr.DataArray(Flag, dims={'event_index': EventIndex})\n Res1 = xr.DataArray(Res1, dims={'event_index': EventIndex})\n Res5 = xr.DataArray(Res5, dims={'event_index': EventIndex})\n Res6 = xr.DataArray(Res6, dims={'event_index': EventIndex})\n Res7 = xr.DataArray(Res7, dims={'event_index': EventIndex})\n Res8 = xr.DataArray(Res8, dims={'event_index': EventIndex})\n Time = xr.DataArray(UTCdatetime, dims={'event_index': EventIndex})\n EventInd = xr.DataArray(EventIndex, dims={'event_index': EventIndex})\n DateTimeWaveUTC = xr.DataArray(UTCtime, dims={'event_index': EventIndex})\n DateTimeWave = xr.DataArray(DateTimeWave, dims={'event_index': EventIndex})\n TimeWave = xr.DataArray(TimeWave, dims={'event_index': EventIndex})\n my_ds = xr.Dataset({'time': Time, 'Flag': Flag, 'Res1': Res1, 'Res5': Res5,\n 'Res6': Res6, 'Res7': Res7, 'Res8': Res8, 'EventIndex': EventInd,\n 'DateTimeWaveUTC': DateTimeWaveUTC, 'TimeWave': TimeWave,\n 'DateTimeWave': DateTimeWave})\n\n for i in range(numChannels):\n temp_array = np.zeros((numRecords, numCols), dtype='int')\n for j in range(numRecords):\n k = i + j*numChannels\n temp_array[j] = DataWave[k]\n my_ds['Data_ch' + str(i)] = xr.DataArray(\n temp_array, dims={'event_index': EventIndex, 'columns': np.arange(0, 100, 1)})\n del my_data\n del DataWave\n return my_ds\n else:\n return None", "def fits_to_nparray(file):\n hdu_list = fits.open(file)\n image_data = 
hdu_list[0].data\n image_data=image_data.astype(np.uint16)\n \n gdal_array.SaveArray(image_data, file[:-5]+\".tif\")\n \n return image_data", "def loader(filename,wdm=0,verbose=0,kmpers=1):\n with open(filename, 'rb') as f:\n if wdm == False:\n if verbose>1:\n print(filename)\n #file info\n info= np.fromfile(f,dtype=infodtype,count=1)\n infoBytes = f.tell()\n if verbose>2:\n print(infoBytes)\n #skip darkmatter\n #read the first dm line\n if verbose>2:\n print(f.tell())\n catd = np.fromfile(f,dtype= dmdtype, count=1) \n #get the bytes location and subtract off the bytes location after loading info to get n bytes a line for dm\n if verbose>2:\n print(f.tell())\n current = f.tell()\n dmBytes = current-infoBytes\n f.seek(dmBytes*(info['nd'][0]-1)+current)\n if verbose>2:\n print(f.tell())\n # stars setup \n cats= np.fromfile(f,dtype=stellardtype, count=info['ns'][0])\n if verbose>2:\n print('done')\n else:\n if verbose>1:\n print(filename)\n #file info\n info= np.fromfile(f,dtype=infodtype,count=1)\n if verbose>2:\n print(f.tell())\n # #dark matter setup count is reading the number of ?rows? \n catd= np.fromfile(f,dmdtype, count=info['nd'][0]) \n if verbose>2:\n print(f.tell()) \n # stars setup \n cats= np.fromfile(f,dtype=stellardtype, count=info['ns'][0])\n if verbose>2:\n print('done')\n \n \n #convert to physical units as found in README.md\n if wdm == True:\n catd['mass']*=2.324876e9\n if kmpers == 1:\n catd['vx']*=100.\n catd['vy']*=100.\n catd['vz']*=100.\n cats['mass']*=2.324876e9\n if kmpers == 1:\n cats['vx']*=100.\n cats['vy']*=100.\n cats['vz']*=100.\n \n if wdm == True:\n return(catd,cats,info)\n else:\n return(cats,info)", "def _read_data(self):\n with self._open(self.filename, 'rb') as f:\n try:\n f.seek(self._offset_data, self._offset_whence)\n except IOError:\n print('Error: hedp.io.HamamatsuFile seeking outside of file limits.')\n print(' Failed to parse file.')\n print(\" Either the 'offset' or 'dtype' input arguments must be wrong!\")\n raise\n except:\n raise\n\n data_len = np.prod(self.shape)*np.dtype(self._dtype).itemsize\n data_str = f.read(data_len)\n if data_len != len(data_str):\n print(data_len, len(data_str))\n raise ValueError('File ended before all data was read. 
Probably wrong offset or dtype!')\n\n\n self.data = np.fromstring(data_str, dtype=self._dtype).reshape(self.shape[::-1])\n self.data = np.ndarray.astype(self.data, 'float32')\n\n #self.data = np.fromfile(f, dtype=self._dtype,\n # count=np.prod(self.shape)).reshape(self.shape[::-1])", "def le_binario_mgb(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def le_binario_mgb(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def le_binario_mgb(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def le_binario_mgb(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def le_binario_mgb(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def le_binario_mgb(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def loadarb(self,wfm):\n wfm = wfm.astype('int16')\n l = str(2*len(wfm))\n self.instrument.write_raw('DATA:DAC VOLATILE, #{0}{1}{2}'.format(len(l),l,wfm.byteswap().tostring()))", "def binint(filename):\n return np.memmap(filename, dtype='int32')", "def read_u16(self) -> int:", "def le_binario_mgbp(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def le_binario_mgbp(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def _fread3_many(fobj, n):\n b1, b2, b3 = np.fromfile(fobj, \">u1\", 3 * n).reshape(-1,\n 3).astype(np.int).T\n return (b1 << 16) + (b2 << 8) + b3", "def file_to_bitarray(fname):\n ba = bitarray()\n with open(fname, 'rb') as f:\n ba.fromfile(f)\n return ba", "def nb_read_data(data_chunk):\n\t#ensure that the data_chunk has the right length\n\n\tassert np.mod(data_chunk.shape[0],3)==0\n\n\tout=np.empty(data_chunk.shape[0]//3*2,dtype=np.uint16)\n\timage1 = np.empty((2048,2048),dtype=np.uint16)\n\timage2 = np.empty((2048,2048),dtype=np.uint16)\n\n\tfor i in nb.prange(data_chunk.shape[0]//3):\n\t\tfst_uint8=np.uint16(data_chunk[i*3])\n\t\tmid_uint8=np.uint16(data_chunk[i*3+1])\n\t\tlst_uint8=np.uint16(data_chunk[i*3+2])\n\n\t\tout[i*2] = (fst_uint8 << 4) + (mid_uint8 >> 4)\n\t\tout[i*2+1] = ((mid_uint8 % 16) << 8) + lst_uint8\n\n\treturn out", "def _read_data(self, fh, byteorder='>'):\r\n fh.seek(len(self.header))\r\n data = fh.read()\r\n dtype = 'u1' if self.maxval < 256 else byteorder + 'u2'\r\n depth = 1 if self.magicnum == b\"P7 332\" else self.depth\r\n shape = [-1, self.height, self.width, depth]\r\n size = numpy.prod(shape[1:])\r\n if self.magicnum in b\"P1P2P3\":\r\n data = numpy.array(data.split(None, size)[:size], dtype)\r\n data = data.reshape(shape)\r\n elif self.maxval == 1:\r\n shape[2] = int(math.ceil(self.width / 8))\r\n data = numpy.frombuffer(data, dtype).reshape(shape)\r\n data = numpy.unpackbits(data, axis=-2)[:, :, :self.width, :]\r\n else:\r\n data = numpy.frombuffer(data, dtype)\r\n data = data[:size * (data.size // size)].reshape(shape)\r\n if data.shape[0] < 2:\r\n data = data.reshape(data.shape[1:])\r\n if data.shape[-1] < 2:\r\n data = data.reshape(data.shape[:-1])\r\n if self.magicnum == b\"P7 332\":\r\n rgb332 = numpy.array(list(numpy.ndindex(8, 8, 4)), numpy.uint8)\r\n rgb332 *= [36, 36, 85]\r\n data = numpy.take(rgb332, data, axis=0)\r\n return data", "def fromunformatted(file,dtype='float32', shape=None, skip=-1, count=-1):\n if skip >= 0:\n endcount = 1\n else:\n endcount = -1\n\n try:\n file.seek(0,1)\n except AttributeError:\n file = open(file)\n\n if skip > 0 or count >= 0:\n for i in range(skip):\n n1, = np.fromfile(file,'int32',count=1)\n file.seek(n1+4,1)\n\n if count > 0:\n res = 
np.empty((count,)+shape,dtype)\n for c in range(count):\n res[c,...] = fromunformatted(file,dtype,shape,skip=0)\n\n return res\n\n try:\n # skip header\n n1, = np.fromfile(file,'int32',count=1)\n except TypeError:\n raise\n else:\n n1 /= np.dtype(dtype).itemsize\n data = np.fromfile(file, dtype, count=n1)\n n2, = np.fromfile(file,'int32',count=endcount)\n\n if shape is not None:\n data = data.reshape(shape)\n\n return data", "def nb_read_data(data_chunk):\n\t#ensure that the data_chunk has the right length\n\tprint(data_chunk.shape)\n\tassert np.mod(data_chunk.shape[0],3)==0\n\n\tout=np.empty(data_chunk.shape[0]//3*2,dtype=np.uint16)\n\timage1 = np.empty((2048,2048),dtype=np.uint16)\n\timage2 = np.empty((2048,2048),dtype=np.uint16)\n\n\tfor i in nb.prange(data_chunk.shape[0]//3):\n\t\tfst_uint8=np.uint16(data_chunk[i*3])\n\t\tmid_uint8=np.uint16(data_chunk[i*3+1])\n\t\tlst_uint8=np.uint16(data_chunk[i*3+2])\n\n\t\tout[i*2] = (fst_uint8 << 4) + (mid_uint8 >> 4)\n\t\tout[i*2+1] = ((mid_uint8 % 16) << 8) + lst_uint8\n\n\treturn out", "def pack_unpack_hard():\n # Array is apprx. 1.5 GB large\n # should make apprx 1536 chunks\n pack_unpack(100, chunk_size=reverse_pretty('1M'), progress=simple_progress)", "def binfloat(filename):\n return np.memmap(filename, dtype='float32')", "def mhd_to_array(path):\n return sitk.GetArrayFromImage(sitk.ReadImage(path, sitk.sitkFloat32))", "def read_data(infile):\n extension = os.path.splitext(infile)[1]\n h = read_header(infile)\n nx = int(h['num_x_pts'])\n ny = int(h['num_y_pts'])\n nt = int(h['num_t_pts'])\n fid = open(infile, 'rb')\n fid.seek(512) #skip header\n if extension == '.aps' or extension == '.a3daps':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, ny, nt, order='F').copy() #make N-d image\n elif extension == '.a3d':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, nt, ny, order='F').copy() #make N-d image\n elif extension == '.ahi':\n data = np.fromfile(fid, dtype = np.float32, count = 2* nx * ny * nt)\n data = data.reshape(2, ny, nx, nt, order='F').copy()\n real = data[0,:,:,:].copy()\n imag = data[1,:,:,:].copy()\n fid.close()\n if extension != '.ahi':\n return data\n else:\n return real, imag", "def parse_file(fits_file, data_offset, records, record_length, field_position, field_size):\n bits = field_size * 8\n\n with open(fits_file, 'rb') as f:\n f.read(data_offset)\n for _ in range(0, records):\n record = f.read(record_length)\n value = record[field_position-1:field_position+field_size-1]\n print(multiparse(bits, value))", "def createSongsArray():\r\n file2write.write(\"unsigned short songs[5][2500][2] = {\")", "def wavread(fname):\n fh = wave.open(fname,'rb')\n (nchannels, sampwidth, framerate, nframes, comptype, compname) = fh.getparams()\n if sampwidth == 2:\n frames = fh.readframes(nframes * nchannels)\n dn = struct.unpack_from('%dh' % nframes*nchannels, frames)\n if nchannels > 1:\n out = np.array([dn[i::nchannels] for i in range(nchannels)])/float(2**15)\n else:\n out = np.array(dn)/float(2**15)\n else:\n print('not a 16 bit wav-file')\n out = [0]\n fh.close()\n return 
(out,framerate)", "def _load(self, filepath):\n import subprocess as sp\n command = ['ffmpeg',\n '-i', filepath,\n '-f', 's16le',\n '-acodec', 'pcm_s16le',\n '-ac', '1'] # channels: 2 for stereo, 1 for mono\n if self.sampling_rate != SAMPLING_RATE:\n command.extend(['-ar', str(self.sampling_rate)])\n command.append('-')\n # 30s at 44.1 kHz ~= 1.3e6\n proc = sp.run(command, stdout=sp.PIPE, bufsize=10**7, stderr=sp.DEVNULL, check=True)\n\n return np.fromstring(proc.stdout, dtype=\"int16\")", "def load_nifty_volume_as_array(filename, with_header = False):\n img = nibabel.load(filename)\n data = img.get_data()\n data = np.transpose(data, [2,1,0])\n if(with_header):\n return data, img.affine, img.header\n else:\n return data", "def load_nifty_volume_as_array(filename, with_header = False):\n img = nibabel.load(filename)\n data = img.get_data()\n data = np.transpose(data, [2,1,0])\n if(with_header):\n return data, img.affine, img.header\n else:\n return data", "def _decode_int_array(fp):\n size = _decode_int(fp)\n return list(struct.unpack('>{}i'.format(size), fp.read(size * 4)))", "def convert_uint16_to_array(value):\n return [\n (value >> 0 & 0xFF),\n (value >> 8 & 0xFF)\n ]", "def read_m16_ds_2(use_red=True, mass_bin='10.0_10.4'):\n if use_red:\n # sm_10.0_10.4. - sm_10.4_10.7. - sm_10.7_11.0. - sm_11.0_11.2.\\\n #- sm_11.2_11.4. - sm_11.4_11.6. - sm_11.6_15.0. - sm_11.0_15.0.\n fname = os.path.join(m16path, 'planck_lbg.ds.red.out')\n cols_dict ={\n '10.0_10.4': (0, 1, 2),\n '10.4_10.7': (0, 3, 4),\n '10.7_11.0': (0, 5, 6),\n '11.0_11.2': (0, 7, 8),\n '11.2_11.4': (0, 9, 10),\n '11.4_11.6': (0, 11, 12),\n '11.6_15.0': (0, 13, 14),\n '11.0_15.0': (0, 15, 16),\n }\n elif mass_bin in ['11.0_11.2','11.2_11.4','11.4_11.6','11.6_15.0']:\n fname = os.path.join(m16path, 'planck_lbg.ds.blue.rebinned.out')\n cols_dict ={\n '11.0_11.2': (0, 1, 2),\n '11.2_11.4': (0, 3, 4),\n '11.4_11.6': (0, 5, 6),\n '11.6_15.0': (0, 7, 8),\n }\n else:\n # sm_10.0_10.4. - sm_10.4_10.7. - sm_10.7_11.0. 
- sm_11.0_15.0.\n fname = os.path.join(m16path, 'planck_lbg.ds.blue.out')\n cols_dict ={\n '10.0_10.4': (0, 1, 2),\n '10.4_10.7': (0, 3, 4),\n '10.7_11.0': (0, 5, 6),\n '11.0_15.0': (0, 7, 8),\n }\n # Mpc/h, (h Msun/(physical pc)^2)\n rp, ds, ds_err = np.genfromtxt(fname, usecols=cols_dict[mass_bin],\\\n unpack=True)\n return(rp, ds, ds_err)", "def _read_arduino(self) -> np.ndarray:\r\n raw_data: bytes = self._serial_handle.read(self._chunk)\r\n int_data = [int(data_bit) for data_bit in raw_data]\r\n return np.array(int_data)", "def read_u16(self) -> int:\n ...", "def load_encoded(filename):\n return np.fromfile(filename, dtype='uint8')", "def _decode_long_array(fp):\n size = _decode_int(fp)\n return list(struct.unpack('>{}q'.format(size), fp.read(size * 8)))", "def read(filename):\n\n fileName, fileExtension = os.path.splitext(filename)\n wav_filename = filename\n rate, data = scipy.io.wavfile.read(str(wav_filename)) # the data is read in its native format\n if data.dtype =='int16':\n data = numpy.cast['float'](data)\n return [rate,data]", "def read_mb_file(self,idir='.',ifile=None, gmt=True, verbose=False):\n \n import numpy as np\n import os\n \n if gmt==True:\n gmt_file=idir+'/../maps/en_velo.gmt'\n if isinstance(gmt,str):\n gmt_file=gmt\n \n if gmt != False:\n self.read_lon_lat(gmt_file,verbose=verbose)\n \n if ifile is None:\n mb_file_basename= idir + '/mb_'+self.code+'_GPS.dat'\n else:\n mb_file_basename=ifile\n \n data_NEU = []\n for i in range(1,4):\n mb_file = mb_file_basename + str(i)\n\n # file\n self.ifile=os.path.abspath(mb_file)\n \n data=np.genfromtxt(mb_file,skip_header=4)\n \n # reshape to ensure a 2D array\n if len(data.shape)==1:\n data=data.reshape((1,data.shape[0]))\n \n\n\n data_NEU.append(data)\n\n if data_NEU[0].shape == data_NEU[1].shape == data_NEU[2].shape:\n self.data=np.zeros((data_NEU[0].shape[0],7))\n self.data[:,0]=data_NEU[0][:,0]\n self.data[:,1]=data_NEU[0][:,1]#*to_mm\n self.data[:,2]=data_NEU[1][:,1]#*to_mm\n self.data[:,3]=data_NEU[2][:,1]#*to_mm\n\n self.data[:,4]=data_NEU[0][:,2]#*to_mm\n self.data[:,5]=data_NEU[1][:,2]#*to_mm\n self.data[:,6]=data_NEU[2][:,2]#*to_mm\n\n else: \n print(\"!!! 
Error reading \",mb_file_basename,\" :*dat1, *dat2, *dat3 do not have the same length\")\n self.data = None", "def read12bit(self, register):\n valuearray = bytearray(self.device.readregistermulti(register, 2))\n return struct.unpack('!H', valuearray)[0] & ((2 ** 12) - 1) # Convert to short and discard first four bits", "def read_cycle_info(filename):\r\n \r\n # Open file and read it into a list of lines.\r\n fin = open(filename, \"r\")\r\n lines = fin.readlines()\r\n fin.close()\r\n \r\n info = [[]] * 256;\r\n\r\n for line in lines:\r\n fields = line.split(',')\r\n opc = int(fields[0],16)\r\n info[opc] = (int(fields[1]), int(fields[2]), int(fields[3]))\r\n return info", "def read_mhd_and_raw(path, numpyFlag=True):\n img = sitk.ReadImage(path)\n if not numpyFlag:\n return img\n\n nda = sitk.GetArrayFromImage(img) # (img(x,y,z)->numpyArray(z,y,x))\n return nda", "def samp_file_to_arr(labeled_file, total_size, entry_dtype='f8'):\n buf = []\n n = 0\n with open( labeled_file, 'rb' ) as fi:\n for _, line in enumerate(fi):\n n = n + 1\n r = random.random()\n if n <= total_size:\n buf.append(line)\n elif r < 1.0*total_size/n:\n loc = random.randint(0, total_size-1)\n buf[loc] = line\n return np.array([np.fromstring(s, sep=',', dtype='f8') for s in buf])", "def fread(fid, nelements, dtype):\n\n if dtype is np.str:\n dt = np.uint8 # WARNING: assuming 8-bit ASCII for np.str!\n else:\n dt = dtype\n\n data_array = np.fromfile(fid, dt, nelements)\n if data_array.size==1:data_array=data_array[0]\n return data_array", "def atmparamread(filename):\n f = open(filename, 'r')\n f.readline()\n line = f.readline()\n #Td = float(line.split()[0])\n #Pd = float(line.split()[1])\n #Mc = float(line.split()[2])\n #rc = float(line.split()[3])\n n = int(line.split()[0])\n f.readline()\n atm = 0*numpy.ndarray(shape=(n, ncol), dtype=float)\n S = 0*numpy.ndarray(shape=(n), dtype=float)\n for i in range(n):\n line = f.readline()\n S[i] = float(line.split()[0])\n for j in range(ncol ):\n atm[i, j] = float(line.split()[j+1])\n f.close()\n return atm, S", "def padread(filename, columns=4, out_dtype=np.float32):\n with open(filename, \"rb\") as f: \n A = np.fromfile(f, dtype=np.float32) # accel file: 32-bit float \"singles\"\n B = np.reshape(A, (-1, columns))\n if B.dtype == out_dtype:\n return B\n return B.astype(out_dtype)", "def load_raw(fname):\n # Read all the data from the file\n ctd = []\n with open(fname) as ctdfile:\n \n for line in ctdfile:\n \n if (line.find('*') < 0) and (line.find('#') < 0):\n \n # This line contains data; parse the line\n entries = line.strip().split()\n # Convert data to float64\n entries = [np.float64(entries[i]) \n for i in range(len(entries))]\n # Append to list\n ctd.append(entries)\n \n # Return the raw data as an numpy array\n return np.array(ctd)", "def load_info():\n data = np.loadtxt(\"u_sol_meta.txt\", dtype=int)\n return data", "def read (self, file):\n\t\tself.unpack (file.read (self.size()))", "def autodetect_endian_and_sanity_check_su(file):\n pos = file.tell()\n if isinstance(file, io.BytesIO):\n file.seek(0, 2)\n size = file.tell()\n file.seek(pos, 0)\n else:\n size = os.fstat(file.fileno())[6]\n if size < 244:\n return False\n # Also has to be a multiple of 4 in length because every header is 400 long\n # and every data value 4 byte long.\n elif (size % 4) != 0:\n return False\n # Jump to the number of samples field in the trace header.\n file.seek(114, 0)\n sample_count = file.read(2)\n interval = file.read(2)\n # Jump to the beginning of the year fields.\n file.seek(156, 0)\n 
year = file.read(2)\n jul_day = file.read(2)\n hour = file.read(2)\n minute = file.read(2)\n second = file.read(2)\n # Jump to previous position.\n file.seek(pos, 0)\n # Unpack in little and big endian.\n le_sample_count = unpack(b'<h', sample_count)[0]\n be_sample_count = unpack(b'>h', sample_count)[0]\n # Check if both work.\n working_byteorders = []\n if le_sample_count > 0:\n length = 240 + (le_sample_count * 4)\n if (size % length) == 0:\n working_byteorders.append('<')\n if be_sample_count > 0:\n length = 240 + (be_sample_count * 4)\n if (size % length) == 0:\n working_byteorders.append('>')\n # If None works return False.\n if len(working_byteorders) == 0:\n return False\n # Check if the other header values make sense.\n still_working_byteorders = []\n for bo in working_byteorders:\n fmt = (\"%sh\" % bo).encode('ascii', 'strict')\n this_interval = unpack(fmt, interval)[0]\n this_year = unpack(fmt, year)[0]\n this_julday = unpack(fmt, jul_day)[0]\n this_hour = unpack(fmt, hour)[0]\n this_minute = unpack(fmt, minute)[0]\n this_second = unpack(fmt, second)[0]\n # Make a sanity check for each.\n # XXX: The arbitrary maximum of the sample interval is 10 seconds.\n if this_interval <= 0 or this_interval > 10E7:\n continue\n # Some programs write two digit years.\n if this_year != 0 and (this_year < 1930 or this_year >= 2030) and \\\n (this_year < 0 or this_year >= 100):\n continue\n # 9999 is often used as a placeholder\n if (this_julday > 366 or this_julday < 0) and this_julday != 9999:\n continue\n if this_hour > 24 or this_hour < 0:\n continue\n if this_minute > 60 or this_minute < 0:\n continue\n if this_second > 60 or this_second < 0:\n continue\n still_working_byteorders.append(bo)\n length = len(still_working_byteorders)\n if not length:\n return False\n elif length == 1:\n return still_working_byteorders[0]\n else:\n # XXX: In the unlikely case both byte orders pass the sanity checks\n # something else should be checked. Currently it is not.\n msg = \"\"\"\n Both possible byte orders passed all sanity checks. Please contact\n the ObsPy developers so they can implement additional tests.\n \"\"\".strip()\n raise Exception(msg)", "def read_sm_product(filepath):\n # check the files are udp files\n if os.path.basename(filepath)[14:17] != 'UDP':\n raise ValueError('{} is not a UDP file'.format(filepath))\n\n # Open the data file for reading\n try:\n file = open(filepath, 'rb')\n except IOError:\n logging.exception('file {} does not exist'.format(filepath))\n raise\n\n # Read first unsigned int32, containing number of datapoints to iterate over\n n_grid_points = np.fromfile(file, dtype=np.uint32, count=1)[0]\n logging.debug('Data file contains {} data points'.format(n_grid_points))\n logging.debug('Reading file... 
')\n data = np.fromfile(file, dtype=datatype, count=n_grid_points)\n file.close()\n logging.debug('Done')\n\n return data", "def read_raw_data(file_name: str, ROWS: int, COLS: int, OFFSET=0) -> list:\r\n FILE = open(file_name, mode=\"r\")\r\n\r\n # Reading the data in the Single Dimensional form\r\n img = np.fromfile(\r\n FILE, dtype=np.uint8, count=ROWS * COLS, offset=((ROWS * COLS) * OFFSET)\r\n )\r\n\r\n # Shaping the data to the two dimensional format\r\n img = np.reshape(img, (ROWS, COLS)).tolist()\r\n\r\n FILE.close()\r\n return img", "def loadCudaStream(name):\n data=np.fromfile(name, dtype=\"float32\")\n data=data.reshape(int(len(data)/4), 4)\n data=np.delete(data,3,1)\n return data", "def load_nifty_volume_as_array(filename):\n img = sitk.ReadImage(filename)\n img_arr = sitk.GetArrayFromImage(img)\n return img_arr", "def read_results():\r\n with open(\"packing.nfo\", \"r\") as fin:\r\n fin.readline()\r\n fin.readline()\r\n por_theory = float(fin.readline().split()[2])\r\n por_final = float(fin.readline().split()[2])\r\n print('Theoretical porosity:', por_theory)\r\n print('Final porosity:', por_final)\r\n with open(\"packing.xyzd\", \"rb\") as fin:\r\n btxt = fin.read()\r\n txt = list(struct.unpack(\"<\" + \"d\" * (len(btxt) // 8), btxt))\r\n data = array(zip(*[iter(txt)] * 4))\r\n data[:, 3] = data[:, 3] * \\\r\n ((1 - por_final) / (1 - por_theory))**(1 / 3)\r\n return data", "def read_szx_fmv_12(eps_file):\n raw_data = eps_file.scaled_mdr\n raw_unscaled = eps_file.mdr\n mphr = eps_file.mphr\n\n n_node_per_line = raw_data[\"LONGITUDE\"].shape[1]\n n_lines = raw_data[\"LONGITUDE\"].shape[0]\n n_records = raw_data[\"LONGITUDE\"].size\n\n data = {}\n metadata = {}\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n ascat_time = shortcdstime2jd(raw_data[\"UTC_LINE_NODES\"].flatten()[\"day\"],\n raw_data[\"UTC_LINE_NODES\"].flatten()[\"time\"])\n data[\"jd\"] = ascat_time[idx_nodes]\n\n metadata[\"spacecraft_id\"] = np.int8(mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(mphr[\"ORBIT_START\"])\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n\n for f in fields:\n metadata[f] = np.int16(mphr[f.upper()])\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"abs_line_number\"\n ]\n\n for f in fields:\n data[f] = raw_data[f.upper()].flatten()[idx_nodes]\n\n fields = [(\"longitude\", long_nan), (\"latitude\", long_nan),\n (\"swath indicator\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].flatten()\n valid = raw_unscaled[f.upper()].flatten() != nan_val\n data[f][~valid] = nan_val\n\n fields = [(\"sigma0_trip\", long_nan), (\"inc_angle_trip\", uint_nan),\n (\"azi_angle_trip\", int_nan), (\"kp\", uint_nan),\n (\"num_val_trip\", ulong_nan), (\"f_kp\", byte_nan),\n (\"f_usable\", byte_nan), (\"f_f\", uint_nan), (\"f_v\", uint_nan),\n (\"f_oa\", uint_nan), (\"f_sa\", uint_nan), (\"f_tel\", uint_nan),\n (\"f_ref\", uint_nan), (\"f_land\", uint_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].reshape(n_records, 3)\n valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val\n data[f][~valid] = nan_val\n\n # modify longitudes from (0, 360) to (-180,180)\n mask = np.logical_and(data[\"longitude\"] != long_nan,\n data[\"longitude\"] > 180)\n data[\"longitude\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n mask = (data[\"azi_angle_trip\"] != int_nan) & (data[\"azi_angle_trip\"] < 0)\n 
data[\"azi_angle_trip\"][mask] += 360\n\n data[\"node_num\"] = np.tile((np.arange(n_node_per_line) + 1),\n n_lines).astype(np.uint8)\n\n data[\"line_num\"] = idx_nodes.astype(np.uint16)\n\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n data[\"swath_indicator\"] = data.pop(\"swath indicator\")\n\n return data, metadata", "def read_file(file):\n if opts.input_type == 'fits':\n data = fileio.read_fits(file)\n else:\n data = fileio.read_ascii(file)\n c_id = data[0,:]\n g_num = np.array(range(len(c_id)), dtype = 'int')\n g_id = data[3,:]\n g_ra = np.array(data[4,:], dtype = 'float')\n g_dec = np.array(data[5,:], dtype = 'float')\n g_z = np.array(data[6,:], dtype = 'float')\n return c_id, g_num, g_id, g_ra, g_dec, g_z", "def get_pict_data(fname):\n with open(fname, 'r') as f:\n return np.asarray(f.read().split(',')).reshape((11, 1024)).astype(int)", "def read_ascii(file):\n wvlen, band, mag, emag, fmag, unit, beam, odate, ref = [],[],[],[],[],[],[],[],[]\n with open(file, 'r') as f_in:\n for line in f_in:\n try:\n # ensure line contains data:\n a = float(line[0])\n except ValueError:\n a = 'dummy'\n try:\n # ensure mag or flux entry is not '--'\n m = float(line.split(' ')[2])\n except ValueError:\n m = 'dummy'\n \n if isinstance(a, float) and isinstance(m, float):\n wvlen.append(float(line.strip().split(' ')[0])) # in metres\n band.append(line.strip().split(' ')[1])\n mag.append(float(line.strip().split(' ')[2]))\n emag.append(line.strip().split(' ')[3])\n fmag.append(line.strip().split(' ')[4])\n unit.append(line.strip().split(' ')[5])\n beam.append(line.strip().split(' ')[6])\n odate.append(line.strip().split(' ')[7])\n ref.append(line.strip().split(' ')[8])\n \n return wvlen, band, mag, emag, fmag, unit, beam, odate, ref", "def disassemble(file, MB_limit=80, destination=None):\n # List of files to return\n filelist = []\n\n # Check file size in MB\n filesize = os.path.getsize(file) / 1000000\n\n # Filename\n filename = os.path.basename(file).replace('.fits', '')\n\n # Get destination\n if destination is None:\n destination = os.path.dirname(file)\n\n # If already small enough, do nothing\n if filesize > MB_limit:\n\n # Open the FITS file\n hdulist = fits.open(file, mode='update')\n\n # Strip file of data\n extensions = {}\n for hdu in hdulist:\n\n # Save the real data\n extensions[hdu.name] = hdu.data\n\n # Replace with tiny dummy array\n hdulist[hdu.name].data = None\n\n # Write to the file and close it\n hdulist.writeto(file, overwrite=True)\n hdulist.close()\n\n # Make a folder\n folder = filename + '_data'\n destination = os.path.join(destination, folder)\n os.system('mkdir {}'.format(destination))\n\n # Write the data to .npz files\n for ext, data in extensions.items():\n\n # Some are None\n if data is not None:\n\n # Check data size in MB\n datasize = data.nbytes\n\n # Get number of chunks\n nchunks = np.ceil(datasize / 1000000 / MB_limit).astype(int)\n\n # Break up into chunks\n chunks = np.array_split(data, nchunks + 2)\n\n # Save as .npz files\n for n, chunk in enumerate(chunks):\n\n # Determine filename\n chunkname = filename + '.{}.{}.npy'.format(ext, n)\n\n # Save the chunk to file\n filepath = os.path.join(destination, chunkname)\n np.save(filepath, chunk)\n\n # Add to list of filenames\n filelist.append(filepath)\n\n return filelist", "def _decode_35708(data):\n start_byte = 0\n n_bytes = 2\n var_id = struct.unpack('<H', data[start_byte:start_byte + n_bytes])[0]\n if var_id == 29974:\n start_byte += n_bytes\n n_bytes = 4\n var_size = 
struct.unpack('<I', data[start_byte:\n start_byte + n_bytes])[0]\n start_byte += n_bytes\n n_bytes = var_size\n\n return np.frombuffer(data[start_byte:start_byte + n_bytes],\n dtype=np.float64)", "def load_png16(fname):\n with Image.open(fname) as img:\n if hasattr(img, 'text') and 'min' in img.text and 'max' in img.text:\n vmin = float(img.text['min'])\n vmax = float(img.text['max'])\n arr = np.array(img).astype(np.float64) * \\\n ((vmax-vmin)/(2**16-1)) + vmin\n else:\n arr = np.array(img)\n return arr", "def hdr_to_Nifti(files):\r\n array = []\r\n for element in files:\r\n array = np.append(array, nib.load(element))\r\n\r\n print('array size: ', array.shape, '\\narray type: ', type(array))\r\n\r\n return array", "def decode_file(self, filename):\n num_bytes = os.stat(filename)[6]\n data = array.array('B')\n\n with open(filename, 'rb') as f:\n data.fromfile(f, num_bytes)\n\n return self.decode_data(data)", "def readFastaFile(filename):" ]
[ "0.68789697", "0.68381524", "0.66513246", "0.6528", "0.6359484", "0.6214181", "0.6206337", "0.6118009", "0.60529387", "0.60003304", "0.5936918", "0.593421", "0.59340906", "0.5930919", "0.58940655", "0.5846712", "0.58181554", "0.5809074", "0.5799255", "0.5763211", "0.5753879", "0.569734", "0.5679182", "0.5613523", "0.56096065", "0.5559068", "0.55411315", "0.55395705", "0.5537046", "0.5534046", "0.5520784", "0.55064666", "0.55055106", "0.5489941", "0.5482633", "0.5465561", "0.54511744", "0.54439205", "0.54374355", "0.54374355", "0.54374355", "0.54374355", "0.54374355", "0.54374355", "0.5435986", "0.54317737", "0.5428415", "0.54162884", "0.54162884", "0.54152274", "0.5403145", "0.5397583", "0.53878224", "0.53762096", "0.537094", "0.5367073", "0.5354103", "0.53526306", "0.53524274", "0.53470325", "0.533969", "0.53372896", "0.53314006", "0.5323054", "0.5323054", "0.53223294", "0.532131", "0.5313428", "0.5311305", "0.53051186", "0.52955425", "0.5279003", "0.52605855", "0.52481335", "0.5242152", "0.5235823", "0.52339727", "0.5212984", "0.52021164", "0.5201903", "0.5198456", "0.5194801", "0.51852363", "0.5173439", "0.5162157", "0.51577234", "0.514973", "0.5143659", "0.5135908", "0.5125856", "0.5110657", "0.510095", "0.51005405", "0.5097485", "0.5083629", "0.50803393", "0.5073898", "0.50654554", "0.50638115", "0.5062607" ]
0.70496243
0
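The record above and the one that follows both target utilities for reading 12-bit packed pixel data into uint16 arrays. A minimal sketch of that unpacking, assuming the two-pixels-per-three-bytes layout used in several of the samples (byte 1 plus the high nibble of byte 2 form the first pixel, the low nibble of byte 2 plus byte 3 form the second); the helper name unpack_12bit_to_uint16 and the tiny self-check are illustrative, not drawn from the dataset itself:

import numpy as np

def unpack_12bit_to_uint16(packed):
    # packed: 1-D uint8 buffer whose length is a multiple of 3
    # (every three bytes hold two 12-bit pixels)
    b = np.asarray(packed, dtype=np.uint8).reshape(-1, 3).astype(np.uint16)
    b1, b2, b3 = b[:, 0], b[:, 1], b[:, 2]
    pix1 = (b1 << 4) | (b2 >> 4)       # byte 1 + high nibble of byte 2
    pix2 = ((b2 & 0x0F) << 8) | b3     # low nibble of byte 2 + byte 3
    return np.stack([pix1, pix2], axis=1).ravel()

# two pixels 0xABC and 0xDEF pack into the bytes 0xAB, 0xCD, 0xEF
assert unpack_12bit_to_uint16([0xAB, 0xCD, 0xEF]).tolist() == [0xABC, 0xDEF]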
Utility function to read 12bit packed mraw files into uint16 array. Will store entire array in memory!
def _read_uint12_video_prec(data, shape): data = np.memmap(data, dtype=np.uint8, mode="r") return nb_read_uint12(data).reshape(shape)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_uint12_video(data, shape):\n data = np.memmap(data, dtype=np.uint8, mode=\"r\")\n fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n fst_uint12 = (fst_uint8 << 4) + (mid_uint8 >> 4)\n snd_uint12 = ((mid_uint8 % 16) << 8) + lst_uint8\n return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), shape)", "def unpack_mraw_frame_12bit(file,n_pixels,start_frame=0):\n \n start_byte = start_frame*n_pixels*12/8\n file.seek(start_byte)\n image = []\n \n n_bytes = n_pixels*12/8\n \n int_array = np.fromfile(file,count=n_bytes,dtype=np.uint8)\n \n bytes_1 = int_array[::3]\n bytes_2 = int_array[1::3] \n bytes_3 = int_array[2::3]\n\n \n # Here 2 pixels from the image are shared between three bytes of data like\n #\n # | byte 1 | byte 2 | byte 3 |\n # |o o o o o o o o|o o o o | o o o o|o o o o o o o o|\n # | Pixel 1 | Pixel 2 |\n #\n # byte 2 is shared between pixel and we need only the right-most bits for pixel 2 and\n # only the left most bits for pixel 1. \n \n # right-most bits of byte 2 = Most significant bits of Pixel 2\n # left-most bits of byte 2 = Least significant bits of Pixel 1\n \n pix_1 = np.array(16.0*bytes_1 + np.right_shift(bytes_2,4),dtype=np.uint16)\n pix_2 = np.array(256.0*np.bitwise_and(bytes_2,0b1111) + bytes_3,dtype=np.uint16)\n \n try:\n image = (np.dstack([pix_1,pix_2])).reshape((1,n_pixels))[0]\n except:\n image = np.zeros(n_pixels)\n return image", "def nb_read_uint12(data_chunk):\n \n #ensure that the data_chunk has the right length\n assert np.mod(data_chunk.shape[0],3)==0\n out = np.empty(data_chunk.size//3*2, dtype=np.uint16)\n\n for i in nb.prange(data_chunk.shape[0]//3):\n fst_uint8=np.uint16(data_chunk[i*3])\n mid_uint8=np.uint16(data_chunk[i*3+1])\n lst_uint8=np.uint16(data_chunk[i*3+2])\n \n out[i*2] = (fst_uint8 << 4) + (mid_uint8 >> 4)\n out[i*2+1] = ((mid_uint8 % 16) << 8) + lst_uint8\n \n return out", "def raw_to_tif(file, channel=None ):\n \n def read_uint12(data_chunk):\n data = np.frombuffer(data_chunk, dtype=np.uint8)\n fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n # fst_uint12 = (fst_uint8 << 4) + (mid_uint8 >> 4)\n # snd_uint12 = (lst_uint8 << 4) + (np.bitwise_and(15, mid_uint8))\n fst_uint12 = (fst_uint8 << 4) + (np.bitwise_and(15, mid_uint8))\n snd_uint12 = (lst_uint8 << 4) + (mid_uint8 >> 4)\n return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), 2 * fst_uint12.shape[0])\n\n# def read_uint12(data_chunk):\n# data = np.frombuffer(data_chunk, dtype=np.uint8)\n# fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n# fst_uint12 = (fst_uint8 << 4) + (mid_uint8 >> 4)\n# snd_uint12 = ((mid_uint8 % 16) << 8) + lst_uint8\n# return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), 2 * fst_uint12.shape[0])\n\n# def read_uint12(data_chunk):\n# data = np.frombuffer(data_chunk, dtype=np.uint8)\n# fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n# fst_uint12 = ((mid_uint8 & 0x0F) << 8) | fst_uint8\n# snd_uint12 = (lst_uint8 << 4) | ((mid_uint8 & 0xF0) >> 4)\n# return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), 2 * fst_uint12.shape[0])\n \n # infile = 'd:\\\\Projekti\\\\Satelit\\\\CO\\\\Razpis\\\\Flat field images_new2020\\\\flatfield\\\\NHDBflat_1D'\n # infile = 
'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Jure_naloga_banje_raw_pyt\\\\NHDRGoreMorje_3D'\n\n # in_path = 'p:\\\\NEMO\\Posnetki\\\\20201014_GoreMorje_data\\cele\\\\'\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Peking_PAN\\\\'\n # in_image_files = [filename for filename in os.listdir(in_path) if filename.lower().startswith(\"nhd\") and filename.lower().endswith(\"d\")]\n\n \n # infile = in_path + in_image_files[i]\n with open(file, 'rb', buffering=10) as f: # problem pri branju podatkov?\n byte = f.read()\n print(file)\n # # ar = open(infile, 'rb')\n # buffer = BytesIO()\n # byte = BytesIO(ar)\n \n img = read_uint12(byte)\n print(img)\n \n if channel==\"P\":\n img = img.reshape((2748, 3664)) # PAN\n else:\n img = img.reshape((2050, 2448)) # MS\n # img = img.reshape((2748, 3664)) # PAN\n\n size = img.shape\n \n \n out = file[:-4]+ \"_py.tif\"\n\n driver = gdal.GetDriverByName('GTiff')\n\n outRaster = driver.Create(out, size[1], size[0], 1, gdal.GDT_UInt16)\n\n outband = outRaster.GetRasterBand(1)\n outband.WriteArray(img)\n outband.FlushCache()", "def extract_data(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(28 * 28 * 10000 * 1)\n data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)\n data = (data - (255 / 2.0)) / 255\n data = data.reshape(10000, 28, 28, 1)\n return data", "def readFIBSEMdat(path, channel_index=-1, header=1024, magic_number=3555587570):\n ra = RandomAccessFile(path, 'r')\n try:\n # Check the magic number\n ra.seek(0)\n if ra.readInt() & 0xffffffff != magic_number:\n print \"Magic number mismatch\"\n return None\n # Read the number of channels\n ra.seek(32)\n numChannels = ra.readByte() & 0xff # a single byte as unsigned integer\n # Parse width and height\n ra.seek(100)\n width = ra.readInt()\n ra.seek(104)\n height = ra.readInt()\n print numChannels, width, height\n # Read the whole interleaved pixel array\n ra.seek(header)\n bytes = zeros(width * height * 2 * numChannels, 'b') # 2 for 16-bit\n ra.read(bytes)\n print \"read\", len(bytes), \"bytes\" # takes ~2 seconds\n # Parse as 16-bit array\n sb = ByteBuffer.wrap(bytes).order(ByteOrder.BIG_ENDIAN).asShortBuffer()\n shorts = zeros(width * height * numChannels, 'h')\n sb.get(shorts)\n # Deinterleave channels\n # With Weaver: fast\n channels = w.deinterleave(shorts, numChannels, channel_index)\n # With python array sampling: very slow, and not just from iterating whole array once per channel\n # seq = xrange(numChannels) if -1 == channel_index else [channel_index]\n #channels = [shorts[i::numChannels] for i in seq]\n # With clojure: extremely slow, may be using reflection unexpectedly\n #channels = deinterleave.invoke(shorts, numChannels)\n print len(channels)\n # Shockingly, these values are signed shorts, not unsigned!\n return [ArrayImgs.shorts(s, [width, height]) for s in channels]\n finally:\n ra.close()", "def load_spe(filename):\n def read_at(data, pos, size, ntype):\n raw.seek(pos)\n return np.fromfile(raw, ntype, size)\n raw = open(filename, 'rb')\n xdim = np.int64(read_at(raw, 42, 1, np.int16)[0])\n ydim = np.int64(read_at(raw, 656, 1, np.int16)[0])\n arr = read_at(raw, 4100, xdim*ydim, np.uint16)\n arr = arr.reshape((ydim, xdim))\n print('data shape: {}'.format(np.shape(arr)))\n if np.shape(arr)[0] == 1:\n arr = arr[0]\n print('data shape: {}'.format(np.shape(arr)))\n return arr", "def parse_to_numpy(audio):\n\treturn numpy.frombuffer(audio, dtype=numpy.int16)", "def readToMem(filePath, 
loggerInfo=None, cols=['X', 'Y', 'Z']):\n current_time = datetime.now().strftime(\"%H:%M:%S\")\n print(\"Read\", filePath, \"(\", current_time, \")\")\n\n headerOffset = 1024\n sectorSize = 512\n samplesPerSector = loggerInfo['first']['samplesPerSector']\n\n types = ['i2'] * len(cols) # The 2byte integer layout\n layout = np.dtype({'names': cols, 'formats': types}) # Name the columns of the array\n\n fp = open(filePath, \"rb\") # Open the file in read bytes mode\n memmap = memoryview(fp.read())\n fp.close()\n\n fileSize = len(memmap)\n sectors = (fileSize - headerOffset) // sectorSize\n samples = sectors * samplesPerSector\n\n masterArray = np.zeros((samples, ), dtype=layout)\n\n for i in range(sectors):\n #if i % (sectors // 4) == 0:\n #print(\"Read \", round(100.0 * i / sectors, 1), \"% complete\")\n #print(i/(sectors // 100))\n if i % (sectors // 100) == 0:\n pbar.printProgressBar(round((100.0 * i)/sectors, 0), 100, prefix=\"Read File\", printEnd=\" \")\n\n imp = np.frombuffer(memmap[headerOffset + i * sectorSize : headerOffset + (i+1) * sectorSize - 2], offset=30, dtype=layout)\n masterArray[i * samplesPerSector : (i+1) * samplesPerSector] = imp\n\n memmap.release()\n\n current_time = datetime.now().strftime(\"%H:%M:%S\")\n print(\"Read Complete. (\", current_time, \")\")\n return masterArray.view(np.int16).reshape(masterArray.shape + (-1,)).transpose().astype(np.float64, casting='safe')", "def tiffread(fname):\n from PIL import Image\n img = Image.open(fname)\n \n res = []\n offsets = []\n frame = 0\n try:\n for frame in itertools.count():\n img.seek(frame)\n aux = np.asarray(img)\n if aux.ndim == 0:\n if img.mode == 'I;16':\n aux = np.fromstring(img.tostring(), np.uint16)\n aux = np.reshape(aux, img.size[::-1])\n elif img.mode == 'I;16S':\n aux = np.fromstring(img.tostring(), np.int16)\n aux = np.reshape(aux, img.size[::-1])\n else:\n raise ValueError, \"unknown pixel mode\"\n res.append(aux)\n except EOFError:\n pass\n \n return np.asarray(res)", "def processing_data(raw_data):\n data = np.frombuffer(raw_data, np.uint8)\n data = np.reshape(data, [data.shape[0]//1029, -1])\n data = data[:, 5:]\n data = np.reshape(data, [1, -1])\n data = 256 * data[0, 0::2] + data[0, 1::2]\n data = 10 * (data / 65535)\n data = np.reshape(data, [-1, 8]).T\n return data", "def unpack_mraw_frame_10bit(file,n_pixels,start_frame=0):\n \n start_byte = start_frame*n_pixels*10/8\n file.seek(start_byte)\n image = []\n \n n_bytes = n_pixels*10/8\n \n int_array = np.fromfile(file,count=n_bytes,dtype=np.uint8)\n \n bytes_1 = int_array[::5]\n bytes_2 = int_array[1::5] \n bytes_3 = int_array[2::5]\n bytes_4 = int_array[3::5] \n bytes_5 = int_array[4::5]\n\n \n # Here 4 pixels from the image are shared between 5 bytes of data like\n #\n # | byte 1 | byte 2 | byte 3 | byte 4 | byte 5 |\n # |o o o o o o o o | o o | o o o o o o | o o o o | o o o o | o o o o o o | o o | o o o o o o o o|\n # | Pixel 1 | Pixel 2 | Pixel 3 | Pixel 4 |\n #\n # byte 2 is shared between pixel and we need only the right-most bits for pixel 2 and\n # only the left most bits for pixel 1. 
\n \n # right-most bits of byte 2 = Most significant bits of Pixel 2\n # left-most bits of byte 2 = Least significant bits of Pixel 1\n \n pix_1 = np.array(4.0*bytes_1 + np.right_shift(bytes_2,6),dtype=np.uint16)\n pix_2 = np.array(16.0*np.bitwise_and(bytes_2,0b111111) + np.right_shift(bytes_3,4),dtype=np.uint16)\n pix_3 = np.array(64.0*np.bitwise_and(bytes_3,0b1111) + np.right_shift(bytes_4,2),dtype=np.uint16)\n pix_4 = np.array(256.0*np.bitwise_and(bytes_4,0b11) + bytes_5,dtype=np.uint16)\n #try:\n image = (np.dstack([pix_1,pix_2,pix_3,pix_4])).reshape((1,n_pixels))[0]\n #except:\n # image = np.zeros(n_pixels)\n return image", "def m16i():\n\n global offset\n\n x = 0L\n for i in range(2):\n try:\n byte = midifile[offset]\n offset += 1\n except:\n error(\"Invalid MIDI file include (i16->int, offset=%s)\" % offset)\n x = (x << 8) + ord(byte)\n\n return int(x)", "def read_raw(rawfile, shape, dtype=np.uint16, kind='middleton'):\n\n # -- alert\n print(\"READ_RAW: reading {0}...\".format(rawfile))\n\n\n # -- read file\n if kind=='middleton':\n return np.fromfile(open(rawfile),dtype) \\\n .reshape(shape[2],shape[0],shape[1])[:,:,::-1] \\\n .transpose(1,2,0) \\\n .astype(float)", "def extract_data(filename, num_images, IMAGE_WIDTH):\n\n# this function definition has been taken from internet\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_WIDTH * IMAGE_WIDTH * num_images)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32) #Interpret a buffer as a 1-dimensional array\n data = data.reshape(num_images, IMAGE_WIDTH*IMAGE_WIDTH)\n return data", "def read_bin_file(file_name, config, mode=0, header=True, packet_num=1443):\n # Read file\n if header:\n data = remove_header(file_name, packet_num)\n else:\n data = np.fromfile(file_name, dtype=np.int16)\n\n frame = config[0]\n sample = config[1]\n chirp = config[2]\n tx_num = config[3]\n rx_num = config[4]\n\n if mode == 0:\n data = np.reshape(data, [-1, 8])\n data = data[:, 0:4:] + 1j * data[:, 4::]\n if rx_num == 4:\n cdata1 = np.reshape(data[:, 0], [frame, chirp, tx_num, sample])\n cdata1 = np.transpose(cdata1, [0, 1, 3, 2]) # frame, chirp, sample, channel\n cdata2 = np.reshape(data[:, 1], [frame, chirp, tx_num, sample])\n cdata2 = np.transpose(cdata2, [0, 1, 3, 2]) # frame, chirp, sample, channel\n cdata3 = np.reshape(data[:, 2], [frame, chirp, tx_num, sample])\n cdata3 = np.transpose(cdata3, [0, 1, 3, 2]) # frame, chirp, sample, channel\n cdata4 = np.reshape(data[:, 3], [frame, chirp, tx_num, sample])\n cdata4 = np.transpose(cdata4, [0, 1, 3, 2]) # frame, chirp, sample, channel\n\n if tx_num == 3:\n cdata = np.array([cdata1[:, :, :, 0], cdata2[:, :, :, 0], cdata3[:, :, :, 0], cdata4[:, :, :, 0],\n cdata1[:, :, :, 1], cdata2[:, :, :, 1], cdata3[:, :, :, 1], cdata4[:, :, :, 1],\n cdata1[:, :, :, 2], cdata2[:, :, :, 2], cdata3[:, :, :, 2], cdata4[:, :, :, 2]])\n cdata = np.transpose(cdata, [1, 2, 3, 0])\n # cdata = np.concatenate([cdata1, cdata2, cdata3, cdata4], axis=3)\n return cdata # frame, chirp, sample, channel(tx1,tx2,tx3)\n\n elif tx_num == 1:\n cdata = np.array([cdata1[:, :, :, 0], cdata2[:, :, :, 0], cdata3[:, :, :, 0], cdata4[:, :, :, 0]])\n cdata = np.transpose(cdata, [1, 2, 3, 0])\n return cdata # frame, chirp, sample, channel\n\n elif mode == 1: # testing\n data = np.reshape(data, [-1, 4])\n data = data[:, 0:2:] + 1j * data[:, 2::]\n data = np.reshape(data, [frame, chirp, tx_num, rx_num, sample])\n if rx_num == 4:\n cdata1 = data[:, :, :, 0, :]\n cdata1 
= np.transpose(cdata1, [0, 1, 3, 2])\n cdata2 = data[:, :, :, 1, :]\n cdata2 = np.transpose(cdata2, [0, 1, 3, 2])\n cdata3 = data[:, :, :, 2, :]\n cdata3 = np.transpose(cdata3, [0, 1, 3, 2])\n cdata4 = data[:, :, :, 3, :]\n cdata4 = np.transpose(cdata4, [0, 1, 3, 2])\n\n if tx_num == 3:\n cdata = np.concatenate((cdata1, cdata2, cdata3, cdata4), axis=3)\n return cdata # frame, chirp, sample, channel\n\n elif tx_num == 1:\n cdata = np.array([cdata1[:, :, :, 0], cdata2[:, :, :, 0], cdata3[:, :, :, 0], cdata4[:, :, :, 0]])\n cdata = np.transpose(cdata, [1, 2, 3, 0])\n return cdata # frame, chirp, sample, channel\n\n elif mode == 2:\n data = np.reshape(data, [-1, 4])\n data = data[:, 0:2:] + 1j * data[:, 2::]\n data = np.reshape(data, [frame, chirp * tx_num, rx_num, sample])\n return data\n\n else:\n raise ValueError", "def extract_data(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)\n data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)\n# data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\n data = data.reshape(num_images, IMAGE_SIZE * IMAGE_SIZE)\n return data", "def extract_data(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)\n data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\n data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)\n return data", "def unpack(file, legacy=False):\r\n with open(file, 'rb') as f:\r\n \r\n # Read configuration part of data\r\n config = read_config_data(f, legacy)\r\n \r\n # Compute range bins in datas\r\n scan_start_time = float(config['scan_start'])\r\n start_range = SPEED_OF_LIGHT * ((scan_start_time * 1e-12) - DT_0 * 1e-9) / 2\r\n \r\n # Read data\r\n data = dict()\r\n data= {'scan_data': [],\r\n 'time_stamp': [],\r\n 'packet_ind': [],\r\n 'packet_pulse_ind': [],\r\n 'range_bins': [],\r\n 'config': config}\r\n single_scan_data = []\r\n packet_count = 0\r\n pulse_count = 0\r\n \r\n while True:\r\n \r\n # Read a single data packet and break loop if not a complete packet\r\n # (in terms of size)\r\n packet = f.read(1452)\r\n if len(packet) < 1452:\r\n break \r\n \r\n # Get information from first packet about how scans are stored and \r\n # range bins collected\r\n if packet_count == 0:\r\n num_range_bins = np.frombuffer(packet[44:48], dtype='>u4')[0]\r\n num_packets_per_scan = np.frombuffer(packet[50:52], dtype='>u2')[0]\r\n drange_bins = SPEED_OF_LIGHT * T_BIN * 1e-9 / 2\r\n range_bins = start_range + drange_bins * np.arange(0, num_range_bins, 1)\r\n packet_count += 1\r\n \r\n # Number of samples in current packet and packet index\r\n num_samples = np.frombuffer(packet[42:44], dtype='>u2')[0]\r\n data['packet_ind'].append(np.frombuffer(packet[48:50], dtype='>u2')[0])\r\n \r\n # Extract radar data samples from current packet; process last \r\n # packet within a scan seperately to get all data\r\n packet_data = np.frombuffer(packet[52:(52 + 4 * num_samples)], \r\n dtype='>i4')\r\n single_scan_data.append(packet_data)\r\n \r\n if packet_count % num_packets_per_scan == 0:\r\n data['scan_data'].append(np.concatenate(single_scan_data))\r\n data['time_stamp'].append(np.frombuffer(packet[8:12], \r\n dtype='>u4')[0])\r\n single_scan_data = []\r\n pulse_count += 1\r\n \r\n # Add last 
partial scan if present\r\n if single_scan_data:\r\n single_scan_data = np.concatenate(single_scan_data)\r\n num_pad = data['scan_data'][0].size - single_scan_data.size\r\n single_scan_data = np.pad(single_scan_data, (0, num_pad), \r\n 'constant', constant_values=0)\r\n data['scan_data'].append(single_scan_data)\r\n \r\n # Stack scan data into 2-D array \r\n # (rows -> pulses, columns -> range bins)\r\n data['scan_data'] = np.stack(data['scan_data'])\r\n \r\n # Finalize entries in data\r\n data['time_stamp'] = np.asarray(data['time_stamp'])\r\n data['range_bins'] = range_bins\r\n \r\n with open('../Raw_Data/data.pkl', 'wb') as o:\r\n pickle.dump(data, o)\r\n return data", "def extract_data(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)\n data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\n data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1)\n return data", "def extract_data(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)\n data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)\n data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\n data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)\n return data", "def readData():\n\tN = 800\n\tD = 28*28\n\tX = np.zeros((N, D), dtype=np.uint8)\n\n\tf = open(\"data/a012_images.dat\", 'rb')\n\n\tfor i in range(0, N):\n\t\tX[i, :] = np.fromstring(f.read(D), dtype='uint8')\n\n\tf.close()\n\n\treturn X", "def _load_bt12_data(region):\n fname = os.path.join(\n os.path.dirname(__file__), 'data', region + '_bt12_trms4osc.pars')\n\n return np.rec.fromrecords(\n np.loadtxt(fname, skiprows=4, usecols=range(9)),\n names='mag,dist,c1,c2,c3,c4,c5,c6,c7')", "def le_binario_mgbq(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def readRawSamples(fname):\n\n d = numpy.fromfile(fname, dtype=numpy.float32)\n #d = d.astype(numpy.float64)\n #d = (d - 128) / 128.0\n\n return d[::2] + 1j * d[1::2]", "def _read_datafile(self,path):\n \tlabels, images = [], []\n \twith gzip.GzipFile(path) as f:\n \t for line in f:\n \t vals = line.strip().split()\n \t labels.append(float(vals[0]))\n \t images.append([float(val) for val in vals[1:]])\n \tlabels = np.array(labels, dtype=np.int32)\n \tlabels[labels == 10] = 0 # fix weird 0 labels\n \timages = np.array(images, dtype=np.float32).reshape(-1, 16, 16, 1)\n \timages = (images + 1) / 2\n \treturn images, labels", "def read_matrix_from_binary(file_name):\n with open(file_name, 'rb') as file:\n buffer = file.read()\n n_row = int.from_bytes(buffer[0:4], 'little')\n n_col = int.from_bytes(buffer[4:8], 'little')\n matrix = numpy.frombuffer(buffer[8:], dtype=float).reshape([n_row, n_col])\n return matrix", "def readH264(path,flag='mask'):\n # known size of the images\n rows = 128\n cols = 128\n\n # read in raw bytes as a 1D array\n arr = np.fromfile(path,dtype='uint16')\n\n if flag=='mask':\n ## update values based on code\n # get code\n code_array = np.bitwise_and(arr,0xF000)\n # CODE_VAL_SEUIL2\n arr[code_array==0xD000] = 0xF800\n # CODE_VAL_CONTOUR\n arr[code_array==0xB000] = 0xF81F\n # CODE_VAL_MAX\n arr[code_array==0xC000] = 0x0000\n # CODE_VAL_SEUIL1\n arr[code_array==0xE000] = 0x001F\n\n ## just lower 12-bits\n arr = 
np.bitwise_and(arr,0x0FFF)\n\n ## convert data to frames\n # break the data into chunks that are 1d frames\n frames_set = np.split(arr,int(arr.shape[0]/(rows*cols)))\n # combined frames together into a 3d array and interpret as float16 data type\n return np.dstack([np.reshape(f,(rows,cols)) for f in frames_set]).astype('float16')", "def read_ultrasound_file(ult_file):\n\n return np.fromfile(open(ult_file, \"rb\"), dtype=np.uint8)", "def read_int16(ucode_file):\n return int.from_bytes(ucode_file.read(2), 'little')", "def getdata(filename, rw=False, verbose=False):\n sh, dt, header = getheader(filename)\n if verbose:\n print(('Reading %s...\\n%s' % (filename, header)))\n mode = ['c', 'r+']\n return np.memmap(filename, mode=mode[rw], shape=sh, dtype=dt, order='F',\n offset=512)", "def load_images(mraw, h, w, N, bit=16, roll_axis=True):\n\n if int(bit) == 16:\n images = np.memmap(mraw, dtype=np.uint16, mode='r', shape=(N, h, w))\n elif int(bit) == 8:\n images = np.memmap(mraw, dtype=np.uint8, mode='r', shape=(N, h, w))\n elif int(bit) == 12:\n warnings.warn(\"12bit images will be loaded into memory!\")\n #images = _read_uint12_video(mraw, (N, h, w))\n images = _read_uint12_video_prec(mraw, (N, h, w))\n else:\n raise Exception(f\"Unsupported bit depth: {bit}\")\n\n\n #images=np.fromfile(mraw, dtype=np.uint16, count=h * w * N).reshape(N, h, w) # about a 1/3 slower than memmap when loading to RAM. Also memmap doesn't need to read to RAM but can read from disc when needed.\n if roll_axis:\n return np.rollaxis(images, 0, 3)\n else:\n return images", "def list_to_uint16_array(data_list):\n data_array = _populate_array(data_list, driver.uint16_array)\n return data_array", "def read_bin(filename):\n import sys\n import numpy as np\n\n with open(filename + '.flt', \"rb\") as f:\n raster_data = np.fromstring(f.read(), 'f')\n\n if sys.byteorder == 'big':\n raster_data = raster_data.byteswap() #ensures data is little endian\n\n return raster_data", "def read_sp2(file_name, debug=False, arm_convention=True):\n\n my_data = open(file_name, \"rb\").read()\n # Get file date from name\n if platform.system() == \"Windows\":\n split_file_name = file_name.split(\"\\\\\")\n else:\n split_file_name = file_name.split(\"/\")\n if arm_convention:\n next_split = split_file_name[-1].split(\".\")\n dt = datetime.strptime(next_split[2], \"%Y%m%d\")\n else:\n dt = datetime.strptime(split_file_name[-1][0:8], \"%Y%m%d\")\n\n if len(my_data) > 0:\n bytepos = 0\n numCols = struct.unpack(\">I\", my_data[bytepos:bytepos + 4])[0]\n bytepos += 4\n numChannels = struct.unpack(\">I\", my_data[bytepos:bytepos + 4])[0]\n if debug:\n print((\"Loaded file with numCols = {}, numChannels = {}\"\n .format(numCols, numChannels)))\n\n data_points_per_record = numChannels * numCols\n\n bytes_per_record = 2 * data_points_per_record\n bytes_not_data_array = 12 + 2 + 28 + 16\n bytes_per_record += bytes_not_data_array\n last_pos = int(bytes_per_record - 1)\n num_spare_cols = struct.unpack(\">I\", my_data[last_pos - 4:last_pos])[0]\n if debug:\n print(\"Number of spare columns = %d\" % num_spare_cols)\n\n if num_spare_cols != 0:\n bytes_per_record += num_spare_cols\n\n numRecords = int(len(my_data) / bytes_per_record)\n totalRows = numChannels * numRecords\n DataWave = np.zeros((totalRows, numCols), dtype='int16')\n Flag = np.zeros(int(totalRows / numChannels), dtype='int16')\n TimeWave = np.zeros(numRecords, dtype='float64')\n Res1 = np.zeros(numRecords, dtype='float32')\n EventIndex = np.zeros(numRecords, dtype='float32')\n TimeDiv10000 = 
np.zeros(numRecords, dtype='float64')\n TimeRemainder = np.zeros(numRecords, dtype='float64')\n Res5 = np.zeros(numRecords, dtype='float32')\n Res6 = np.zeros(numRecords, dtype='float32')\n Res7 = np.zeros(numRecords, dtype='float64')\n Res8 = np.zeros(numRecords, dtype='float64')\n if num_spare_cols != 0:\n SpareDataArray = np.zeros(numRecords, num_spare_cols)\n\n arrayFmt = \">\"\n for i in range(data_points_per_record):\n arrayFmt += \"h\"\n\n for record in range(numRecords):\n dataStartPoint = record * bytes_per_record + 8\n startRow = record * numChannels\n endRow = startRow + numChannels - 1\n the_row = np.array(struct.unpack(\n arrayFmt, my_data[dataStartPoint:dataStartPoint + int(data_points_per_record * 2)]))\n\n DataWave[startRow:endRow + 1, 0:numCols] = the_row.reshape(\n numCols, numChannels).T\n dataStartPoint += data_points_per_record * 2\n Flag[record] = struct.unpack(\">h\", my_data[dataStartPoint:dataStartPoint + 2])[0]\n next_floats = struct.unpack(\">ffffffff\", my_data[dataStartPoint + 2:dataStartPoint + 34])\n TimeWave[record] = next_floats[0]\n Res1[record] = next_floats[1]\n EventIndex[record] = next_floats[2]\n TimeDiv10000[record] = next_floats[3]\n TimeRemainder[record] = next_floats[4]\n Res5[record] = next_floats[5]\n Res6[record] = next_floats[6]\n next_doubles = struct.unpack(\">dd\", my_data[dataStartPoint + 34:dataStartPoint + 50])\n Res7[record] = next_doubles[0]\n Res8[record] = next_doubles[1]\n dataStartPoint += 50\n\n if num_spare_cols != 0:\n startRow = (2 * num_spare_cols) * record\n dataStartPoint += bytes_not_data_array - 4\n spareFmt = \">\"\n for i in range(num_spare_cols):\n spareFmt += \"f\"\n\n SpareDataArray[record] = np.array(\n struct.unpack(spareFmt, my_data[dataStartPoint:dataStartPoint+4*num_spare_cols]))\n\n UTCtime = TimeDiv10000 * 10000 + TimeRemainder\n diff_epoch_1904 = (\n datetime(1970, 1, 1) - datetime(1904, 1, 1)).total_seconds()\n UTCdatetime = np.array([\n datetime.utcfromtimestamp(x - diff_epoch_1904) for x in UTCtime])\n\n DateTimeWave = (dt - datetime(1904, 1, 1)).total_seconds() + TimeWave\n\n # Make an xarray dataset for SP2\n Flag = xr.DataArray(Flag, dims={'event_index': EventIndex})\n Res1 = xr.DataArray(Res1, dims={'event_index': EventIndex})\n Res5 = xr.DataArray(Res5, dims={'event_index': EventIndex})\n Res6 = xr.DataArray(Res6, dims={'event_index': EventIndex})\n Res7 = xr.DataArray(Res7, dims={'event_index': EventIndex})\n Res8 = xr.DataArray(Res8, dims={'event_index': EventIndex})\n Time = xr.DataArray(UTCdatetime, dims={'event_index': EventIndex})\n EventInd = xr.DataArray(EventIndex, dims={'event_index': EventIndex})\n DateTimeWaveUTC = xr.DataArray(UTCtime, dims={'event_index': EventIndex})\n DateTimeWave = xr.DataArray(DateTimeWave, dims={'event_index': EventIndex})\n TimeWave = xr.DataArray(TimeWave, dims={'event_index': EventIndex})\n my_ds = xr.Dataset({'time': Time, 'Flag': Flag, 'Res1': Res1, 'Res5': Res5,\n 'Res6': Res6, 'Res7': Res7, 'Res8': Res8, 'EventIndex': EventInd,\n 'DateTimeWaveUTC': DateTimeWaveUTC, 'TimeWave': TimeWave,\n 'DateTimeWave': DateTimeWave})\n\n for i in range(numChannels):\n temp_array = np.zeros((numRecords, numCols), dtype='int')\n for j in range(numRecords):\n k = i + j*numChannels\n temp_array[j] = DataWave[k]\n my_ds['Data_ch' + str(i)] = xr.DataArray(\n temp_array, dims={'event_index': EventIndex, 'columns': np.arange(0, 100, 1)})\n del my_data\n del DataWave\n return my_ds\n else:\n return None", "def fits_to_nparray(file):\n hdu_list = fits.open(file)\n image_data = 
hdu_list[0].data\n image_data=image_data.astype(np.uint16)\n \n gdal_array.SaveArray(image_data, file[:-5]+\".tif\")\n \n return image_data", "def loader(filename,wdm=0,verbose=0,kmpers=1):\n with open(filename, 'rb') as f:\n if wdm == False:\n if verbose>1:\n print(filename)\n #file info\n info= np.fromfile(f,dtype=infodtype,count=1)\n infoBytes = f.tell()\n if verbose>2:\n print(infoBytes)\n #skip darkmatter\n #read the first dm line\n if verbose>2:\n print(f.tell())\n catd = np.fromfile(f,dtype= dmdtype, count=1) \n #get the bytes location and subtract off the bytes location after loading info to get n bytes a line for dm\n if verbose>2:\n print(f.tell())\n current = f.tell()\n dmBytes = current-infoBytes\n f.seek(dmBytes*(info['nd'][0]-1)+current)\n if verbose>2:\n print(f.tell())\n # stars setup \n cats= np.fromfile(f,dtype=stellardtype, count=info['ns'][0])\n if verbose>2:\n print('done')\n else:\n if verbose>1:\n print(filename)\n #file info\n info= np.fromfile(f,dtype=infodtype,count=1)\n if verbose>2:\n print(f.tell())\n # #dark matter setup count is reading the number of ?rows? \n catd= np.fromfile(f,dmdtype, count=info['nd'][0]) \n if verbose>2:\n print(f.tell()) \n # stars setup \n cats= np.fromfile(f,dtype=stellardtype, count=info['ns'][0])\n if verbose>2:\n print('done')\n \n \n #convert to physical units as found in README.md\n if wdm == True:\n catd['mass']*=2.324876e9\n if kmpers == 1:\n catd['vx']*=100.\n catd['vy']*=100.\n catd['vz']*=100.\n cats['mass']*=2.324876e9\n if kmpers == 1:\n cats['vx']*=100.\n cats['vy']*=100.\n cats['vz']*=100.\n \n if wdm == True:\n return(catd,cats,info)\n else:\n return(cats,info)", "def _read_data(self):\n with self._open(self.filename, 'rb') as f:\n try:\n f.seek(self._offset_data, self._offset_whence)\n except IOError:\n print('Error: hedp.io.HamamatsuFile seeking outside of file limits.')\n print(' Failed to parse file.')\n print(\" Either the 'offset' or 'dtype' input arguments must be wrong!\")\n raise\n except:\n raise\n\n data_len = np.prod(self.shape)*np.dtype(self._dtype).itemsize\n data_str = f.read(data_len)\n if data_len != len(data_str):\n print(data_len, len(data_str))\n raise ValueError('File ended before all data was read. 
Probably wrong offset or dtype!')\n\n\n self.data = np.fromstring(data_str, dtype=self._dtype).reshape(self.shape[::-1])\n self.data = np.ndarray.astype(self.data, 'float32')\n\n #self.data = np.fromfile(f, dtype=self._dtype,\n # count=np.prod(self.shape)).reshape(self.shape[::-1])", "def le_binario_mgb(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def le_binario_mgb(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def le_binario_mgb(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def le_binario_mgb(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def le_binario_mgb(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def le_binario_mgb(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def loadarb(self,wfm):\n wfm = wfm.astype('int16')\n l = str(2*len(wfm))\n self.instrument.write_raw('DATA:DAC VOLATILE, #{0}{1}{2}'.format(len(l),l,wfm.byteswap().tostring()))", "def binint(filename):\n return np.memmap(filename, dtype='int32')", "def read_u16(self) -> int:", "def le_binario_mgbp(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def le_binario_mgbp(filebin,nt,nc):\n return np.fromfile(filebin,'<f4').reshape(nt,nc)", "def _fread3_many(fobj, n):\n b1, b2, b3 = np.fromfile(fobj, \">u1\", 3 * n).reshape(-1,\n 3).astype(np.int).T\n return (b1 << 16) + (b2 << 8) + b3", "def file_to_bitarray(fname):\n ba = bitarray()\n with open(fname, 'rb') as f:\n ba.fromfile(f)\n return ba", "def nb_read_data(data_chunk):\n\t#ensure that the data_chunk has the right length\n\n\tassert np.mod(data_chunk.shape[0],3)==0\n\n\tout=np.empty(data_chunk.shape[0]//3*2,dtype=np.uint16)\n\timage1 = np.empty((2048,2048),dtype=np.uint16)\n\timage2 = np.empty((2048,2048),dtype=np.uint16)\n\n\tfor i in nb.prange(data_chunk.shape[0]//3):\n\t\tfst_uint8=np.uint16(data_chunk[i*3])\n\t\tmid_uint8=np.uint16(data_chunk[i*3+1])\n\t\tlst_uint8=np.uint16(data_chunk[i*3+2])\n\n\t\tout[i*2] = (fst_uint8 << 4) + (mid_uint8 >> 4)\n\t\tout[i*2+1] = ((mid_uint8 % 16) << 8) + lst_uint8\n\n\treturn out", "def _read_data(self, fh, byteorder='>'):\r\n fh.seek(len(self.header))\r\n data = fh.read()\r\n dtype = 'u1' if self.maxval < 256 else byteorder + 'u2'\r\n depth = 1 if self.magicnum == b\"P7 332\" else self.depth\r\n shape = [-1, self.height, self.width, depth]\r\n size = numpy.prod(shape[1:])\r\n if self.magicnum in b\"P1P2P3\":\r\n data = numpy.array(data.split(None, size)[:size], dtype)\r\n data = data.reshape(shape)\r\n elif self.maxval == 1:\r\n shape[2] = int(math.ceil(self.width / 8))\r\n data = numpy.frombuffer(data, dtype).reshape(shape)\r\n data = numpy.unpackbits(data, axis=-2)[:, :, :self.width, :]\r\n else:\r\n data = numpy.frombuffer(data, dtype)\r\n data = data[:size * (data.size // size)].reshape(shape)\r\n if data.shape[0] < 2:\r\n data = data.reshape(data.shape[1:])\r\n if data.shape[-1] < 2:\r\n data = data.reshape(data.shape[:-1])\r\n if self.magicnum == b\"P7 332\":\r\n rgb332 = numpy.array(list(numpy.ndindex(8, 8, 4)), numpy.uint8)\r\n rgb332 *= [36, 36, 85]\r\n data = numpy.take(rgb332, data, axis=0)\r\n return data", "def fromunformatted(file,dtype='float32', shape=None, skip=-1, count=-1):\n if skip >= 0:\n endcount = 1\n else:\n endcount = -1\n\n try:\n file.seek(0,1)\n except AttributeError:\n file = open(file)\n\n if skip > 0 or count >= 0:\n for i in range(skip):\n n1, = np.fromfile(file,'int32',count=1)\n file.seek(n1+4,1)\n\n if count > 0:\n res = 
np.empty((count,)+shape,dtype)\n for c in range(count):\n res[c,...] = fromunformatted(file,dtype,shape,skip=0)\n\n return res\n\n try:\n # skip header\n n1, = np.fromfile(file,'int32',count=1)\n except TypeError:\n raise\n else:\n n1 /= np.dtype(dtype).itemsize\n data = np.fromfile(file, dtype, count=n1)\n n2, = np.fromfile(file,'int32',count=endcount)\n\n if shape is not None:\n data = data.reshape(shape)\n\n return data", "def nb_read_data(data_chunk):\n\t#ensure that the data_chunk has the right length\n\tprint(data_chunk.shape)\n\tassert np.mod(data_chunk.shape[0],3)==0\n\n\tout=np.empty(data_chunk.shape[0]//3*2,dtype=np.uint16)\n\timage1 = np.empty((2048,2048),dtype=np.uint16)\n\timage2 = np.empty((2048,2048),dtype=np.uint16)\n\n\tfor i in nb.prange(data_chunk.shape[0]//3):\n\t\tfst_uint8=np.uint16(data_chunk[i*3])\n\t\tmid_uint8=np.uint16(data_chunk[i*3+1])\n\t\tlst_uint8=np.uint16(data_chunk[i*3+2])\n\n\t\tout[i*2] = (fst_uint8 << 4) + (mid_uint8 >> 4)\n\t\tout[i*2+1] = ((mid_uint8 % 16) << 8) + lst_uint8\n\n\treturn out", "def pack_unpack_hard():\n # Array is apprx. 1.5 GB large\n # should make apprx 1536 chunks\n pack_unpack(100, chunk_size=reverse_pretty('1M'), progress=simple_progress)", "def binfloat(filename):\n return np.memmap(filename, dtype='float32')", "def read_data(infile):\n extension = os.path.splitext(infile)[1]\n h = read_header(infile)\n nx = int(h['num_x_pts'])\n ny = int(h['num_y_pts'])\n nt = int(h['num_t_pts'])\n fid = open(infile, 'rb')\n fid.seek(512) #skip header\n if extension == '.aps' or extension == '.a3daps':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, ny, nt, order='F').copy() #make N-d image\n elif extension == '.a3d':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, nt, ny, order='F').copy() #make N-d image\n elif extension == '.ahi':\n data = np.fromfile(fid, dtype = np.float32, count = 2* nx * ny * nt)\n data = data.reshape(2, ny, nx, nt, order='F').copy()\n real = data[0,:,:,:].copy()\n imag = data[1,:,:,:].copy()\n fid.close()\n if extension != '.ahi':\n return data\n else:\n return real, imag", "def mhd_to_array(path):\n return sitk.GetArrayFromImage(sitk.ReadImage(path, sitk.sitkFloat32))", "def parse_file(fits_file, data_offset, records, record_length, field_position, field_size):\n bits = field_size * 8\n\n with open(fits_file, 'rb') as f:\n f.read(data_offset)\n for _ in range(0, records):\n record = f.read(record_length)\n value = record[field_position-1:field_position+field_size-1]\n print(multiparse(bits, value))", "def wavread(fname):\n fh = wave.open(fname,'rb')\n (nchannels, sampwidth, framerate, nframes, comptype, compname) = fh.getparams()\n if sampwidth == 2:\n frames = fh.readframes(nframes * nchannels)\n dn = struct.unpack_from('%dh' % nframes*nchannels, frames)\n if nchannels > 1:\n out = np.array([dn[i::nchannels] for i in range(nchannels)])/float(2**15)\n else:\n out = np.array(dn)/float(2**15)\n else:\n print('not a 16 bit wav-file')\n out = [0]\n fh.close()\n return (out,framerate)", "def createSongsArray():\r\n file2write.write(\"unsigned short 
songs[5][2500][2] = {\")", "def _load(self, filepath):\n import subprocess as sp\n command = ['ffmpeg',\n '-i', filepath,\n '-f', 's16le',\n '-acodec', 'pcm_s16le',\n '-ac', '1'] # channels: 2 for stereo, 1 for mono\n if self.sampling_rate != SAMPLING_RATE:\n command.extend(['-ar', str(self.sampling_rate)])\n command.append('-')\n # 30s at 44.1 kHz ~= 1.3e6\n proc = sp.run(command, stdout=sp.PIPE, bufsize=10**7, stderr=sp.DEVNULL, check=True)\n\n return np.fromstring(proc.stdout, dtype=\"int16\")", "def load_nifty_volume_as_array(filename, with_header = False):\n img = nibabel.load(filename)\n data = img.get_data()\n data = np.transpose(data, [2,1,0])\n if(with_header):\n return data, img.affine, img.header\n else:\n return data", "def load_nifty_volume_as_array(filename, with_header = False):\n img = nibabel.load(filename)\n data = img.get_data()\n data = np.transpose(data, [2,1,0])\n if(with_header):\n return data, img.affine, img.header\n else:\n return data", "def _decode_int_array(fp):\n size = _decode_int(fp)\n return list(struct.unpack('>{}i'.format(size), fp.read(size * 4)))", "def convert_uint16_to_array(value):\n return [\n (value >> 0 & 0xFF),\n (value >> 8 & 0xFF)\n ]", "def read_m16_ds_2(use_red=True, mass_bin='10.0_10.4'):\n if use_red:\n # sm_10.0_10.4. - sm_10.4_10.7. - sm_10.7_11.0. - sm_11.0_11.2.\\\n #- sm_11.2_11.4. - sm_11.4_11.6. - sm_11.6_15.0. - sm_11.0_15.0.\n fname = os.path.join(m16path, 'planck_lbg.ds.red.out')\n cols_dict ={\n '10.0_10.4': (0, 1, 2),\n '10.4_10.7': (0, 3, 4),\n '10.7_11.0': (0, 5, 6),\n '11.0_11.2': (0, 7, 8),\n '11.2_11.4': (0, 9, 10),\n '11.4_11.6': (0, 11, 12),\n '11.6_15.0': (0, 13, 14),\n '11.0_15.0': (0, 15, 16),\n }\n elif mass_bin in ['11.0_11.2','11.2_11.4','11.4_11.6','11.6_15.0']:\n fname = os.path.join(m16path, 'planck_lbg.ds.blue.rebinned.out')\n cols_dict ={\n '11.0_11.2': (0, 1, 2),\n '11.2_11.4': (0, 3, 4),\n '11.4_11.6': (0, 5, 6),\n '11.6_15.0': (0, 7, 8),\n }\n else:\n # sm_10.0_10.4. - sm_10.4_10.7. - sm_10.7_11.0. 
- sm_11.0_15.0.\n fname = os.path.join(m16path, 'planck_lbg.ds.blue.out')\n cols_dict ={\n '10.0_10.4': (0, 1, 2),\n '10.4_10.7': (0, 3, 4),\n '10.7_11.0': (0, 5, 6),\n '11.0_15.0': (0, 7, 8),\n }\n # Mpc/h, (h Msun/(physical pc)^2)\n rp, ds, ds_err = np.genfromtxt(fname, usecols=cols_dict[mass_bin],\\\n unpack=True)\n return(rp, ds, ds_err)", "def _read_arduino(self) -> np.ndarray:\r\n raw_data: bytes = self._serial_handle.read(self._chunk)\r\n int_data = [int(data_bit) for data_bit in raw_data]\r\n return np.array(int_data)", "def read_u16(self) -> int:\n ...", "def load_encoded(filename):\n return np.fromfile(filename, dtype='uint8')", "def _decode_long_array(fp):\n size = _decode_int(fp)\n return list(struct.unpack('>{}q'.format(size), fp.read(size * 8)))", "def read(filename):\n\n fileName, fileExtension = os.path.splitext(filename)\n wav_filename = filename\n rate, data = scipy.io.wavfile.read(str(wav_filename)) # the data is read in its native format\n if data.dtype =='int16':\n data = numpy.cast['float'](data)\n return [rate,data]", "def read_mb_file(self,idir='.',ifile=None, gmt=True, verbose=False):\n \n import numpy as np\n import os\n \n if gmt==True:\n gmt_file=idir+'/../maps/en_velo.gmt'\n if isinstance(gmt,str):\n gmt_file=gmt\n \n if gmt != False:\n self.read_lon_lat(gmt_file,verbose=verbose)\n \n if ifile is None:\n mb_file_basename= idir + '/mb_'+self.code+'_GPS.dat'\n else:\n mb_file_basename=ifile\n \n data_NEU = []\n for i in range(1,4):\n mb_file = mb_file_basename + str(i)\n\n # file\n self.ifile=os.path.abspath(mb_file)\n \n data=np.genfromtxt(mb_file,skip_header=4)\n \n # reshape to ensure a 2D array\n if len(data.shape)==1:\n data=data.reshape((1,data.shape[0]))\n \n\n\n data_NEU.append(data)\n\n if data_NEU[0].shape == data_NEU[1].shape == data_NEU[2].shape:\n self.data=np.zeros((data_NEU[0].shape[0],7))\n self.data[:,0]=data_NEU[0][:,0]\n self.data[:,1]=data_NEU[0][:,1]#*to_mm\n self.data[:,2]=data_NEU[1][:,1]#*to_mm\n self.data[:,3]=data_NEU[2][:,1]#*to_mm\n\n self.data[:,4]=data_NEU[0][:,2]#*to_mm\n self.data[:,5]=data_NEU[1][:,2]#*to_mm\n self.data[:,6]=data_NEU[2][:,2]#*to_mm\n\n else: \n print(\"!!! 
Error reading \",mb_file_basename,\" :*dat1, *dat2, *dat3 do not have the same length\")\n self.data = None", "def read12bit(self, register):\n valuearray = bytearray(self.device.readregistermulti(register, 2))\n return struct.unpack('!H', valuearray)[0] & ((2 ** 12) - 1) # Convert to short and discard first four bits", "def read_cycle_info(filename):\r\n \r\n # Open file and read it into a list of lines.\r\n fin = open(filename, \"r\")\r\n lines = fin.readlines()\r\n fin.close()\r\n \r\n info = [[]] * 256;\r\n\r\n for line in lines:\r\n fields = line.split(',')\r\n opc = int(fields[0],16)\r\n info[opc] = (int(fields[1]), int(fields[2]), int(fields[3]))\r\n return info", "def read_mhd_and_raw(path, numpyFlag=True):\n img = sitk.ReadImage(path)\n if not numpyFlag:\n return img\n\n nda = sitk.GetArrayFromImage(img) # (img(x,y,z)->numpyArray(z,y,x))\n return nda", "def samp_file_to_arr(labeled_file, total_size, entry_dtype='f8'):\n buf = []\n n = 0\n with open( labeled_file, 'rb' ) as fi:\n for _, line in enumerate(fi):\n n = n + 1\n r = random.random()\n if n <= total_size:\n buf.append(line)\n elif r < 1.0*total_size/n:\n loc = random.randint(0, total_size-1)\n buf[loc] = line\n return np.array([np.fromstring(s, sep=',', dtype='f8') for s in buf])", "def fread(fid, nelements, dtype):\n\n if dtype is np.str:\n dt = np.uint8 # WARNING: assuming 8-bit ASCII for np.str!\n else:\n dt = dtype\n\n data_array = np.fromfile(fid, dt, nelements)\n if data_array.size==1:data_array=data_array[0]\n return data_array", "def atmparamread(filename):\n f = open(filename, 'r')\n f.readline()\n line = f.readline()\n #Td = float(line.split()[0])\n #Pd = float(line.split()[1])\n #Mc = float(line.split()[2])\n #rc = float(line.split()[3])\n n = int(line.split()[0])\n f.readline()\n atm = 0*numpy.ndarray(shape=(n, ncol), dtype=float)\n S = 0*numpy.ndarray(shape=(n), dtype=float)\n for i in range(n):\n line = f.readline()\n S[i] = float(line.split()[0])\n for j in range(ncol ):\n atm[i, j] = float(line.split()[j+1])\n f.close()\n return atm, S", "def padread(filename, columns=4, out_dtype=np.float32):\n with open(filename, \"rb\") as f: \n A = np.fromfile(f, dtype=np.float32) # accel file: 32-bit float \"singles\"\n B = np.reshape(A, (-1, columns))\n if B.dtype == out_dtype:\n return B\n return B.astype(out_dtype)", "def load_raw(fname):\n # Read all the data from the file\n ctd = []\n with open(fname) as ctdfile:\n \n for line in ctdfile:\n \n if (line.find('*') < 0) and (line.find('#') < 0):\n \n # This line contains data; parse the line\n entries = line.strip().split()\n # Convert data to float64\n entries = [np.float64(entries[i]) \n for i in range(len(entries))]\n # Append to list\n ctd.append(entries)\n \n # Return the raw data as an numpy array\n return np.array(ctd)", "def load_info():\n data = np.loadtxt(\"u_sol_meta.txt\", dtype=int)\n return data", "def read (self, file):\n\t\tself.unpack (file.read (self.size()))", "def autodetect_endian_and_sanity_check_su(file):\n pos = file.tell()\n if isinstance(file, io.BytesIO):\n file.seek(0, 2)\n size = file.tell()\n file.seek(pos, 0)\n else:\n size = os.fstat(file.fileno())[6]\n if size < 244:\n return False\n # Also has to be a multiple of 4 in length because every header is 400 long\n # and every data value 4 byte long.\n elif (size % 4) != 0:\n return False\n # Jump to the number of samples field in the trace header.\n file.seek(114, 0)\n sample_count = file.read(2)\n interval = file.read(2)\n # Jump to the beginning of the year fields.\n file.seek(156, 0)\n 
year = file.read(2)\n jul_day = file.read(2)\n hour = file.read(2)\n minute = file.read(2)\n second = file.read(2)\n # Jump to previous position.\n file.seek(pos, 0)\n # Unpack in little and big endian.\n le_sample_count = unpack(b'<h', sample_count)[0]\n be_sample_count = unpack(b'>h', sample_count)[0]\n # Check if both work.\n working_byteorders = []\n if le_sample_count > 0:\n length = 240 + (le_sample_count * 4)\n if (size % length) == 0:\n working_byteorders.append('<')\n if be_sample_count > 0:\n length = 240 + (be_sample_count * 4)\n if (size % length) == 0:\n working_byteorders.append('>')\n # If None works return False.\n if len(working_byteorders) == 0:\n return False\n # Check if the other header values make sense.\n still_working_byteorders = []\n for bo in working_byteorders:\n fmt = (\"%sh\" % bo).encode('ascii', 'strict')\n this_interval = unpack(fmt, interval)[0]\n this_year = unpack(fmt, year)[0]\n this_julday = unpack(fmt, jul_day)[0]\n this_hour = unpack(fmt, hour)[0]\n this_minute = unpack(fmt, minute)[0]\n this_second = unpack(fmt, second)[0]\n # Make a sanity check for each.\n # XXX: The arbitrary maximum of the sample interval is 10 seconds.\n if this_interval <= 0 or this_interval > 10E7:\n continue\n # Some programs write two digit years.\n if this_year != 0 and (this_year < 1930 or this_year >= 2030) and \\\n (this_year < 0 or this_year >= 100):\n continue\n # 9999 is often used as a placeholder\n if (this_julday > 366 or this_julday < 0) and this_julday != 9999:\n continue\n if this_hour > 24 or this_hour < 0:\n continue\n if this_minute > 60 or this_minute < 0:\n continue\n if this_second > 60 or this_second < 0:\n continue\n still_working_byteorders.append(bo)\n length = len(still_working_byteorders)\n if not length:\n return False\n elif length == 1:\n return still_working_byteorders[0]\n else:\n # XXX: In the unlikely case both byte orders pass the sanity checks\n # something else should be checked. Currently it is not.\n msg = \"\"\"\n Both possible byte orders passed all sanity checks. Please contact\n the ObsPy developers so they can implement additional tests.\n \"\"\".strip()\n raise Exception(msg)", "def read_sm_product(filepath):\n # check the files are udp files\n if os.path.basename(filepath)[14:17] != 'UDP':\n raise ValueError('{} is not a UDP file'.format(filepath))\n\n # Open the data file for reading\n try:\n file = open(filepath, 'rb')\n except IOError:\n logging.exception('file {} does not exist'.format(filepath))\n raise\n\n # Read first unsigned int32, containing number of datapoints to iterate over\n n_grid_points = np.fromfile(file, dtype=np.uint32, count=1)[0]\n logging.debug('Data file contains {} data points'.format(n_grid_points))\n logging.debug('Reading file... 
')\n data = np.fromfile(file, dtype=datatype, count=n_grid_points)\n file.close()\n logging.debug('Done')\n\n return data", "def read_raw_data(file_name: str, ROWS: int, COLS: int, OFFSET=0) -> list:\r\n FILE = open(file_name, mode=\"r\")\r\n\r\n # Reading the data in the Single Dimensional form\r\n img = np.fromfile(\r\n FILE, dtype=np.uint8, count=ROWS * COLS, offset=((ROWS * COLS) * OFFSET)\r\n )\r\n\r\n # Shaping the data to the two dimensional format\r\n img = np.reshape(img, (ROWS, COLS)).tolist()\r\n\r\n FILE.close()\r\n return img", "def loadCudaStream(name):\n data=np.fromfile(name, dtype=\"float32\")\n data=data.reshape(int(len(data)/4), 4)\n data=np.delete(data,3,1)\n return data", "def load_nifty_volume_as_array(filename):\n img = sitk.ReadImage(filename)\n img_arr = sitk.GetArrayFromImage(img)\n return img_arr", "def read_results():\r\n with open(\"packing.nfo\", \"r\") as fin:\r\n fin.readline()\r\n fin.readline()\r\n por_theory = float(fin.readline().split()[2])\r\n por_final = float(fin.readline().split()[2])\r\n print('Theoretical porosity:', por_theory)\r\n print('Final porosity:', por_final)\r\n with open(\"packing.xyzd\", \"rb\") as fin:\r\n btxt = fin.read()\r\n txt = list(struct.unpack(\"<\" + \"d\" * (len(btxt) // 8), btxt))\r\n data = array(zip(*[iter(txt)] * 4))\r\n data[:, 3] = data[:, 3] * \\\r\n ((1 - por_final) / (1 - por_theory))**(1 / 3)\r\n return data", "def read_szx_fmv_12(eps_file):\n raw_data = eps_file.scaled_mdr\n raw_unscaled = eps_file.mdr\n mphr = eps_file.mphr\n\n n_node_per_line = raw_data[\"LONGITUDE\"].shape[1]\n n_lines = raw_data[\"LONGITUDE\"].shape[0]\n n_records = raw_data[\"LONGITUDE\"].size\n\n data = {}\n metadata = {}\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n ascat_time = shortcdstime2jd(raw_data[\"UTC_LINE_NODES\"].flatten()[\"day\"],\n raw_data[\"UTC_LINE_NODES\"].flatten()[\"time\"])\n data[\"jd\"] = ascat_time[idx_nodes]\n\n metadata[\"spacecraft_id\"] = np.int8(mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(mphr[\"ORBIT_START\"])\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n\n for f in fields:\n metadata[f] = np.int16(mphr[f.upper()])\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"abs_line_number\"\n ]\n\n for f in fields:\n data[f] = raw_data[f.upper()].flatten()[idx_nodes]\n\n fields = [(\"longitude\", long_nan), (\"latitude\", long_nan),\n (\"swath indicator\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].flatten()\n valid = raw_unscaled[f.upper()].flatten() != nan_val\n data[f][~valid] = nan_val\n\n fields = [(\"sigma0_trip\", long_nan), (\"inc_angle_trip\", uint_nan),\n (\"azi_angle_trip\", int_nan), (\"kp\", uint_nan),\n (\"num_val_trip\", ulong_nan), (\"f_kp\", byte_nan),\n (\"f_usable\", byte_nan), (\"f_f\", uint_nan), (\"f_v\", uint_nan),\n (\"f_oa\", uint_nan), (\"f_sa\", uint_nan), (\"f_tel\", uint_nan),\n (\"f_ref\", uint_nan), (\"f_land\", uint_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].reshape(n_records, 3)\n valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val\n data[f][~valid] = nan_val\n\n # modify longitudes from (0, 360) to (-180,180)\n mask = np.logical_and(data[\"longitude\"] != long_nan,\n data[\"longitude\"] > 180)\n data[\"longitude\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n mask = (data[\"azi_angle_trip\"] != int_nan) & (data[\"azi_angle_trip\"] < 0)\n 
data[\"azi_angle_trip\"][mask] += 360\n\n data[\"node_num\"] = np.tile((np.arange(n_node_per_line) + 1),\n n_lines).astype(np.uint8)\n\n data[\"line_num\"] = idx_nodes.astype(np.uint16)\n\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n data[\"swath_indicator\"] = data.pop(\"swath indicator\")\n\n return data, metadata", "def read_file(file):\n if opts.input_type == 'fits':\n data = fileio.read_fits(file)\n else:\n data = fileio.read_ascii(file)\n c_id = data[0,:]\n g_num = np.array(range(len(c_id)), dtype = 'int')\n g_id = data[3,:]\n g_ra = np.array(data[4,:], dtype = 'float')\n g_dec = np.array(data[5,:], dtype = 'float')\n g_z = np.array(data[6,:], dtype = 'float')\n return c_id, g_num, g_id, g_ra, g_dec, g_z", "def get_pict_data(fname):\n with open(fname, 'r') as f:\n return np.asarray(f.read().split(',')).reshape((11, 1024)).astype(int)", "def read_ascii(file):\n wvlen, band, mag, emag, fmag, unit, beam, odate, ref = [],[],[],[],[],[],[],[],[]\n with open(file, 'r') as f_in:\n for line in f_in:\n try:\n # ensure line contains data:\n a = float(line[0])\n except ValueError:\n a = 'dummy'\n try:\n # ensure mag or flux entry is not '--'\n m = float(line.split(' ')[2])\n except ValueError:\n m = 'dummy'\n \n if isinstance(a, float) and isinstance(m, float):\n wvlen.append(float(line.strip().split(' ')[0])) # in metres\n band.append(line.strip().split(' ')[1])\n mag.append(float(line.strip().split(' ')[2]))\n emag.append(line.strip().split(' ')[3])\n fmag.append(line.strip().split(' ')[4])\n unit.append(line.strip().split(' ')[5])\n beam.append(line.strip().split(' ')[6])\n odate.append(line.strip().split(' ')[7])\n ref.append(line.strip().split(' ')[8])\n \n return wvlen, band, mag, emag, fmag, unit, beam, odate, ref", "def disassemble(file, MB_limit=80, destination=None):\n # List of files to return\n filelist = []\n\n # Check file size in MB\n filesize = os.path.getsize(file) / 1000000\n\n # Filename\n filename = os.path.basename(file).replace('.fits', '')\n\n # Get destination\n if destination is None:\n destination = os.path.dirname(file)\n\n # If already small enough, do nothing\n if filesize > MB_limit:\n\n # Open the FITS file\n hdulist = fits.open(file, mode='update')\n\n # Strip file of data\n extensions = {}\n for hdu in hdulist:\n\n # Save the real data\n extensions[hdu.name] = hdu.data\n\n # Replace with tiny dummy array\n hdulist[hdu.name].data = None\n\n # Write to the file and close it\n hdulist.writeto(file, overwrite=True)\n hdulist.close()\n\n # Make a folder\n folder = filename + '_data'\n destination = os.path.join(destination, folder)\n os.system('mkdir {}'.format(destination))\n\n # Write the data to .npz files\n for ext, data in extensions.items():\n\n # Some are None\n if data is not None:\n\n # Check data size in MB\n datasize = data.nbytes\n\n # Get number of chunks\n nchunks = np.ceil(datasize / 1000000 / MB_limit).astype(int)\n\n # Break up into chunks\n chunks = np.array_split(data, nchunks + 2)\n\n # Save as .npz files\n for n, chunk in enumerate(chunks):\n\n # Determine filename\n chunkname = filename + '.{}.{}.npy'.format(ext, n)\n\n # Save the chunk to file\n filepath = os.path.join(destination, chunkname)\n np.save(filepath, chunk)\n\n # Add to list of filenames\n filelist.append(filepath)\n\n return filelist", "def _decode_35708(data):\n start_byte = 0\n n_bytes = 2\n var_id = struct.unpack('<H', data[start_byte:start_byte + n_bytes])[0]\n if var_id == 29974:\n start_byte += n_bytes\n n_bytes = 4\n var_size = 
struct.unpack('<I', data[start_byte:\n start_byte + n_bytes])[0]\n start_byte += n_bytes\n n_bytes = var_size\n\n return np.frombuffer(data[start_byte:start_byte + n_bytes],\n dtype=np.float64)", "def load_png16(fname):\n with Image.open(fname) as img:\n if hasattr(img, 'text') and 'min' in img.text and 'max' in img.text:\n vmin = float(img.text['min'])\n vmax = float(img.text['max'])\n arr = np.array(img).astype(np.float64) * \\\n ((vmax-vmin)/(2**16-1)) + vmin\n else:\n arr = np.array(img)\n return arr", "def hdr_to_Nifti(files):\r\n array = []\r\n for element in files:\r\n array = np.append(array, nib.load(element))\r\n\r\n print('array size: ', array.shape, '\\narray type: ', type(array))\r\n\r\n return array", "def decode_file(self, filename):\n num_bytes = os.stat(filename)[6]\n data = array.array('B')\n\n with open(filename, 'rb') as f:\n data.fromfile(f, num_bytes)\n\n return self.decode_data(data)", "def readFastaFile(filename):" ]
[ "0.7049365", "0.6879431", "0.6836875", "0.65290755", "0.6360394", "0.621499", "0.6207839", "0.61189526", "0.60546416", "0.60031", "0.59370583", "0.5934907", "0.5933406", "0.5932318", "0.5895518", "0.5847376", "0.5819832", "0.5810755", "0.5799669", "0.5764724", "0.5755548", "0.569874", "0.5678875", "0.56133074", "0.56113255", "0.5560231", "0.5542144", "0.5541702", "0.5538175", "0.55341166", "0.5522006", "0.5507621", "0.55063504", "0.5490913", "0.5484261", "0.54663026", "0.5451572", "0.54447913", "0.54371166", "0.54371166", "0.54371166", "0.54371166", "0.54371166", "0.54371166", "0.5434744", "0.5431787", "0.54275763", "0.5416085", "0.5416085", "0.54147065", "0.5404259", "0.53980505", "0.5388207", "0.53771317", "0.53713477", "0.5366661", "0.5354433", "0.5353921", "0.5353812", "0.5347286", "0.5338803", "0.5337828", "0.53327847", "0.5324148", "0.5324148", "0.53231823", "0.5321794", "0.5314063", "0.53114253", "0.5304376", "0.5296398", "0.5279423", "0.5262427", "0.5248592", "0.52411324", "0.52357846", "0.5235574", "0.5213277", "0.5202782", "0.5202154", "0.51993334", "0.5195887", "0.51849383", "0.51736206", "0.5162965", "0.5159531", "0.51515347", "0.5144418", "0.5136737", "0.51257324", "0.51097506", "0.5101835", "0.51016515", "0.50974625", "0.50841117", "0.50802714", "0.5075294", "0.50652605", "0.5064522", "0.5063101" ]
0.66510236
3
precompiled function to efficiently convert from 12bit packed video to 16bit video. It splits 3 bytes into two 16 bit words. data_chunk is a contiguous 1D array of uint8 data, e.g. the 12bit video loaded as an 8bit array
def nb_read_uint12(data_chunk): #ensure that the data_chunk has the right length assert np.mod(data_chunk.shape[0],3)==0 out = np.empty(data_chunk.size//3*2, dtype=np.uint16) for i in nb.prange(data_chunk.shape[0]//3): fst_uint8=np.uint16(data_chunk[i*3]) mid_uint8=np.uint16(data_chunk[i*3+1]) lst_uint8=np.uint16(data_chunk[i*3+2]) out[i*2] = (fst_uint8 << 4) + (mid_uint8 >> 4) out[i*2+1] = ((mid_uint8 % 16) << 8) + lst_uint8 return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_uint12_video(data, shape):\n data = np.memmap(data, dtype=np.uint8, mode=\"r\")\n fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n fst_uint12 = (fst_uint8 << 4) + (mid_uint8 >> 4)\n snd_uint12 = ((mid_uint8 % 16) << 8) + lst_uint8\n return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), shape)", "def nb_read_data(data_chunk):\n\t#ensure that the data_chunk has the right length\n\tprint(data_chunk.shape)\n\tassert np.mod(data_chunk.shape[0],3)==0\n\n\tout=np.empty(data_chunk.shape[0]//3*2,dtype=np.uint16)\n\timage1 = np.empty((2048,2048),dtype=np.uint16)\n\timage2 = np.empty((2048,2048),dtype=np.uint16)\n\n\tfor i in nb.prange(data_chunk.shape[0]//3):\n\t\tfst_uint8=np.uint16(data_chunk[i*3])\n\t\tmid_uint8=np.uint16(data_chunk[i*3+1])\n\t\tlst_uint8=np.uint16(data_chunk[i*3+2])\n\n\t\tout[i*2] = (fst_uint8 << 4) + (mid_uint8 >> 4)\n\t\tout[i*2+1] = ((mid_uint8 % 16) << 8) + lst_uint8\n\n\treturn out", "def nb_read_data(data_chunk):\n\t#ensure that the data_chunk has the right length\n\n\tassert np.mod(data_chunk.shape[0],3)==0\n\n\tout=np.empty(data_chunk.shape[0]//3*2,dtype=np.uint16)\n\timage1 = np.empty((2048,2048),dtype=np.uint16)\n\timage2 = np.empty((2048,2048),dtype=np.uint16)\n\n\tfor i in nb.prange(data_chunk.shape[0]//3):\n\t\tfst_uint8=np.uint16(data_chunk[i*3])\n\t\tmid_uint8=np.uint16(data_chunk[i*3+1])\n\t\tlst_uint8=np.uint16(data_chunk[i*3+2])\n\n\t\tout[i*2] = (fst_uint8 << 4) + (mid_uint8 >> 4)\n\t\tout[i*2+1] = ((mid_uint8 % 16) << 8) + lst_uint8\n\n\treturn out", "def raw_to_tif(file, channel=None ):\n \n def read_uint12(data_chunk):\n data = np.frombuffer(data_chunk, dtype=np.uint8)\n fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n # fst_uint12 = (fst_uint8 << 4) + (mid_uint8 >> 4)\n # snd_uint12 = (lst_uint8 << 4) + (np.bitwise_and(15, mid_uint8))\n fst_uint12 = (fst_uint8 << 4) + (np.bitwise_and(15, mid_uint8))\n snd_uint12 = (lst_uint8 << 4) + (mid_uint8 >> 4)\n return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), 2 * fst_uint12.shape[0])\n\n# def read_uint12(data_chunk):\n# data = np.frombuffer(data_chunk, dtype=np.uint8)\n# fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n# fst_uint12 = (fst_uint8 << 4) + (mid_uint8 >> 4)\n# snd_uint12 = ((mid_uint8 % 16) << 8) + lst_uint8\n# return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), 2 * fst_uint12.shape[0])\n\n# def read_uint12(data_chunk):\n# data = np.frombuffer(data_chunk, dtype=np.uint8)\n# fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T\n# fst_uint12 = ((mid_uint8 & 0x0F) << 8) | fst_uint8\n# snd_uint12 = (lst_uint8 << 4) | ((mid_uint8 & 0xF0) >> 4)\n# return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), 2 * fst_uint12.shape[0])\n \n # infile = 'd:\\\\Projekti\\\\Satelit\\\\CO\\\\Razpis\\\\Flat field images_new2020\\\\flatfield\\\\NHDBflat_1D'\n # infile = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Jure_naloga_banje_raw_pyt\\\\NHDRGoreMorje_3D'\n\n # in_path = 'p:\\\\NEMO\\Posnetki\\\\20201014_GoreMorje_data\\cele\\\\'\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Peking_PAN\\\\'\n # in_image_files = [filename for filename in os.listdir(in_path) if filename.lower().startswith(\"nhd\") and filename.lower().endswith(\"d\")]\n\n \n # infile = in_path + 
in_image_files[i]\n with open(file, 'rb', buffering=10) as f: # problem pri branju podatkov?\n byte = f.read()\n print(file)\n # # ar = open(infile, 'rb')\n # buffer = BytesIO()\n # byte = BytesIO(ar)\n \n img = read_uint12(byte)\n print(img)\n \n if channel==\"P\":\n img = img.reshape((2748, 3664)) # PAN\n else:\n img = img.reshape((2050, 2448)) # MS\n # img = img.reshape((2748, 3664)) # PAN\n\n size = img.shape\n \n \n out = file[:-4]+ \"_py.tif\"\n\n driver = gdal.GetDriverByName('GTiff')\n\n outRaster = driver.Create(out, size[1], size[0], 1, gdal.GDT_UInt16)\n\n outband = outRaster.GetRasterBand(1)\n outband.WriteArray(img)\n outband.FlushCache()", "def unpack_mraw_frame_12bit(file,n_pixels,start_frame=0):\n \n start_byte = start_frame*n_pixels*12/8\n file.seek(start_byte)\n image = []\n \n n_bytes = n_pixels*12/8\n \n int_array = np.fromfile(file,count=n_bytes,dtype=np.uint8)\n \n bytes_1 = int_array[::3]\n bytes_2 = int_array[1::3] \n bytes_3 = int_array[2::3]\n\n \n # Here 2 pixels from the image are shared between three bytes of data like\n #\n # | byte 1 | byte 2 | byte 3 |\n # |o o o o o o o o|o o o o | o o o o|o o o o o o o o|\n # | Pixel 1 | Pixel 2 |\n #\n # byte 2 is shared between pixel and we need only the right-most bits for pixel 2 and\n # only the left most bits for pixel 1. \n \n # right-most bits of byte 2 = Most significant bits of Pixel 2\n # left-most bits of byte 2 = Least significant bits of Pixel 1\n \n pix_1 = np.array(16.0*bytes_1 + np.right_shift(bytes_2,4),dtype=np.uint16)\n pix_2 = np.array(256.0*np.bitwise_and(bytes_2,0b1111) + bytes_3,dtype=np.uint16)\n \n try:\n image = (np.dstack([pix_1,pix_2])).reshape((1,n_pixels))[0]\n except:\n image = np.zeros(n_pixels)\n return image", "def _transpose_by_2_vnchwconv(tik_inst, dst, src, sub_hw_size):\n\n # whether the sub_h_size is block align or not should be decided before transferring in\n sub_h_size, sub_w_size = sub_hw_size\n data_size_one_block = _get_elment_cnt_one_block(src.dtype)\n w_block_cnt = _ceil_div(sub_w_size, data_size_one_block)\n fp16_src = src.reinterpret_cast_to(\"float16\")\n fp16_dst = dst.reinterpret_cast_to(\"float16\")\n fp16_data_one_block = _get_elment_cnt_one_block(\"float16\")\n # vnchwconv get two bytes per time\n if src.dtype.lower() in (\"float32\", \"int32\", \"uint32\"):\n vnc_one_line_len = w_block_cnt * data_size_one_block * sub_h_size * 2\n elif src.dtype.lower() in (\"float16\", \"int16\", \"uint16\"):\n vnc_one_line_len = w_block_cnt * data_size_one_block * sub_h_size\n else:\n error_detail = \"not support the dtype\"\n error_manager_vector.raise_err_two_input_dtype_invalid(\"transpose_d\", \"in_dtype\",\n \"dst_dtype\", error_detail)\n\n # do 16hc to hc16 transfer\n src_addr_list = [fp16_src[vnc_one_line_len * i] for i in ADDR_IDX_LIST]\n dst_addr_list = [fp16_dst[fp16_data_one_block * i] for i in ADDR_IDX_LIST]\n repeat_cnt = _ceil_div(vnc_one_line_len, fp16_data_one_block)\n with tik_inst.new_stmt_scope():\n src_stride = tik_inst.Scalar(\"int64\")\n dst_stride = tik_inst.Scalar(\"int64\")\n with tik_inst.if_scope(repeat_cnt == 1):\n src_stride.set_as(0)\n dst_stride.set_as(0)\n with tik_inst.else_scope():\n src_stride.set_as(1)\n dst_stride.set_as(16)\n tik_inst.vnchwconv(False, False,\n dst_addr_list, src_addr_list,\n repeat_cnt, dst_stride, src_stride)\n\n # do hc16 to ch16 transfer\n with tik_inst.if_scope(sub_h_size > sub_w_size):\n with tik_inst.for_range(0, sub_w_size) as w_size_idx:\n tik_inst.data_move(\n fp16_src[w_size_idx * sub_h_size * fp16_data_one_block * 
2],\n fp16_dst[w_size_idx * fp16_data_one_block * 2],\n 0, sub_h_size, 2, (w_block_cnt * data_size_one_block - 1) * 2, 0)\n with tik_inst.else_scope():\n with tik_inst.for_range(0, sub_h_size) as h_size_idx:\n tik_inst.data_move(\n fp16_src[h_size_idx * fp16_data_one_block * 2],\n fp16_dst[h_size_idx * w_block_cnt * data_size_one_block * fp16_data_one_block * 2],\n 0, sub_w_size, 2, 0, (sub_h_size - 1) * 2)\n\n # do ch16 to 16ch transfer\n src_addr_list = [fp16_src[fp16_data_one_block * i] for i in ADDR_IDX_LIST]\n dst_addr_list = [fp16_dst[vnc_one_line_len * i] for i in ADDR_IDX_LIST]\n repeat_cnt = _ceil_div(vnc_one_line_len, fp16_data_one_block)\n with tik_inst.new_stmt_scope():\n src_stride = tik_inst.Scalar(\"int64\")\n dst_stride = tik_inst.Scalar(\"int64\")\n with tik_inst.if_scope(repeat_cnt == 1):\n src_stride.set_as(0)\n dst_stride.set_as(0)\n with tik_inst.else_scope():\n src_stride.set_as(16)\n dst_stride.set_as(1)\n tik_inst.vnchwconv(False, False,\n dst_addr_list, src_addr_list,\n repeat_cnt, dst_stride, src_stride)", "def processing_data(raw_data):\n data = np.frombuffer(raw_data, np.uint8)\n data = np.reshape(data, [data.shape[0]//1029, -1])\n data = data[:, 5:]\n data = np.reshape(data, [1, -1])\n data = 256 * data[0, 0::2] + data[0, 1::2]\n data = 10 * (data / 65535)\n data = np.reshape(data, [-1, 8]).T\n return data", "def _transpose_by_2_vnchwconv_not_last_dim(tik_inst, dst, src, sub_dim_size):\n\n # whether the sub_h_size is block align or not should be decided before transferring in\n sub_axis_1, sub_axis_0, axis_2 = sub_dim_size\n data_size_one_block = _get_elment_cnt_one_block(src.dtype)\n axis_2_block_cnt = _ceil_div(axis_2, data_size_one_block)\n fp16_src = src.reinterpret_cast_to(\"float16\")\n fp16_dst = dst.reinterpret_cast_to(\"float16\")\n fp16_data_one_block = _get_elment_cnt_one_block(\"float16\")\n # vnchwconv get two bytes per time\n if src.dtype.lower() in (\"float32\", \"int32\", \"uint32\"):\n vnc_one_line_len = axis_2_block_cnt * data_size_one_block * sub_axis_1 * sub_axis_0 * 2\n elif src.dtype.lower() in (\"float16\", \"int16\", \"uint16\"):\n vnc_one_line_len = axis_2_block_cnt * data_size_one_block * sub_axis_1 * sub_axis_0\n else:\n error_detail = \"not support the dtype\"\n error_manager_vector.raise_err_two_input_dtype_invalid(\"transpose_d\", \"in_dtype\",\n \"dst_dtype\", error_detail)\n\n # do 16hc to hc16 transfer\n src_addr_list = [fp16_src[vnc_one_line_len * i] for i in ADDR_IDX_LIST]\n dst_addr_list = [fp16_dst[fp16_data_one_block * i] for i in ADDR_IDX_LIST]\n repeat_cnt = _ceil_div(vnc_one_line_len, fp16_data_one_block)\n with tik_inst.new_stmt_scope():\n src_stride = tik_inst.Scalar(\"int64\")\n dst_stride = tik_inst.Scalar(\"int64\")\n with tik_inst.if_scope(repeat_cnt == 1):\n src_stride.set_as(0)\n dst_stride.set_as(0)\n with tik_inst.else_scope():\n src_stride.set_as(1)\n dst_stride.set_as(16)\n tik_inst.vnchwconv(False, False,\n dst_addr_list, src_addr_list,\n repeat_cnt, dst_stride, src_stride)\n\n # do sub_axis_1*sub_axis_0*16 to sub_axis_1*sub_axis_0*axis_2 transfer\n with tik_inst.for_range(0, sub_axis_1) as sub_axis_1_idx:\n tik_inst.data_move(\n fp16_src[sub_axis_1_idx * sub_axis_0 * axis_2 * fp16_data_one_block * 2],\n fp16_dst[sub_axis_1_idx * sub_axis_0 * fp16_data_one_block * fp16_data_one_block],\n 0, sub_axis_0, 2 * axis_2, fp16_data_one_block - 2 * axis_2, 0)\n\n # do ch16 to 16ch transfer\n src_addr_list = [fp16_src[fp16_data_one_block * i] for i in ADDR_IDX_LIST]\n dst_addr_list = [fp16_dst[vnc_one_line_len * i] 
for i in ADDR_IDX_LIST]\n repeat_cnt = _ceil_div(vnc_one_line_len, fp16_data_one_block)\n with tik_inst.new_stmt_scope():\n src_stride = tik_inst.Scalar(\"int64\")\n dst_stride = tik_inst.Scalar(\"int64\")\n with tik_inst.if_scope(repeat_cnt == 1):\n src_stride.set_as(0)\n dst_stride.set_as(0)\n with tik_inst.else_scope():\n src_stride.set_as(16)\n dst_stride.set_as(1)\n tik_inst.vnchwconv(False, False,\n dst_addr_list, src_addr_list,\n repeat_cnt, dst_stride, src_stride)", "def decode_16_bit_2ch(data):\n\n d = array.array('h', data)\n left = d[0::2]\n right = d[1::2]\n return left, right", "def _read_uint12_video_prec(data, shape):\n data = np.memmap(data, dtype=np.uint8, mode=\"r\")\n return nb_read_uint12(data).reshape(shape)", "def _process_data(data: np.ndarray) -> np.ndarray:\r\n result: np.ndarray = np.empty(shape=(0, 0))\r\n i = 0\r\n while i < (len(data) - 1):\r\n # Found beginning of frame\r\n if data[i] > 127:\r\n # Extract one sample from 2 bytes\r\n intout = (np.bitwise_and(data[i], 127)) * 128\r\n i += 1\r\n intout = intout + data[i]\r\n result = np.append(result, intout)\r\n i += 1\r\n return result", "def preprocess(x):\n if x.shape[-1] < 16000 * 8:\n raise ValueError(\n \"Cannot preprocess tensor less than 8 seconds in duration.\"\n )\n vad = VadChunk(*get_vad(\"both\"))\n return vad(x)", "def split_audio_into_chunks(sampling_rate, amplitude_vector, chunk_size):\n \n col_size = int(chunk_size / ((1 / sampling_rate) * 1000))\n whole = int(len(amplitude_vector) / col_size)\n first_partition_index = whole*col_size\n first_partition = amplitude_vector[:first_partition_index]\n second_partition = amplitude_vector[first_partition_index:]\n return first_partition.reshape((whole, col_size)), second_partition", "def decompressDeltas19Bit(buffer): \n if len(buffer) != 19:\n raise ValueError(\"Input should be 19 bytes long.\")\n \n receivedDeltas = [[0, 0, 0, 0], [0, 0, 0, 0]]\n\n # Sample 1 - Channel 1\n miniBuf = [\n (buffer[0] >> 5),\n ((buffer[0] & 0x1F) << 3 & 0xFF) | (buffer[1] >> 5),\n ((buffer[1] & 0x1F) << 3 & 0xFF) | (buffer[2] >> 5)]\n\n receivedDeltas[0][0] = conv19bitToInt32(miniBuf)\n\n # Sample 1 - Channel 2\n miniBuf = [\n (buffer[2] & 0x1F) >> 2,\n (buffer[2] << 6 & 0xFF) | (buffer[3] >> 2),\n (buffer[3] << 6 & 0xFF) | (buffer[4] >> 2)\n ]\n receivedDeltas[0][1] = conv19bitToInt32(miniBuf)\n\n # Sample 1 - Channel 3\n miniBuf = [\n ((buffer[4] & 0x03) << 1 & 0xFF) | (buffer[5] >> 7),\n ((buffer[5] & 0x7F) << 1 & 0xFF) | (buffer[6] >> 7),\n ((buffer[6] & 0x7F) << 1 & 0xFF) | (buffer[7] >> 7)]\n receivedDeltas[0][2] = conv19bitToInt32(miniBuf)\n\n # Sample 1 - Channel 4\n miniBuf = [\n ((buffer[7] & 0x7F) >> 4),\n ((buffer[7] & 0x0F) << 4 & 0xFF) | (buffer[8] >> 4),\n ((buffer[8] & 0x0F) << 4 & 0xFF) | (buffer[9] >> 4)]\n receivedDeltas[0][3] = conv19bitToInt32(miniBuf)\n\n # Sample 2 - Channel 1\n miniBuf = [\n ((buffer[9] & 0x0F) >> 1),\n (buffer[9] << 7 & 0xFF) | (buffer[10] >> 1),\n (buffer[10] << 7 & 0xFF) | (buffer[11] >> 1)]\n receivedDeltas[1][0] = conv19bitToInt32(miniBuf)\n\n # Sample 2 - Channel 2\n miniBuf = [\n ((buffer[11] & 0x01) << 2 & 0xFF) | (buffer[12] >> 6),\n (buffer[12] << 2 & 0xFF) | (buffer[13] >> 6),\n (buffer[13] << 2 & 0xFF) | (buffer[14] >> 6)]\n receivedDeltas[1][1] = conv19bitToInt32(miniBuf)\n\n # Sample 2 - Channel 3\n miniBuf = [\n ((buffer[14] & 0x38) >> 3),\n ((buffer[14] & 0x07) << 5 & 0xFF) | ((buffer[15] & 0xF8) >> 3),\n ((buffer[15] & 0x07) << 5 & 0xFF) | ((buffer[16] & 0xF8) >> 3)]\n receivedDeltas[1][2] = 
conv19bitToInt32(miniBuf)\n\n # Sample 2 - Channel 4\n miniBuf = [(buffer[16] & 0x07), buffer[17], buffer[18]]\n receivedDeltas[1][3] = conv19bitToInt32(miniBuf)\n\n return receivedDeltas;", "def decompressDeltas18Bit(buffer): \n if len(buffer) != 18:\n raise ValueError(\"Input should be 18 bytes long.\")\n\n receivedDeltas = [[0, 0, 0, 0],[0, 0, 0, 0]]\n\n # Sample 1 - Channel 1\n miniBuf = [\n (buffer[0] >> 6),\n ((buffer[0] & 0x3F) << 2 & 0xFF) | (buffer[1] >> 6),\n ((buffer[1] & 0x3F) << 2 & 0xFF) | (buffer[2] >> 6)]\n receivedDeltas[0][0] = conv18bitToInt32(miniBuf)\n\n # Sample 1 - Channel 2\n miniBuf = [\n (buffer[2] & 0x3F) >> 4,\n (buffer[2] << 4 & 0xFF) | (buffer[3] >> 4),\n (buffer[3] << 4 & 0xFF) | (buffer[4] >> 4)]\n receivedDeltas[0][1] = conv18bitToInt32(miniBuf)\n\n # Sample 1 - Channel 3\n miniBuf = [\n (buffer[4] & 0x0F) >> 2,\n (buffer[4] << 6 & 0xFF) | (buffer[5] >> 2),\n (buffer[5] << 6 & 0xFF) | (buffer[6] >> 2)]\n receivedDeltas[0][2] = conv18bitToInt32(miniBuf)\n\n # Sample 1 - Channel 4\n miniBuf = [\n (buffer[6] & 0x03),\n buffer[7],\n buffer[8]]\n receivedDeltas[0][3] = conv18bitToInt32(miniBuf)\n\n # Sample 2 - Channel 1\n miniBuf = [\n (buffer[9] >> 6),\n ((buffer[9] & 0x3F) << 2 & 0xFF) | (buffer[10] >> 6),\n ((buffer[10] & 0x3F) << 2 & 0xFF) | (buffer[11] >> 6)]\n receivedDeltas[1][0] = conv18bitToInt32(miniBuf)\n\n # Sample 2 - Channel 2\n miniBuf = [\n (buffer[11] & 0x3F) >> 4,\n (buffer[11] << 4 & 0xFF) | (buffer[12] >> 4),\n (buffer[12] << 4 & 0xFF) | (buffer[13] >> 4)]\n receivedDeltas[1][1] = conv18bitToInt32(miniBuf)\n\n # Sample 2 - Channel 3\n miniBuf = [\n (buffer[13] & 0x0F) >> 2,\n (buffer[13] << 6 & 0xFF) | (buffer[14] >> 2),\n (buffer[14] << 6 & 0xFF) | (buffer[15] >> 2)]\n receivedDeltas[1][2] = conv18bitToInt32(miniBuf)\n\n # Sample 2 - Channel 4\n miniBuf = [\n (buffer[15] & 0x03),\n buffer[16],\n buffer[17]]\n receivedDeltas[1][3] = conv18bitToInt32(miniBuf)\n\n return receivedDeltas", "def extract(self, audio_chunk: np.ndarray): \n pass", "def unpack_mraw_frame_10bit(file,n_pixels,start_frame=0):\n \n start_byte = start_frame*n_pixels*10/8\n file.seek(start_byte)\n image = []\n \n n_bytes = n_pixels*10/8\n \n int_array = np.fromfile(file,count=n_bytes,dtype=np.uint8)\n \n bytes_1 = int_array[::5]\n bytes_2 = int_array[1::5] \n bytes_3 = int_array[2::5]\n bytes_4 = int_array[3::5] \n bytes_5 = int_array[4::5]\n\n \n # Here 4 pixels from the image are shared between 5 bytes of data like\n #\n # | byte 1 | byte 2 | byte 3 | byte 4 | byte 5 |\n # |o o o o o o o o | o o | o o o o o o | o o o o | o o o o | o o o o o o | o o | o o o o o o o o|\n # | Pixel 1 | Pixel 2 | Pixel 3 | Pixel 4 |\n #\n # byte 2 is shared between pixel and we need only the right-most bits for pixel 2 and\n # only the left most bits for pixel 1. 
\n \n # right-most bits of byte 2 = Most significant bits of Pixel 2\n # left-most bits of byte 2 = Least significant bits of Pixel 1\n \n pix_1 = np.array(4.0*bytes_1 + np.right_shift(bytes_2,6),dtype=np.uint16)\n pix_2 = np.array(16.0*np.bitwise_and(bytes_2,0b111111) + np.right_shift(bytes_3,4),dtype=np.uint16)\n pix_3 = np.array(64.0*np.bitwise_and(bytes_3,0b1111) + np.right_shift(bytes_4,2),dtype=np.uint16)\n pix_4 = np.array(256.0*np.bitwise_and(bytes_4,0b11) + bytes_5,dtype=np.uint16)\n #try:\n image = (np.dstack([pix_1,pix_2,pix_3,pix_4])).reshape((1,n_pixels))[0]\n #except:\n # image = np.zeros(n_pixels)\n return image", "def split_video_random(file_path, start_pos, split_length, out_path):\n s_cmd = \" -i '%s'\"%(file_path) #use default CODEC\n try:\n\tfileext = file_path.split(\".\")[-1]\n except IndexError as e:\n\traise IndexError(\"No ext. in filename. Error: \" + str(e))\n\n split_start = start_pos\n split_length = split_length\n head, tail = os.path.split(file_path)\n name, ext = tail.split('.')\n filebase=name+'_'+str(start_pos)+'-'+str(split_length)\n\n dstfilebase = out_path + '/' + filebase # create output file base\n\n #split_str = \"\"\n #split_str += \" -ss \" + str(split_start) + \" -t \" + str(split_length) + \" '\"+ dstfilebase + \".\" + fileext + \"'\"\n\n s_str = \"\"\t\n #s_str += \"ffmpeg\"+\" -ss \"+str(split_start)+\" -t \"+str(split_length) + s_cmd + \" '\"+dstfilebase + \".\" + fileext + \"'\"\n s_str += \"ffmpeg\" + \" -ss \" + str(split_start) + s_cmd + \" -t \" + str(split_length) + \" '\"+ dstfilebase + \".\" + fileext + \"'\"\n print(\"########################################################\")\n #print \"About to run: \"+split_cmd+split_str\n print(\"About to run: \"+s_str)\n print(\"########################################################\")\n #output = subprocess.Popen(split_cmd+split_str, shell = True, stdout = subprocess.PIPE).stdout.read()\n output = subprocess.Popen(s_str, shell=True, stdout=subprocess.PIPE).stdout.read()", "def readH264(path,flag='mask'):\n # known size of the images\n rows = 128\n cols = 128\n\n # read in raw bytes as a 1D array\n arr = np.fromfile(path,dtype='uint16')\n\n if flag=='mask':\n ## update values based on code\n # get code\n code_array = np.bitwise_and(arr,0xF000)\n # CODE_VAL_SEUIL2\n arr[code_array==0xD000] = 0xF800\n # CODE_VAL_CONTOUR\n arr[code_array==0xB000] = 0xF81F\n # CODE_VAL_MAX\n arr[code_array==0xC000] = 0x0000\n # CODE_VAL_SEUIL1\n arr[code_array==0xE000] = 0x001F\n\n ## just lower 12-bits\n arr = np.bitwise_and(arr,0x0FFF)\n\n ## convert data to frames\n # break the data into chunks that are 1d frames\n frames_set = np.split(arr,int(arr.shape[0]/(rows*cols)))\n # combined frames together into a 3d array and interpret as float16 data type\n return np.dstack([np.reshape(f,(rows,cols)) for f in frames_set]).astype('float16')", "def get_data_for_video(\n video_name,\n path_info='/sequoia/data2/gcheron/UCF101/detection/mytracksK5_600k',\n path_tracks='/sequoia/data2/gcheron/pytorch/diffrac_action_localization/UCF101/results/mytracksK5_600k/tracks/',\n sp_iou_thresh=0.3,\n groupby=8,\n feat_type='RGB+OPF',\n dim_feat=832,\n n_actions=25):\n\n info_video = pickle.load(\n open(os.path.join(path_info, video_name + '.pkl')))\n gt = info_video['gts']\n\n if not gt:\n return False, 0, 0\n\n labels = np.array([x['label'] - 1 for x in gt])\n\n split_feat = feat_type.split('+')\n n_feat_type = len(split_feat)\n\n # Read the tracks.\n list_track_file = sorted(\n glob.glob(os.path.join(path_tracks, video_name, 
'*pkl')))\n\n if len(list_track_file) == 0:\n return False, 0, 0\n\n feats = []\n gts = []\n for id_track, track_name in enumerate(list_track_file):\n assert re.match('.*/([^/]*)', track_name).group(1) == 'track{:05d}.pkl'.format(id_track + 1)\n track_data = pickle.load(open(track_name, 'rb'))\n n_chunks = (track_data['N_frames'] - 1) / groupby + 1\n start_frame = track_data['tbound'][0] - 1\n\n x = np.zeros([n_chunks, dim_feat * n_feat_type])\n z = np.zeros([n_chunks, n_actions])\n\n for t in range(n_chunks):\n start_chunk = t * groupby\n end_chunk = min(start_chunk + groupby, track_data['N_frames'])\n\n # Deduce the GT for that chunk.\n gt_chunk = np.min(\n info_video['tracks'][id_track]['gt_iou'][start_chunk:end_chunk, :], axis=0)\n id_max = np.argmax(gt_chunk)\n\n # Add the GT to z.\n if sp_iou_thresh > 0:\n # Create 0/1 labels.\n if gt_chunk[id_max] > sp_iou_thresh:\n z[t, labels[id_max]] = 1\n else:\n z[t, -1] = 1\n else:\n # Regress the IOU instead of creating 0/1 labels.\n if gt_chunk[id_max] > -sp_iou_thresh:\n # If higher than threshold, put iou as regressed value.\n z[t, labels[id_max]] = gt_chunk[id_max]\n else:\n # If lower than threshold, put 1-iou as regressed value for background.\n z[t, -1] = 1 - gt_chunk[id_max]\n\n # Get the feature for that chunk.\n for f_id, feat_type in enumerate(split_feat):\n feat_chunk = np.zeros(dim_feat)\n for frame in range(start_chunk, end_chunk):\n # Get the original chunk id from feature computation (not compatible with Python 3).\n id_feat = (frame + start_frame) / 4 - start_frame / 4\n # Get the feature and divide by the total number of frames in my track chunk.\n feat_chunk += track_data[feat_type][id_feat, :] / (\n end_chunk - start_chunk)\n # Fill x with the average feature inside the chunk.\n x[t, f_id * dim_feat:(f_id + 1) * dim_feat] = feat_chunk\n\n feats.append(x)\n gts.append(z)\n\n feats = np.concatenate(feats)\n gts = np.concatenate(gts)\n return True, feats, gts", "def process_m3u8(m3u8_doc, stream_type='video'):\r\n\r\n url = d.eff_url if stream_type == 'video' else d.audio_url\r\n\r\n media_playlist = MediaPlaylist(d, url, m3u8_doc, stream_type)\r\n\r\n segments = media_playlist.create_segment_list()\r\n d.segments += segments\r\n\r\n # write m3u8 file with absolute paths for debugging\r\n name = 'remote_video2.m3u8' if stream_type == 'video' else 'remote_audio2.m3u8'\r\n file_path = os.path.join(d.temp_folder, name)\r\n with open(os.path.join(d.temp_folder, file_path), 'w') as f:\r\n f.write(media_playlist.create_remote_m3u8_doc())\r\n\r\n # write local m3u8 file\r\n name = 'local_video.m3u8' if stream_type == 'video' else 'local_audio.m3u8'\r\n file_path = os.path.join(d.temp_folder, name)\r\n with open(os.path.join(d.temp_folder, file_path), 'w') as f:\r\n f.write(media_playlist.create_local_m3u8_doc())", "def process_video(data_info, name, mode, is_training=True):\r\n data = Action_Dataset(name, mode, [data_info])\r\n if is_training:\r\n clip_seq, label_seq = data.next_batch(1, _CLIP_SIZE)\r\n else:\r\n clip_seq, label_seq = data.next_batch(\r\n 1, _EACH_VIDEO_TEST_SIZE+1, shuffle=False, data_augment=False)\r\n clip_seq = 2*(clip_seq/255) - 1\r\n clip_seq = np.array(clip_seq, dtype='float32')\r\n return clip_seq, label_seq", "def _decode_35708(data):\n start_byte = 0\n n_bytes = 2\n var_id = struct.unpack('<H', data[start_byte:start_byte + n_bytes])[0]\n if var_id == 29974:\n start_byte += n_bytes\n n_bytes = 4\n var_size = struct.unpack('<I', data[start_byte:\n start_byte + n_bytes])[0]\n start_byte += n_bytes\n n_bytes = 
var_size\n\n return np.frombuffer(data[start_byte:start_byte + n_bytes],\n dtype=np.float64)", "def split_into_frames(filename_raw, thr_var_per_event=5e-4, downsampling_factor=2, disable_display=False,\n filename_output_video=None):\n\n assert downsampling_factor == int(downsampling_factor), \"Error: downsampling_factor must be an integer\"\n assert downsampling_factor >= 0, \"Error: downsampling_factor must be >= 0\"\n\n mv_adaptive_rate_iterator = AdaptiveRateEventsIterator(input_path=filename_raw,\n thr_var_per_event=thr_var_per_event,\n downsampling_factor=downsampling_factor)\n\n height, width = mv_adaptive_rate_iterator.get_size()\n\n if filename_output_video == None:\n video_process = None\n else:\n assert not os.path.exists(filename_output_video)\n video_process = FFmpegWriter(filename_output_video)\n\n if video_process or not disable_display:\n img_bgr = np.zeros((height, width, 3), dtype=np.uint8)\n\n cv2.namedWindow(\"img\", cv2.WINDOW_NORMAL)\n\n for events in mv_adaptive_rate_iterator:\n assert events.size > 0\n start_ts = events[0][\"t\"]\n end_ts = events[-1][\"t\"]\n print(\"frame: {} -> {} delta_t: {} fps: {} nb_ev: {}\".format(start_ts, end_ts,\n end_ts - start_ts,\n 1e6 / (end_ts - start_ts),\n events.size))\n if video_process or not disable_display:\n img = events_to_diff_image(events, sensor_size=(height, width))\n img_bgr[...] = 0\n img_bgr[img < 0, 0] = 255\n img_bgr[img > 0, 1] = 255\n\n chunk_start_ts = events[0][\"t\"]\n chunk_end_ts = events[-1][\"t\"]\n delta_t_frame = chunk_end_ts - chunk_start_ts + 1\n frame_txt = \"ts: {} -> {} delta_t: {} fps: {} (nb_ev): {}\".format(chunk_start_ts, chunk_end_ts,\n delta_t_frame,\n int(1.e6/delta_t_frame),\n events.size)\n img_bgr[20:45, ...] = 0\n cv2.putText(img_bgr,\n frame_txt,\n (int(0.05 * width), 40),\n cv2.FONT_HERSHEY_PLAIN, 1.0, (200, 200, 100))\n\n if video_process:\n video_process.writeFrame(img_bgr.astype(np.uint8)[..., ::-1])\n if not disable_display:\n cv2.imshow(\"img\", img_bgr)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n if video_process:\n video_process.close()\n if not disable_display:\n cv2.destroyAllWindows()", "def decode_faceshift_datastream(self, data):\n \n #block_id = struct.unpack_from('H', data)\n #print(\"Received block id \" + str(block_id)) ;\n\n offset = 0\n block_id, version, block_size = struct.unpack_from('HHI', data, offset)\n \n #print(\"ID, v, size = \" + str(block_id) + \",\" + str(version) + \",\" + str(block_size) )\n \n offset += 8\n\n if(block_id == BLOCK_ID_TRACKING_STATE):\n n_blocks, = struct.unpack_from('H', data, offset)\n #print(\"n_blocks = \" + str(n_blocks))\n offset += 2\n\n track_ok = 0 # Will be a byte: 1 if tracking ok, 0 otherwise.\n head_rotation_quat = None # Will be filled with the rotation using mathutils.Quaternion\n blend_shape_values = [] # Will be a list of float in the range 0-1\n #eyes_values = None # Will be a sequence of 4 angle values\n markers_position = [] # Will be a list of mathutils.Vector\n \n curr_block = 0\n while(curr_block < n_blocks):\n block_id, version, block_size = struct.unpack_from('HHI', data, offset)\n #print(\"ID, v, size = \" + str(block_id) + \",\" + str(version) + \",\" + str(block_size) )\n \n # put the offset at the beginning of the block\n offset += 8\n \n if(block_id == 101): # Frame Information blobk (timestamp and tracking status)\n ts, track_ok = struct.unpack_from('dB', data, offset)\n #print(\"timestamp, track_ok \" + str(ts) + \", \" + str(track_ok) )\n #offset += 9\n elif(block_id == 102): # Pose block (head 
rotation and position)\n x,y,z,w = struct.unpack_from('ffff', data, offset)\n #head_rotation_quat = mathutils.Quaternion((w,x,y,z))\n elif(block_id == 103): # Blendshapes block (blendshape values)\n n_coefficients, = struct.unpack_from('I', data, offset)\n #print(\"Blend shapes count=\"+ str(n_coefficients) )\n i = 0\n coeff_list = \"\"\n while(i < n_coefficients):\n # Offset of the block, plus the 4 bytes for int n_coefficients, plus 4 bytes per float\n val, = struct.unpack_from('f', data, offset + 4 + (i*4))\n blend_shape_values.append(val)\n coeff_list += repr(val) + \" \"\n i += 1\n print(\"Values: \" + coeff_list)\n elif(block_id == 104): # Eyes block (eyes gaze)\n leye_theta, leye_phi, reye_theta, reye_phi = struct.unpack_from('ffff', data, offset)\n elif(block_id == 105): # Markers block (absolute position of mark points)\n n_markers, = struct.unpack_from('H', data, offset)\n #print(\"n markers=\"+str(n_markers))\n i = 0\n while(i < n_markers):\n # Offset of the block, plus the 2 bytes for int n_markers, plus 4 bytes for each x,y,z floats\n x, y, z = struct.unpack_from('fff', data, offset + 2 + (i*4*3))\n #print(\"m\" + str(i) + \" \" + str(x) + \"\\t\" + str(y) + \"\\t\" + str(z))\n markers_position.append(mathutils.Vector((x,y,z)))\n i += 1\n \n curr_block += 1\n offset += block_size\n \n msg = fsMsgTrackingState()\n\n msg.m_timestamp = ts\n\n self.pub.publish(msg)\n\n # end -- while on blocks. Track State scan complete", "def cast_to(ibuilder, data_amounts, src_buf, dst_buf):\n src_dtype = src_buf.dtype\n dst_dtype = dst_buf.dtype\n if src_dtype == \"float16\" and dst_dtype == \"int32\":\n vconv_instr = \"vconv_f162s32f\"\n vconv_compute_num = VEC_NUMS_HALF\n elif src_dtype == \"float32\" and dst_dtype == \"float16\":\n vconv_instr = \"vconv_f322f16\"\n vconv_compute_num = VEC_NUMS_HALF\n elif src_dtype == \"float32\" and dst_dtype == \"int32\":\n vconv_instr = \"vconv_f322s32f\"\n vconv_compute_num = VEC_NUMS_HALF\n # vconv_s322f32 only support cloud_v100\n elif src_dtype == \"int32\" and dst_dtype == \"float32\":\n vconv_instr = \"vconv_s322f32\"\n vconv_compute_num = VEC_NUMS_HALF\n elif src_dtype == \"int8\" and dst_dtype == \"float16\":\n vconv_instr = \"vconv_s82f16\"\n vconv_compute_num = VEC_NUMS\n elif src_dtype == \"uint8\" and dst_dtype == \"float16\":\n vconv_instr = \"vconv_u82f16\"\n vconv_compute_num = VEC_NUMS\n elif src_dtype == \"float16\" and dst_dtype == \"float32\":\n vconv_instr = \"vconv_f162f32\"\n vconv_compute_num = VEC_NUMS_HALF\n elif src_dtype == \"float16\" and dst_dtype == \"int8\":\n vconv_instr = \"vconv_f162s8f\"\n vconv_compute_num = VEC_NUMS\n elif src_dtype == \"float16\" and dst_dtype == \"uint8\":\n vconv_instr = \"vconv_f162u8f\"\n vconv_compute_num = VEC_NUMS\n\n def compute_stride(src_type, dst_type, vconv_num):\n \"\"\"\n Calculated stride value\n \"\"\"\n perblock_nums_a = compute_perblock_nums(src_type)\n perblock_nums_b = compute_perblock_nums(dst_type)\n src_stride = vconv_num // perblock_nums_a\n dst_stride = vconv_num // perblock_nums_b\n\n return src_stride, dst_stride\n\n src_strides, dst_strides = compute_stride(src_dtype, dst_dtype, vconv_compute_num)\n\n # recheck vconv_instr support\n if not tbe_platform.cce_conf.intrinsic_check_support(\"Intrinsic_vconv\", \\\n vconv_instr.split('_')[1]):\n raise RuntimeError(\"This product don't support Intrinsic_vconv \" + \\\n vconv_instr)\n\n repeats = int(data_amounts // vconv_compute_num)\n remain = int(data_amounts % vconv_compute_num)\n init_times = int(repeats // UINT8_MAX)\n 
init_remain = int(repeats % UINT8_MAX)\n with ibuilder.if_scope(repeats != 0):\n if init_times != 0:\n with ibuilder.for_range(0, init_times) as rch:\n with ibuilder.new_scope():\n reset_mask_insn(\n ibuilder, dst_buf.dtype, bits=vconv_compute_num)\n ibuilder.emit(tvm.call_extern(dst_buf.dtype, vconv_instr, \\\n dst_buf.access_ptr('w', offset=rch * UINT8_MAX\n * vconv_compute_num), \\\n src_buf.access_ptr('r', offset=rch * UINT8_MAX\n * vconv_compute_num), \\\n 255, 1, 1, dst_strides, src_strides))\n if init_remain != 0:\n with ibuilder.new_scope():\n reset_mask_insn(ibuilder, dst_buf.dtype, bits=vconv_compute_num)\n ibuilder.emit(tvm.call_extern(dst_buf.dtype, vconv_instr, \\\n dst_buf.access_ptr('w', offset=init_times * UINT8_MAX\n * vconv_compute_num), \\\n src_buf.access_ptr('r', offset=init_times * UINT8_MAX\n * vconv_compute_num), \\\n init_remain, 1, 1, dst_strides, src_strides))\n\n with ibuilder.if_scope(remain != 0):\n with ibuilder.new_scope():\n mask_len = remain\n reset_mask_insn(ibuilder, dst_buf.dtype, bits=mask_len)\n ibuilder.emit(tvm.call_extern(dst_buf.dtype, vconv_instr, \\\n dst_buf.access_ptr('w', offset=repeats\n * vconv_compute_num), \\\n src_buf.access_ptr('r', offset=repeats\n * vconv_compute_num), \\\n 1, 1, 1, 0, 0))", "def apply_fourier_transform(chunked_audio):\n pass", "def extract_data(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(28 * 28 * 10000 * 1)\n data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)\n data = (data - (255 / 2.0)) / 255\n data = data.reshape(10000, 28, 28, 1)\n return data", "def compute_chunk_features(mp3_file):\n # On convertit le fichier mp3 en un fichier wav mono, 1avec un sample rate de 10000Hertz: on utilise\n # On utilise l'application sox \"c:/Program Files (x86)/sox-14.4.0/sox\"\n\n sox_command = \"./sox-14.4.0/sox\"\n out_file = 'temp.wav'\n #cmd = sox_command % (out_file, mp3_file)\n temp2 = subprocess.call([sox_command, mp3_file,'-r 10000','-c 1',out_file])\n # Read in chunks of data from WAV file\n wav_data1, wav_data2 = read_wav(out_file)\n # We'll cover how the features are computed in the next section!\n return np.array(features(wav_data1)), np.array(features(wav_data2))", "def compute_chunk_features(mp3_file):\n # Extract MP3 file to a mono, 10kHz WAV file\n sox_command = \"/usr/local/bin/sox\"\n out_file = 'temp.wav'\n #cmd = sox_command % (out_file, mp3_file)\n temp2 = subprocess.call([sox_command, mp3_file,'-r 10000','-c 1',out_file])\n # Read in chunks of data from WAV file\n wav_data1, wav_data2 = read_wav(out_file)\n # We'll cover how the features are computed in the next section!\n return np.array(features(wav_data1)), np.array(features(wav_data2))", "def crc16_array_prep( width, data):\n if width != 4:\n raise Exception(\"Error, crc16_array_prep called but data width is not 4-bits on the SDIO interface\")\n\n def get_bits_and_shift(byte,bit1,bit0,shift):\n \"\"\"\n Return {byte[bit1],byte[bit0]} << shift\n \"\"\"\n bit1 = ((byte & (1 << bit1)) >> bit1) << 1\n bit0 = ((byte & (1 << bit0)) >> bit0)\n return (bit1 | bit0) << shift\n\n D0=[]\n D1=[]\n D2=[]\n D3=[]\n B0=0\n B1=0\n B2=0\n B3=0\n for byte_num in range(0,len(data)):\n if (byte_num % 4 == 0) and byte_num > 0:\n # Append the bytes and reset them\n D0.append(B0)\n D1.append(B1)\n D2.append(B2)\n D3.append(B3)\n B0=0\n B1=0\n B2=0\n B3=0\n # Get the next byte \n B = data[byte_num]\n if byte_num % 4 == 0:\n B0 |= get_bits_and_shift(B,4,0,6)\n B1 |= 
get_bits_and_shift(B,5,1,6)\n B2 |= get_bits_and_shift(B,6,2,6)\n B3 |= get_bits_and_shift(B,7,3,6)\n if byte_num % 4 == 1:\n B0 |= get_bits_and_shift(B,4,0,4)\n B1 |= get_bits_and_shift(B,5,1,4)\n B2 |= get_bits_and_shift(B,6,2,4)\n B3 |= get_bits_and_shift(B,7,3,4)\n if byte_num % 4 == 2:\n B0 |= get_bits_and_shift(B,4,0,2)\n B1 |= get_bits_and_shift(B,5,1,2)\n B2 |= get_bits_and_shift(B,6,2,2)\n B3 |= get_bits_and_shift(B,7,3,2)\n if byte_num % 4 == 3:\n B0 |= get_bits_and_shift(B,4,0,0)\n B1 |= get_bits_and_shift(B,5,1,0)\n B2 |= get_bits_and_shift(B,6,2,0)\n B3 |= get_bits_and_shift(B,7,3,0)\n # Always append what is left over\n D0.append(B0)\n D1.append(B1)\n D2.append(B2)\n D3.append(B3)\n return (D0,D1,D2,D3)", "def pack_unpack_hard():\n # Array is apprx. 1.5 GB large\n # should make apprx 1536 chunks\n pack_unpack(100, chunk_size=reverse_pretty('1M'), progress=simple_progress)", "def preprocess_sample(file, params):\n\n videoFile = file + \".mp4\"\n audioFile = file + \".wav\"\n roiFile = file + \".png\"\n visualFeaturesFile = file + \".npy\"\n\n roiSize = params[\"roiSize\"]\n normMean = params[\"normMean\"]\n normStd = params[\"normStd\"]\n vf = params[\"vf\"]\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\n #Extract the audio from the video file using the FFmpeg utility and save it to a wav file.\n v2aCommand = \"ffmpeg -y -v quiet -i \" + videoFile + \" -ac 1 -ar 16000 -vn \" + audioFile\n os.system(v2aCommand)\n\n\n #for each frame, resize to 224x224 and crop the central 112x112 region\n captureObj = cv.VideoCapture(videoFile)\n roiSequence = list()\n while (captureObj.isOpened()):\n ret, frame = captureObj.read()\n if ret == True:\n grayed = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n grayed = grayed/255\n grayed = cv.resize(grayed, (224,224))\n roi = grayed[int(112-(roiSize/2)):int(112+(roiSize/2)), int(112-(roiSize/2)):int(112+(roiSize/2))]\n roiSequence.append(roi)\n else:\n break\n captureObj.release()\n cv.imwrite(roiFile, np.floor(255*np.concatenate(roiSequence, axis=1)).astype(np.int))\n\n\n #normalise the frames and extract features for each frame using the visual frontend\n #save the visual features to a .npy file\n inp = np.stack(roiSequence, axis=0)\n inp = np.expand_dims(inp, axis=[1,2])\n inp = (inp - normMean)/normStd\n inputBatch = torch.from_numpy(inp)\n inputBatch = (inputBatch.float()).to(device)\n vf.eval()\n with torch.no_grad():\n outputBatch = vf(inputBatch)\n out = torch.squeeze(outputBatch, dim=1)\n out = out.cpu().numpy()\n np.save(visualFeaturesFile, out)\n return", "def Decode(scaleFactorFull,bitAllocFull,mantissaFull,overallScaleFactorFull,codingParams):\n\n if(codingParams.blocksize == 3):\n #print \"MDCTLines: \", codingParams.nMDCTLines\n a = codingParams.longBlockSize/2\n b = codingParams.shortBlockSize/2\n elif (codingParams.blocksize == 2):\n a = codingParams.shortBlockSize/2\n b = a\n elif (codingParams.blocksize == 1):\n b = codingParams.longBlockSize/2\n a = codingParams.shortBlockSize/2\n else:\n a = codingParams.longBlockSize/2\n b = a\n N = a+b\n halfN = N/2\n\n #halfN = codingParams.nMDCTLines\n #N = 2*halfN\n # vectorizing the Dequantize function call\n# vDequantize = np.vectorize(Dequantize)\n data = []\n mdctLines = []\n for iCh in range(codingParams.nChannels):\n\n scaleFactor = scaleFactorFull[iCh]\n bitAlloc = bitAllocFull[iCh]\n mantissa = mantissaFull[iCh]\n overallScaleFactor = overallScaleFactorFull[iCh]\n rescaleLevel = 1.*(1<<overallScaleFactorFull[iCh])\n # reconstitute the first halfN MDCT 
lines of this channel from the stored data\n mdctLine = np.zeros(halfN,dtype=np.float64)\n iMant = 0\n for iBand in range(codingParams.sfBands.nBands):\n nLines =codingParams.sfBands.nLines[iBand]\n if bitAlloc[iBand]:\n mdctLine[iMant:(iMant+nLines)]=vDequantize(scaleFactor[iBand], mantissa[iMant:(iMant+nLines)],codingParams.nScaleBits, bitAlloc[iBand])\n iMant += nLines\n mdctLine /= rescaleLevel # put overall gain back to original level\n mdctLines.append(mdctLine)\n\n #print codingParams.couplingParams\n if codingParams.doCoupling == True and len(mdctLines[0]) > 128:\n #print len(mdctLines[0])\n mdctLines = np.array(mdctLines)\n # better to just pass codingParams to channelDecoupling?\n mdctLines = ChannelDecoupling(mdctLines,codingParams.coupledChannel,codingParams.couplingParams,codingParams.sampleRate,codingParams.nCouplingStart)\n\n mdctLines = np.array(mdctLines)\n for iCh in range(codingParams.nChannels):\n data.append(np.array([],dtype=np.float64)) # add location for this channel's data\n mdctLine = mdctLines[iCh]\n if codingParams.doSBR == True:\n ### SBR Decoder Module 1 - High Frequency Reconstruction ###\n mdctLine = HiFreqRec(mdctLine,codingParams.sampleRate,codingParams.sbrCutoff)\n ### SBR Decoder Module 2 - Additional High Frequency Components ###\n mdctLine = AddHiFreqs(mdctLine,codingParams.sampleRate,codingParams.sbrCutoff)\n ### SBR Decoder Module 3 - Envelope Adjustment ###\n mdctLine = EnvAdjust(mdctLine,codingParams.sampleRate,codingParams.sbrCutoff,codingParams.specEnv[iCh])\n # print codingParams.specEnv # Print envelope for debugging purposes\n\n # IMDCT and window the data for this channel\n # data = SineWindow( IMDCT(mdctLine, halfN, halfN) ) # takes in halfN MDCT coeffs\n imdct = IMDCT(mdctLine, a, b) # takes in halfN MDCT coeffs\n data[iCh] = np.append(SineWindow(np.append(imdct[:a],np.zeros(a)))[:a],SineWindow(np.append(np.zeros(b),imdct[a:]))[b:])\n #print data.size\n # end loop over channels, return reconstituted time samples (pre-overlap-and-add)\n\n return data", "def bytes_to_yuv(data, resolution):\n width, height = resolution\n fwidth, fheight = raw_resolution(resolution)\n y_len = fwidth * fheight\n uv_len = (fwidth // 2) * (fheight // 2)\n if len(data) != (y_len + 2 * uv_len):\n raise PiCameraValueError(\n 'Incorrect buffer length for resolution %dx%d' % (width, height))\n # Separate out the Y, U, and V values from the array\n a = np.frombuffer(data, dtype=np.uint8)\n Y = a[:y_len].reshape((fheight, fwidth))\n Uq = a[y_len:-uv_len].reshape((fheight // 2, fwidth // 2))\n Vq = a[-uv_len:].reshape((fheight // 2, fwidth // 2))\n # Reshape the values into two dimensions, and double the size of the\n # U and V values (which only have quarter resolution in YUV4:2:0)\n U = np.empty_like(Y)\n V = np.empty_like(Y)\n U[0::2, 0::2] = Uq\n U[0::2, 1::2] = Uq\n U[1::2, 0::2] = Uq\n U[1::2, 1::2] = Uq\n V[0::2, 0::2] = Vq\n V[0::2, 1::2] = Vq\n V[1::2, 0::2] = Vq\n V[1::2, 1::2] = Vq\n # Stack the channels together and crop to the actual resolution\n return np.dstack((Y, U, V))[:height, :width]", "def split(filepath, nsamples):\n start = np.cumsum([0] + list(nsamples[:-1]))\n if filepath[-10:] == 'analog.brw':\n filename = filepath[:-10]\n analog = read_3brain_analog(filepath)\n for i, (s,n) in enumerate(zip(start, nsamples)):\n name = f\"{filename}_part_{i}_analog.npz\"\n print(f\"Saving {name}\")\n sampling_rate = glia.sampling_rate(filepath)\n np.savez(name, analog=analog[s:s+n],\n sampling_rate=sampling_rate)\n elif filepath[-4:] == \".bxr\":\n filename = 
filepath[:-4]\n # split spike-sorted data\n with h5py.File(filepath, 'r') as h5:\n # shared setup for the concatenated arrays\n sampling_rate = float(h5[\"3BRecInfo\"][\"3BRecVars\"][\"SamplingRate\"][0])\n channel_map = h5[\"3BRecInfo\"][\"3BMeaStreams\"][\"Raw\"][\"Chs\"][()]\n \n # map 3brain unit num\n # numbers typically from -4 to 9000\n # where negative numbers appear across multiple channels\n # and thus are presumably bad units...?\n # positive-numbered units appear on one channel\n unit_id_2_num = {}\n\n n_unit_nums = 0\n if \"SpikeUnits\" in h5[\"3BResults\"][\"3BChEvents\"]:\n for chunk in iter_chunks(h5['3BResults/3BChEvents/SpikeUnits'], 10000):\n n_unit_nums = max(n_unit_nums, chunk.max())\n \n unit_map = {}\n channel_unit_count = {}\n\n\n # operate on each of the concatenated arrays, one at a time\n for i, (s,n) in enumerate(zip(start, nsamples)):\n startTime = s / sampling_rate\n first_idx = None\n for chunk in iter_chunks(h5['3BResults/3BChEvents/SpikeTimes'], 10000):\n valid_idxs = np.argwhere(h5[\"3BResults/3BChEvents/SpikeTimes\"] > s)\n if len(valid_idxs) > 0:\n first_idx = valid_idxs[0][0]\n break\n assert not first_idx is None\n print(f\"identified start idx of {first_idx}.\")\n\n # for simplicity, we just iterate again, could have faster implementation\n last_idx = len(h5['3BResults/3BChEvents/SpikeTimes'])\n chunk_size = 10000\n for j, chunk in enumerate(iter_chunks(h5['3BResults/3BChEvents/SpikeTimes'], chunk_size)):\n invalid_idxs = np.argwhere(chunk > s + n)\n if len(invalid_idxs) > 0:\n last_idx = invalid_idxs[0][0] + j*chunk_size\n break\n print(f\"identified stop idx of {last_idx}.\")\n \n spike_channel_ids = h5[\"3BResults\"][\"3BChEvents\"][\"SpikeChIDs\"][first_idx:last_idx]\n spike_unit_ids = h5[\"3BResults\"][\"3BChEvents\"][\"SpikeUnits\"][first_idx:last_idx]\n # poorly named; time is in units of 1/sampling_rate\n # aka sample number\n # subtract to adjust start time\n spike_times = h5[\"3BResults\"][\"3BChEvents\"][\"SpikeTimes\"][first_idx:last_idx] - s\n \n\n \n csv_name = f'{filename}_part_{i}_spikes.csv'\n spikes = zip(spike_channel_ids, spike_unit_ids, spike_times)\n tot_spikes = spike_times.shape[0]\n print(f\"creating {csv_name} ...\")\n with open(csv_name, 'w', newline='') as csvfile:\n fieldnames = ['channel_i', 'channel_j', 'unit', \"spike_time\"]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n for channel, unit_id, spike_time in tqdm(spikes,\n total=tot_spikes):\n c = channel_map[channel]\n # convert to tuple\n # account for 1-indexing\n c = (c[0]-1,c[1]-1)\n \n # count num units on channel\n # first check if we've seen this channel before\n if not c in channel_unit_count:\n # if not, initialize channel_unit_count for the channel\n channel_unit_count[c] = 1\n unit_num = 0\n # add unit\n unit_id_2_num[unit_id] = unit_num\n else:\n \n # then check if we've seen this unit before\n if not unit_id in unit_id_2_num:\n # if not, assign unit_num for this new unit\n unit_num = channel_unit_count[c]\n unit_id_2_num[unit_id] = unit_num\n channel_unit_count[c] += 1\n else:\n # otherwise, look it up\n unit_num = unit_id_2_num[unit_id]\n \n \n t = spike_time / sampling_rate\n writer.writerow({\"channel_i\": c[0],\n \"channel_j\": c[1],\n \"unit\": unit_num,\n \"spike_time\": t})\n \n np.save(f\"{filename}_channel_map.npy\", channel_map)", "def testSplit(self):\n\n protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()\n bigstring = \"\".join(chr(byte) for byte in range(ord(\"a\"), ord(\"z\")+1))\n\n 
databuf = TTransport.TMemoryBuffer()\n prot = protocol_factory.getProtocol(databuf)\n prot.writeI32(42)\n prot.writeString(bigstring)\n prot.writeI16(24)\n data = databuf.getvalue()\n cutpoint = len(data)/2\n parts = [ data[:cutpoint], data[cutpoint:] ]\n\n framed_buffer = TTransport.TMemoryBuffer()\n framed_writer = TTransport.TFramedTransport(framed_buffer)\n for part in parts:\n framed_writer.write(part)\n framed_writer.flush()\n self.assertEquals(len(framed_buffer.getvalue()), len(data) + 8)\n\n # Recreate framed_buffer so we can read from it.\n framed_buffer = TTransport.TMemoryBuffer(framed_buffer.getvalue())\n framed_reader = TTransport.TFramedTransport(framed_buffer)\n prot = protocol_factory.getProtocol(framed_reader)\n self.assertEqual(prot.readI32(), 42)\n self.assertEqual(prot.readString(), bigstring)\n self.assertEqual(prot.readI16(), 24)", "def split_into_channels(audio):\n\tif AUDIO_CHANNELS == 1:\n\t\treturn [audio]\n\tif AUDIO_CHANNELS == 2:\n\t\treturn [audioop.tomono(audio, AUDIO_DEPTH // BITS_IN_BYTE, 1, 0),\n\t\t\t\taudioop.tomono(audio, AUDIO_DEPTH // BITS_IN_BYTE, 0, 1)]\n\traise NotImplementedError(\"Only 1 or 2 channels are supported\")", "def data_rearrange_case_one(self, tik_instance, ub_ori, ub_cast_fp16,\n ub_trans, ub_cast_int8,\n num_loop_time, loop_num, is_last):\n cast_repeat_time = tik_instance.Scalar(\"uint64\")\n cast_remainder = tik_instance.Scalar(\"uint64\")\n with tik_instance.if_scope(num_loop_time == self.dst_shape[-3] - 1):\n if (self.src_shape[-2] % CUBE_SIZE) == 0:\n cast_repeat_time.set_as(loop_num * self.dst_shape[-1] *\n self.dst_shape[-2] // MAX_MASK)\n cast_remainder.set_as(loop_num * self.dst_shape[-1] *\n self.dst_shape[-2] % MAX_MASK)\n else:\n cast_repeat_time.set_as((self.src_shape[-2] % CUBE_SIZE) *\n loop_num * self.dst_shape[-1] //\n MAX_MASK)\n cast_remainder.set_as((self.src_shape[-2] % CUBE_SIZE) *\n loop_num * self.dst_shape[-1] %\n MAX_MASK)\n with tik_instance.else_scope():\n cast_repeat_time.set_as(loop_num * self.dst_shape[-1] *\n self.dst_shape[-2] // MAX_MASK)\n cast_remainder.set_as(loop_num * self.dst_shape[-1] *\n self.dst_shape[-2] % MAX_MASK)\n # cast the data from int8 to float16\n _cast_dtype(tik_instance, ub_cast_fp16, ub_ori, cast_repeat_time,\n cast_remainder, \"int8_2_float16\")\n\n scalar_zero = tik_instance.Scalar(dtype=\"float16\", init_value=0.0)\n with tik_instance.if_scope(is_last == 1):\n if self.src_shape[-1] % CUBE_SIZE_2 != 0:\n mask = 0\n for i, _ in enumerate(range(CUBE_SIZE_2 -\n self.src_shape[-1] % CUBE_SIZE_2)):\n mask += 2 ** (CUBE_SIZE_2 - 1 - i)\n\n tik_instance.vector_dup([0, mask],\n ub_cast_fp16[loop_num * CUBE_SIZE_2 -\n CUBE_SIZE_2],\n scalar_zero, CUBE_SIZE,\n self.cast_num_byte // 2,\n loop_num * CUBE_SIZE_2 //\n self.cast_num_data)\n with tik_instance.if_scope(num_loop_time == self.dst_shape[-3] - 1):\n if (self.src_shape[-2] % CUBE_SIZE) != 0:\n dup_number = (CUBE_SIZE - self.src_shape[-2] % CUBE_SIZE) * \\\n self.dst_shape[-1] * loop_num\n offset = (self.src_shape[-2] % CUBE_SIZE) * \\\n self.dst_shape[-1] * loop_num\n self.vector_dup_zero(tik_instance, ub_cast_fp16,\n dup_number, offset)\n with tik_instance.for_range(0, loop_num) as num_col_cube:\n tik_instance.vadds(CUBE_SIZE_2,\n ub_trans[num_col_cube *\n self.dst_shape[-2] *\n self.dst_shape[-1] +\n CUBE_SIZE_2 * num_col_cube],\n ub_cast_fp16[num_col_cube * CUBE_SIZE_2],\n scalar_zero, CUBE_SIZE,\n self.cast_num_byte // 2,\n self.cast_num_byte // 2,\n self.cast_num_byte,\n loop_num * self.dst_shape[-1] //\n 
self.cast_num_data)\n\n cast_repeat_time.set_as((CUBE_SIZE + 1) * loop_num *\n self.dst_shape[-1] // MAX_MASK)\n cast_remainder.set_as((CUBE_SIZE + 1) * loop_num * self.dst_shape[-1] %\n MAX_MASK)\n # cast the data from float16 to int8\n _cast_dtype(tik_instance, ub_cast_int8, ub_trans, cast_repeat_time,\n cast_remainder, \"float16_2_int8\")", "def read_bin_file(file_name, config, mode=0, header=True, packet_num=1443):\n # Read file\n if header:\n data = remove_header(file_name, packet_num)\n else:\n data = np.fromfile(file_name, dtype=np.int16)\n\n frame = config[0]\n sample = config[1]\n chirp = config[2]\n tx_num = config[3]\n rx_num = config[4]\n\n if mode == 0:\n data = np.reshape(data, [-1, 8])\n data = data[:, 0:4:] + 1j * data[:, 4::]\n if rx_num == 4:\n cdata1 = np.reshape(data[:, 0], [frame, chirp, tx_num, sample])\n cdata1 = np.transpose(cdata1, [0, 1, 3, 2]) # frame, chirp, sample, channel\n cdata2 = np.reshape(data[:, 1], [frame, chirp, tx_num, sample])\n cdata2 = np.transpose(cdata2, [0, 1, 3, 2]) # frame, chirp, sample, channel\n cdata3 = np.reshape(data[:, 2], [frame, chirp, tx_num, sample])\n cdata3 = np.transpose(cdata3, [0, 1, 3, 2]) # frame, chirp, sample, channel\n cdata4 = np.reshape(data[:, 3], [frame, chirp, tx_num, sample])\n cdata4 = np.transpose(cdata4, [0, 1, 3, 2]) # frame, chirp, sample, channel\n\n if tx_num == 3:\n cdata = np.array([cdata1[:, :, :, 0], cdata2[:, :, :, 0], cdata3[:, :, :, 0], cdata4[:, :, :, 0],\n cdata1[:, :, :, 1], cdata2[:, :, :, 1], cdata3[:, :, :, 1], cdata4[:, :, :, 1],\n cdata1[:, :, :, 2], cdata2[:, :, :, 2], cdata3[:, :, :, 2], cdata4[:, :, :, 2]])\n cdata = np.transpose(cdata, [1, 2, 3, 0])\n # cdata = np.concatenate([cdata1, cdata2, cdata3, cdata4], axis=3)\n return cdata # frame, chirp, sample, channel(tx1,tx2,tx3)\n\n elif tx_num == 1:\n cdata = np.array([cdata1[:, :, :, 0], cdata2[:, :, :, 0], cdata3[:, :, :, 0], cdata4[:, :, :, 0]])\n cdata = np.transpose(cdata, [1, 2, 3, 0])\n return cdata # frame, chirp, sample, channel\n\n elif mode == 1: # testing\n data = np.reshape(data, [-1, 4])\n data = data[:, 0:2:] + 1j * data[:, 2::]\n data = np.reshape(data, [frame, chirp, tx_num, rx_num, sample])\n if rx_num == 4:\n cdata1 = data[:, :, :, 0, :]\n cdata1 = np.transpose(cdata1, [0, 1, 3, 2])\n cdata2 = data[:, :, :, 1, :]\n cdata2 = np.transpose(cdata2, [0, 1, 3, 2])\n cdata3 = data[:, :, :, 2, :]\n cdata3 = np.transpose(cdata3, [0, 1, 3, 2])\n cdata4 = data[:, :, :, 3, :]\n cdata4 = np.transpose(cdata4, [0, 1, 3, 2])\n\n if tx_num == 3:\n cdata = np.concatenate((cdata1, cdata2, cdata3, cdata4), axis=3)\n return cdata # frame, chirp, sample, channel\n\n elif tx_num == 1:\n cdata = np.array([cdata1[:, :, :, 0], cdata2[:, :, :, 0], cdata3[:, :, :, 0], cdata4[:, :, :, 0]])\n cdata = np.transpose(cdata, [1, 2, 3, 0])\n return cdata # frame, chirp, sample, channel\n\n elif mode == 2:\n data = np.reshape(data, [-1, 4])\n data = data[:, 0:2:] + 1j * data[:, 2::]\n data = np.reshape(data, [frame, chirp * tx_num, rx_num, sample])\n return data\n\n else:\n raise ValueError", "def _rle_decode_segment(data: bytes) -> bytearray:\n data = bytearray(data)\n result = bytearray()\n pos = 0\n result_extend = result.extend\n\n try:\n while True:\n # header_byte is N + 1\n header_byte = data[pos] + 1\n pos += 1\n if header_byte > 129:\n # Extend by copying the next byte (-N + 1) times\n # however since using uint8 instead of int8 this will be\n # (256 - N + 1) times\n result_extend(data[pos : pos + 1] * (258 - header_byte))\n pos += 1\n elif header_byte 
< 129:\n # Extend by literally copying the next (N + 1) bytes\n result_extend(data[pos : pos + header_byte])\n pos += header_byte\n\n except IndexError:\n pass\n\n return result", "def segment_array(a):\n\n l = [array(a.typecode) for chaff in range(16)]\n index = 0\n\n for i in range(0, len(a), 16):\n l[index].extend(a[i:i + 16])\n index = (index + 1) % 16\n\n return l", "def get_blobs(self): # CHECK\n x=self.send_packet_check_response_without_retry('\\x90')\n n=len(x)/4 # CHECK? n = len(x)//4\n z=struct.unpack('<'+'I'*n,x)\n unpack=lambda i: tuple(i>>offset & (1<<length)-1 for offset,length in [(0,11),(11,11),(22,2),(24,8)])\n return z[0],[unpack(i) for i in z[1:]]", "def data_rearrange_case_zero(self, tik_instance, ub_ori, ub_cast_fp16,\n ub_trans, ub_cast_int8, loop_num, is_last):\n cast_repeat_time = tik_instance.Scalar(\"uint64\")\n cast_remainder = tik_instance.Scalar(\"uint64\")\n with tik_instance.if_scope(is_last == 1):\n if (self.src_shape[-2] % CUBE_SIZE) == 0:\n cast_repeat_time.set_as(loop_num * CUBE_SIZE *\n self.dst_shape[-4] *\n self.dst_shape[-1] // MAX_MASK)\n cast_remainder.set_as(loop_num * CUBE_SIZE *\n self.dst_shape[-4] *\n self.dst_shape[-1] % MAX_MASK)\n else:\n cast_repeat_time.set_as((loop_num * CUBE_SIZE - CUBE_SIZE +\n self.src_shape[-2] % CUBE_SIZE) *\n self.dst_shape[-4] *\n self.dst_shape[-1] // MAX_MASK)\n cast_remainder.set_as((loop_num * CUBE_SIZE - CUBE_SIZE +\n self.src_shape[-2] % CUBE_SIZE) *\n self.dst_shape[-4] * self.dst_shape[-1] %\n MAX_MASK)\n with tik_instance.else_scope():\n cast_repeat_time.set_as(loop_num * CUBE_SIZE * self.dst_shape[-4] *\n self.dst_shape[-1] // MAX_MASK)\n cast_remainder.set_as(loop_num * CUBE_SIZE * self.dst_shape[-4] *\n self.dst_shape[-1] % MAX_MASK)\n # cast the data from int8 to float16\n _cast_dtype(tik_instance, ub_cast_fp16, ub_ori, cast_repeat_time,\n cast_remainder, \"int8_2_float16\")\n\n num_row_one_loop = loop_num * CUBE_SIZE\n scalar_zero = tik_instance.Scalar(dtype=\"float16\", init_value=0.0)\n\n if self.src_shape[-1] % CUBE_SIZE_2 != 0:\n mask = 0\n for i, _ in enumerate(range(CUBE_SIZE_2 -\n self.src_shape[-1] % CUBE_SIZE_2)):\n mask += 2 ** (CUBE_SIZE_2 - 1 - i)\n\n with tik_instance.for_range(0, num_row_one_loop // MAX_REPEATS) \\\n as num_repeat:\n tik_instance.vector_dup([0, mask],\n ub_cast_fp16[(MAX_REPEATS *\n num_repeat + 1) *\n self.dst_shape[-4] *\n self.dst_shape[-1] -\n CUBE_SIZE_2],\n scalar_zero, MAX_REPEATS,\n self.cast_num_byte // 2,\n self.dst_shape[-4] *\n self.dst_shape[-1] //\n self.cast_num_data)\n with tik_instance.if_scope(num_row_one_loop % MAX_REPEATS != 0):\n tik_instance.vector_dup([0, mask],\n ub_cast_fp16[((num_row_one_loop //\n MAX_REPEATS) *\n MAX_REPEATS + 1) *\n self.dst_shape[-4] *\n self.dst_shape[-1] -\n CUBE_SIZE_2],\n scalar_zero,\n num_row_one_loop % MAX_REPEATS,\n 0, self.dst_shape[-4] *\n self.dst_shape[-1] //\n self.cast_num_data)\n\n with tik_instance.if_scope(is_last == 1):\n if (self.src_shape[-2] % CUBE_SIZE) != 0:\n dup_number = (CUBE_SIZE - self.src_shape[-2] % CUBE_SIZE) * \\\n self.dst_shape[-1] * self.dst_shape[-4]\n offset = ((loop_num - 1) * self.dst_shape[-2] +\n self.src_shape[-2] % CUBE_SIZE) * \\\n self.dst_shape[-1] * self.dst_shape[-4]\n self.vector_dup_zero(tik_instance, ub_cast_fp16,\n dup_number, offset)\n\n with tik_instance.for_range(0, self.dst_shape[-4]) as num_col_cube:\n with tik_instance.for_range(0, CUBE_SIZE * loop_num //\n MAX_REPEATS) as num_repeat_one:\n tik_instance.vadds(CUBE_SIZE_2,\n ub_trans[num_col_cube * loop_num *\n 
self.dst_shape[-2] *\n self.dst_shape[-1] +\n MAX_REPEATS *\n num_repeat_one * CUBE_SIZE_2 +\n CUBE_SIZE_2 * num_col_cube],\n ub_cast_fp16[MAX_REPEATS * num_repeat_one *\n self.dst_shape[-1] *\n self.dst_shape[-4] +\n num_col_cube * CUBE_SIZE_2],\n scalar_zero, MAX_REPEATS,\n self.cast_num_byte // 2,\n self.cast_num_byte // 2,\n self.cast_num_byte,\n self.dst_shape[-4] * self.dst_shape[-1] //\n self.cast_num_data)\n\n with tik_instance.if_scope((CUBE_SIZE * loop_num) %\n MAX_REPEATS != 0):\n tik_instance.vadds(CUBE_SIZE_2,\n ub_trans[num_col_cube * loop_num *\n self.dst_shape[-2] *\n self.dst_shape[-1] +\n (CUBE_SIZE * loop_num) //\n MAX_REPEATS * MAX_REPEATS *\n CUBE_SIZE_2 + CUBE_SIZE_2 *\n num_col_cube],\n ub_cast_fp16[(CUBE_SIZE * loop_num) //\n MAX_REPEATS * MAX_REPEATS *\n self.dst_shape[-1] *\n self.dst_shape[-4] +\n num_col_cube * CUBE_SIZE_2],\n scalar_zero,\n (CUBE_SIZE * loop_num) % MAX_REPEATS,\n self.cast_num_byte // 2,\n self.cast_num_byte // 2,\n self.cast_num_byte,\n self.dst_shape[-4] * self.dst_shape[-1] //\n self.cast_num_data)\n\n cast_repeat_time.set_as((loop_num * CUBE_SIZE + 1) *\n self.dst_shape[-4] * self.dst_shape[-1] //\n MAX_MASK)\n cast_remainder.set_as((loop_num * CUBE_SIZE + 1) * self.dst_shape[-4] *\n self.dst_shape[-1] % MAX_MASK)\n # cast the data from float16 to int8\n _cast_dtype(tik_instance, ub_cast_int8, ub_trans, cast_repeat_time,\n cast_remainder, \"float16_2_int8\")", "def extract_data(filename, num_images, IMAGE_WIDTH):\n\n# this function definition has been taken from internet\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_WIDTH * IMAGE_WIDTH * num_images)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32) #Interpret a buffer as a 1-dimensional array\n data = data.reshape(num_images, IMAGE_WIDTH*IMAGE_WIDTH)\n return data", "def test_parsing(self):\n truth = self.generate_fake_pos()\n batch_size = 4\n records = []\n for i in range(batch_size):\n record = b''\n for j in range(2):\n record += self.v4_record(*truth)\n records.append(record)\n\n parser = ChunkParser(ChunkDataSrc(records),\n shuffle_size=1,\n workers=1,\n batch_size=batch_size)\n batchgen = parser.parse()\n data = next(batchgen)\n\n batch = (np.reshape(np.frombuffer(data[0], dtype=np.float32),\n (batch_size, 112, 64)),\n np.reshape(np.frombuffer(data[1], dtype=np.int32),\n (batch_size, 1858)),\n np.reshape(np.frombuffer(data[2], dtype=np.float32),\n (batch_size, 3)),\n np.reshape(np.frombuffer(data[3], dtype=np.float32),\n (batch_size, 3)))\n\n fltplanes = truth[1].astype(np.float32)\n fltplanes[5] /= 99\n for i in range(batch_size):\n data = (batch[0][i][:104],\n np.array([batch[0][i][j][0] for j in range(104, 111)]),\n batch[1][i], batch[2][i], batch[3][i])\n self.assertTrue((data[0] == truth[0]).all())\n self.assertTrue((data[1] == fltplanes).all())\n self.assertTrue((data[2] == truth[2]).all())\n scalar_win = data[3][0] - data[3][-1]\n self.assertTrue(np.abs(scalar_win - truth[3]) < 1e-6)\n scalar_q = data[4][0] - data[4][-1]\n self.assertTrue(np.abs(scalar_q - truth[4]) < 1e-6)\n\n parser.shutdown()", "def parse_to_numpy(audio):\n\treturn numpy.frombuffer(audio, dtype=numpy.int16)", "def convert_urban_pcm24_to_pcm16():\n src_dir = ['/data1/data/UrbanSound8K/audio/fold{:d}'.format(i+1) for i in range(10)]\n dst_dir = ['/data1/data/UrbanSound8K-16bit/audio/fold{:d}'.format(i+1) for i in range(10)]\n converted_wav_paths = []\n for dsrc, ddst in zip(src_dir, dst_dir):\n 
maybe_create_directory(ddst)\n wav_files = filter(lambda FP: FP if FP.endswith('.wav') else None, \n [FP for FP in os.listdir(dsrc)])\n for wav_file in wav_files:\n src_wav, dst_wav = os.path.join(dsrc, wav_file), os.path.join(ddst, wav_file)\n convert_wav(src_wav, dst_wav, subtype='PCM_16')\n converted_wav_paths.append(dst_wav)\n print('converted count:', len(converted_wav_paths))\n print(converted_wav_paths, len(converted_wav_paths))", "def hamming_algorithm(data: bytearray):\n print(f\"data: {data}\")\n # 12345678 12345678 12345678 12345678\n if len(data) % 4 != 0:\n diff = 4 - len(data) % 4\n data += bytes(diff)\n m = len(data)\n r = 0\n chunck = 0\n i = 0\n ret_data = bytearray()\n while i < m // 4:\n chunck = struct.unpack(\"I\", data[i*4:i*4 + 4])[0]\n chunck, chunck_str = insert_redundant_bits(chunck)\n print(f\"chunck: {chunck} chunck_str:{chunck_str}\")\n i += 1", "def create_signal_chunks(ds, length_ms, step_ms, max_pad_ms=0, deterministic_output_order=True, max_num_chunks_per_signal=int(1e6), avg_num_chunks_from_signals=100):\n logger.info(\"Dividing every signal in the dataset into new signals by creating signal chunks of length %d ms and offset %d ms. Maximum amount of padding allowed in the last chunk is %d ms.\", length_ms, step_ms, max_pad_ms)\n\n chunk_length_sec = tf.constant(1e-3 * length_ms, tf.float32)\n chunk_step_sec = tf.constant(1e-3 * step_ms, tf.float32)\n max_pad_sec = tf.constant(1e-3 * max_pad_ms, tf.float32)\n id_str_padding = tf.cast(tf.round(audio_features.log10(tf.cast(max_num_chunks_per_signal, tf.float32))), tf.int32)\n\n def chunks_to_elements(chunk, chunk_num, x):\n chunk_num_str = tf.strings.as_string(chunk_num, width=id_str_padding, fill='0')\n chunk_id = tf.strings.join((x[\"id\"], chunk_num_str), separator='-')\n s = tf.reshape(chunk, [-1])\n out = dict(x, signal=s, id=chunk_id)\n if \"duration\" in x:\n out = dict(out, duration=tf.cast(tf.size(s) / x[\"sample_rate\"], tf.float32))\n return out\n\n def chunk_signal_and_flatten(x):\n signal = x[\"signal\"]\n sample_rate = tf.cast(x[\"sample_rate\"], tf.float32)\n\n chunk_length = tf.cast(sample_rate * chunk_length_sec, tf.int32)\n chunk_step = tf.cast(sample_rate * chunk_step_sec, tf.int32)\n max_pad = tf.cast(sample_rate * max_pad_sec, tf.int32)\n num_full_chunks = tf.math.maximum(0, 1 + (tf.size(signal) - chunk_length) // chunk_step)\n tf.debugging.assert_less(num_full_chunks, max_num_chunks_per_signal, message=\"Too many chunks created from signal, cannot create unique utterance ids, raise the max_num_chunks_per_signal parameter\")\n\n last_chunk_length = tf.size(signal) - num_full_chunks * chunk_step\n if last_chunk_length < chunk_length and chunk_length <= last_chunk_length + max_pad:\n signal = tf.pad(signal, [[0, chunk_length - last_chunk_length]])\n\n chunks = tf.signal.frame(signal, chunk_length, chunk_step, axis=0)\n num_chunks = tf.cast(tf.shape(chunks)[0], tf.int64)\n chunk_ds = tf.data.Dataset.from_tensor_slices(chunks)\n chunk_nums_ds = tf.data.Dataset.range(1, num_chunks + 1)\n repeat_x_ds = tf.data.Dataset.from_tensors(x).repeat(num_chunks)\n\n return (tf.data.Dataset\n .zip((chunk_ds, chunk_nums_ds, repeat_x_ds))\n .map(chunks_to_elements))\n\n interleave_kwargs = {\n \"block_length\": avg_num_chunks_from_signals,\n \"num_parallel_calls\": TF_AUTOTUNE,\n \"deterministic\": deterministic_output_order}\n if TF_VERSION_MAJOR == 2 and TF_VERSION_MINOR < 2:\n del interleave_kwargs[\"deterministic\"]\n logger.warning(\"Deleted unsupported 'deterministic' kwarg from 
tf.data.Dataset.interleave call, TF version >= 2.2 is required.\")\n\n return ds.interleave(chunk_signal_and_flatten, **interleave_kwargs)", "def unchunkify(chunks):\n recreated_chunks = list(map(lambda x: np.fft.irfft(combine_phase_and_power(*x)), chunks))\n total_length = len(recreated_chunks) * CHUNK_SIZE // 2\n output = np.zeros(total_length)\n window = np.power(np.sin(np.linspace(0, np.pi, CHUNK_SIZE)), 2)\n \n for i, j in enumerate(xrange(0, total_length - CHUNK_SIZE, CHUNK_SIZE // 2)):\n o = window * recreated_chunks[i]\n \n output[j: j+CHUNK_SIZE] += o\n return output", "def buffer(data, duration, data_overlap):\n # 向上取整(分段数),data_overlap代表相邻时间窗之间的重叠部分\n number_segments = int(math.ceil((len(data) - data_overlap) / (duration - data_overlap)))\n # 按时间窗获取数据(一时间窗为宽度的数据段组)\n temp_buf = [data[i:i + duration] for i in range(0, len(data), (duration - int(data_overlap)))]\n # 补全最后一个数据段\n temp_buf[number_segments - 1] = np.pad(temp_buf[number_segments - 1],\n (0, duration - temp_buf[number_segments - 1].shape[0]), 'constant')\n # 按顺序堆叠数据形成(数据段的数组)\n segmented_data = np.vstack(temp_buf[0:number_segments])\n\n return segmented_data", "def _reshape_memory(self, memory):\n # Grouping multiple frames if necessary\n if memory.size(-1) == self.frame_channels:\n memory = memory.view(memory.shape[0], memory.size(1) // self.r, -1)\n # Time first (T_decoder, B, frame_channels)\n memory = memory.transpose(0, 1)\n return memory", "def calculate_16bit_parts(value):\n if not (0 <= value < 65535):\n value = min(max(value, 0), 65535)\n # high_byte = value // 256\n # low_byte = value % 256\n # return high_byte, low_byte\n # faster:\n # return value // 256, value % 256\n # faster again:\n return value >> 8, value & 255", "def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:\n\n def compress_video(x: np.ndarray, video_format: str, constant_rate_factor: int, dir_: str = \"\"):\n \"\"\"\n Apply video compression to video input of shape (frames, height, width, channel).\n \"\"\"\n import ffmpeg\n\n video_path = os.path.join(dir_, f\"tmp_video.{video_format}\")\n _, height, width, _ = x.shape\n\n # numpy to local video file\n process = (\n ffmpeg.input(\"pipe:\", format=\"rawvideo\", pix_fmt=\"rgb24\", s=f\"{width}x{height}\")\n .output(video_path, pix_fmt=\"yuv420p\", vcodec=\"libx264\", crf=constant_rate_factor)\n .overwrite_output()\n .run_async(pipe_stdin=True, quiet=True)\n )\n process.stdin.write(x.flatten().astype(np.uint8).tobytes())\n process.stdin.close()\n process.wait()\n\n # local video file to numpy\n stdout, _ = (\n ffmpeg.input(video_path)\n .output(\"pipe:\", format=\"rawvideo\", pix_fmt=\"rgb24\")\n .run(capture_stdout=True, quiet=True)\n )\n return np.frombuffer(stdout, np.uint8).reshape(x.shape)\n\n if x.ndim != 5:\n raise ValueError(\"Video compression can only be applied to spatio-temporal data.\")\n\n if self.channels_first:\n x = np.transpose(x, (0, 2, 3, 4, 1))\n\n # apply video compression per video item\n x_compressed = x.copy()\n with TemporaryDirectory(dir=config.ART_DATA_PATH) as tmp_dir:\n for i, x_i in enumerate(tqdm(x, desc=\"Video compression\", disable=not self.verbose)):\n x_compressed[i] = compress_video(x_i, self.video_format, self.constant_rate_factor, dir_=tmp_dir)\n\n if self.channels_first:\n x_compressed = np.transpose(x_compressed, (0, 4, 1, 2, 3))\n\n return x_compressed, y", "def extract_buffer(sample: Gst.Sample) -> np.ndarray:\n\n buffer = sample.get_buffer() # Gst.Buffer\n caps = sample.get_caps()\n caps_format 
= sample.get_caps().get_structure(0) # Gst.Structure\n\n # GstVideo.VideoFormat\n video_format = GstVideo.VideoFormat.from_string(caps_format.get_value('format'))\n w, h = caps_format.get_value('width'), caps_format.get_value('height')\n # c = utils.get_num_channels(video_format)\n buffer_size = buffer.get_size()\n\n # Change back to 800x900 when YUV decoding in place\n shape = (int(h * 1.5), w) # One channel? 800 * 600 = 720000\n # The YUV420 format will be uint8\n return np.ndarray(shape=shape, buffer=buffer.extract_dup(0, buffer_size), dtype=np.uint8)", "def test_chunk_memory(self):\n layer = tl.Serial(tl.Dense(1024*1024), tl.Dense(128))\n chunked = tl.Chunk(layer, 256)\n x = np.random.uniform(size=(16*1024, 16))\n chunked.init(shapes.signature(x))\n y = chunked(x)\n z = tl.Accelerate(chunked)(x)\n self.assertEqual(y.shape, (16*1024, 128))\n self.assertEqual(z.shape, (16*1024, 128))", "def vidSeq(SetData,loop=0):\r\n\r\n w,h = 16,16\r\n arr = numpy.zeros((h,w), dtype=numpy.uint8)\r\n i = 0\r\n m = 0\r\n while m < loop or loop==0:\r\n print i\r\n arr[i/h,i%w] = 255\r\n h,w = arr.shape\r\n SetData(arr)\r\n time.sleep(0.1)\r\n i += 1\r\n if not (i < w*h):\r\n arr = numpy.zeros((h,w), dtype=numpy.uint8)\r\n i = 0\r\n m += 1", "def test04(self):\n a = np.arange(1e4)\n b = chunk(a, atom=a.dtype, cparams=bcolz.cparams())\n # print \"b[1:8000]->\", `b[1:8000]`\n assert_array_equal(a[1:8000], b[1:8000], \"Arrays are not equal\")", "def readFIBSEMdat(path, channel_index=-1, header=1024, magic_number=3555587570):\n ra = RandomAccessFile(path, 'r')\n try:\n # Check the magic number\n ra.seek(0)\n if ra.readInt() & 0xffffffff != magic_number:\n print \"Magic number mismatch\"\n return None\n # Read the number of channels\n ra.seek(32)\n numChannels = ra.readByte() & 0xff # a single byte as unsigned integer\n # Parse width and height\n ra.seek(100)\n width = ra.readInt()\n ra.seek(104)\n height = ra.readInt()\n print numChannels, width, height\n # Read the whole interleaved pixel array\n ra.seek(header)\n bytes = zeros(width * height * 2 * numChannels, 'b') # 2 for 16-bit\n ra.read(bytes)\n print \"read\", len(bytes), \"bytes\" # takes ~2 seconds\n # Parse as 16-bit array\n sb = ByteBuffer.wrap(bytes).order(ByteOrder.BIG_ENDIAN).asShortBuffer()\n shorts = zeros(width * height * numChannels, 'h')\n sb.get(shorts)\n # Deinterleave channels\n # With Weaver: fast\n channels = w.deinterleave(shorts, numChannels, channel_index)\n # With python array sampling: very slow, and not just from iterating whole array once per channel\n # seq = xrange(numChannels) if -1 == channel_index else [channel_index]\n #channels = [shorts[i::numChannels] for i in seq]\n # With clojure: extremely slow, may be using reflection unexpectedly\n #channels = deinterleave.invoke(shorts, numChannels)\n print len(channels)\n # Shockingly, these values are signed shorts, not unsigned!\n return [ArrayImgs.shorts(s, [width, height]) for s in channels]\n finally:\n ra.close()", "def _decode_35701(data):\n start_byte = 0\n n_bytes = 2\n var_id = struct.unpack('<H', data[start_byte:start_byte + n_bytes])[0]\n if var_id == 29995:\n start_byte += n_bytes\n n_bytes = 4\n n_points = struct.unpack(\n '<I', data[start_byte:start_byte + n_bytes])[0]\n return {'n_points': n_points}", "def deserialize_binary_to_frames(data_blob):\n\n from .FacesoftFlatbuffersSchema import SerializedKeyframesTimeline\n\n buf = bytearray(data_blob)\n serialized_keyframes = 
SerializedKeyframesTimeline.SerializedKeyframesTimeline.GetRootAsSerializedKeyframesTimeline(buf, 0)\n\n dt = serialized_keyframes.TimeBetweenFramesSecs()\n dim2 = serialized_keyframes.SamplesPerFrame()\n # dim1 = serialized_keyframes.KeyframesDataLength() / dim2\n\n keyframes = serialized_keyframes.KeyframesDataAsNumpy()\n keyframes = keyframes.reshape((-1, dim2))\n\n return keyframes, dt", "def unpack_int16(data):\n value = unpack(DecodeUtils.INT16_BYTE_FORMAT, data[:2])[0]\n return value, 2", "def read_sp2(file_name, debug=False, arm_convention=True):\n\n my_data = open(file_name, \"rb\").read()\n # Get file date from name\n if platform.system() == \"Windows\":\n split_file_name = file_name.split(\"\\\\\")\n else:\n split_file_name = file_name.split(\"/\")\n if arm_convention:\n next_split = split_file_name[-1].split(\".\")\n dt = datetime.strptime(next_split[2], \"%Y%m%d\")\n else:\n dt = datetime.strptime(split_file_name[-1][0:8], \"%Y%m%d\")\n\n if len(my_data) > 0:\n bytepos = 0\n numCols = struct.unpack(\">I\", my_data[bytepos:bytepos + 4])[0]\n bytepos += 4\n numChannels = struct.unpack(\">I\", my_data[bytepos:bytepos + 4])[0]\n if debug:\n print((\"Loaded file with numCols = {}, numChannels = {}\"\n .format(numCols, numChannels)))\n\n data_points_per_record = numChannels * numCols\n\n bytes_per_record = 2 * data_points_per_record\n bytes_not_data_array = 12 + 2 + 28 + 16\n bytes_per_record += bytes_not_data_array\n last_pos = int(bytes_per_record - 1)\n num_spare_cols = struct.unpack(\">I\", my_data[last_pos - 4:last_pos])[0]\n if debug:\n print(\"Number of spare columns = %d\" % num_spare_cols)\n\n if num_spare_cols != 0:\n bytes_per_record += num_spare_cols\n\n numRecords = int(len(my_data) / bytes_per_record)\n totalRows = numChannels * numRecords\n DataWave = np.zeros((totalRows, numCols), dtype='int16')\n Flag = np.zeros(int(totalRows / numChannels), dtype='int16')\n TimeWave = np.zeros(numRecords, dtype='float64')\n Res1 = np.zeros(numRecords, dtype='float32')\n EventIndex = np.zeros(numRecords, dtype='float32')\n TimeDiv10000 = np.zeros(numRecords, dtype='float64')\n TimeRemainder = np.zeros(numRecords, dtype='float64')\n Res5 = np.zeros(numRecords, dtype='float32')\n Res6 = np.zeros(numRecords, dtype='float32')\n Res7 = np.zeros(numRecords, dtype='float64')\n Res8 = np.zeros(numRecords, dtype='float64')\n if num_spare_cols != 0:\n SpareDataArray = np.zeros(numRecords, num_spare_cols)\n\n arrayFmt = \">\"\n for i in range(data_points_per_record):\n arrayFmt += \"h\"\n\n for record in range(numRecords):\n dataStartPoint = record * bytes_per_record + 8\n startRow = record * numChannels\n endRow = startRow + numChannels - 1\n the_row = np.array(struct.unpack(\n arrayFmt, my_data[dataStartPoint:dataStartPoint + int(data_points_per_record * 2)]))\n\n DataWave[startRow:endRow + 1, 0:numCols] = the_row.reshape(\n numCols, numChannels).T\n dataStartPoint += data_points_per_record * 2\n Flag[record] = struct.unpack(\">h\", my_data[dataStartPoint:dataStartPoint + 2])[0]\n next_floats = struct.unpack(\">ffffffff\", my_data[dataStartPoint + 2:dataStartPoint + 34])\n TimeWave[record] = next_floats[0]\n Res1[record] = next_floats[1]\n EventIndex[record] = next_floats[2]\n TimeDiv10000[record] = next_floats[3]\n TimeRemainder[record] = next_floats[4]\n Res5[record] = next_floats[5]\n Res6[record] = next_floats[6]\n next_doubles = struct.unpack(\">dd\", my_data[dataStartPoint + 34:dataStartPoint + 50])\n Res7[record] = next_doubles[0]\n Res8[record] = next_doubles[1]\n dataStartPoint += 
50\n\n if num_spare_cols != 0:\n startRow = (2 * num_spare_cols) * record\n dataStartPoint += bytes_not_data_array - 4\n spareFmt = \">\"\n for i in range(num_spare_cols):\n spareFmt += \"f\"\n\n SpareDataArray[record] = np.array(\n struct.unpack(spareFmt, my_data[dataStartPoint:dataStartPoint+4*num_spare_cols]))\n\n UTCtime = TimeDiv10000 * 10000 + TimeRemainder\n diff_epoch_1904 = (\n datetime(1970, 1, 1) - datetime(1904, 1, 1)).total_seconds()\n UTCdatetime = np.array([\n datetime.utcfromtimestamp(x - diff_epoch_1904) for x in UTCtime])\n\n DateTimeWave = (dt - datetime(1904, 1, 1)).total_seconds() + TimeWave\n\n # Make an xarray dataset for SP2\n Flag = xr.DataArray(Flag, dims={'event_index': EventIndex})\n Res1 = xr.DataArray(Res1, dims={'event_index': EventIndex})\n Res5 = xr.DataArray(Res5, dims={'event_index': EventIndex})\n Res6 = xr.DataArray(Res6, dims={'event_index': EventIndex})\n Res7 = xr.DataArray(Res7, dims={'event_index': EventIndex})\n Res8 = xr.DataArray(Res8, dims={'event_index': EventIndex})\n Time = xr.DataArray(UTCdatetime, dims={'event_index': EventIndex})\n EventInd = xr.DataArray(EventIndex, dims={'event_index': EventIndex})\n DateTimeWaveUTC = xr.DataArray(UTCtime, dims={'event_index': EventIndex})\n DateTimeWave = xr.DataArray(DateTimeWave, dims={'event_index': EventIndex})\n TimeWave = xr.DataArray(TimeWave, dims={'event_index': EventIndex})\n my_ds = xr.Dataset({'time': Time, 'Flag': Flag, 'Res1': Res1, 'Res5': Res5,\n 'Res6': Res6, 'Res7': Res7, 'Res8': Res8, 'EventIndex': EventInd,\n 'DateTimeWaveUTC': DateTimeWaveUTC, 'TimeWave': TimeWave,\n 'DateTimeWave': DateTimeWave})\n\n for i in range(numChannels):\n temp_array = np.zeros((numRecords, numCols), dtype='int')\n for j in range(numRecords):\n k = i + j*numChannels\n temp_array[j] = DataWave[k]\n my_ds['Data_ch' + str(i)] = xr.DataArray(\n temp_array, dims={'event_index': EventIndex, 'columns': np.arange(0, 100, 1)})\n del my_data\n del DataWave\n return my_ds\n else:\n return None", "def load_data(path, rng, epoch, batch_size, x_,y_):\n #global x_,t_,y_,\n #global first_report2 \n #first_report2 = True\n start_time = time()\n v,p,skeleton_feature,l = load_gzip(path)\n v = v[:,:,:res_shape[2]]\n res_shape[0] = v.shape[0]\n v_new = empty(res_shape,dtype=\"uint8\")\n\n for i in xrange(v.shape[0]): #batch\n if p[i] < 10: p[i] = 100\n ofs = p[i]*ratio\n mid = v.shape[-1]/2.\n sli = None\n if ofs < mid:\n start = int(round(mid-ofs))\n end = int(round(mid+ofs))\n sli = slice(start,end)\n\n for j in xrange(v.shape[2]): #maps\n for k in xrange(v.shape[3]): #frames\n #body\n img = v[i,0,j,k]\n img = cut_img(img,5)\n img = misc.imresize(img,(h,h))\n # if j==0: img = 255-misc.imfilter(img,\"contour\")\n v_new[i,0,j,k] = img\n\n #hand\n img = v[i,1,j,k]\n img = img[sli,sli]\n img = misc.imresize(img,(h,h))\n v_new[i,1,j,k] = img\n\n vid, lbl = v_new,l\n\n #if epoch==0: print \"get in\",str(time()-start_time)[:3]+\"s\",\n # shuffle data\n ind = rng.permutation(l.shape[0])\n ind = ind[:batch_size]\n vid = vid[:,:,:,:4,:,:]\n vid, skeleton_feature, lbl = vid[ind].astype(floatX), skeleton_feature[ind].astype(floatX),lbl[ind].astype(floatX)\n #vid, skeleton_feature, lbl = vid.astype(floatX), skeleton_feature.astype(floatX),lbl.astype(floatX)\n\n # vid = vid/(255./(scaler*2.))-scaler\n #traj = traj/(255./(scaler_traj*2.))-scaler_traj\n # traj = traj/(255./5.)\n\n # Wudi already made labels start from 0\n #lbl -= 1 \n\n #if first_report2:\n # print \"data range:\",vid.min(),vid.max()\n # print \"traj 
range:\",skeleton_feature.min(),skeleton_feature.max()\n # print \"lbl range:\",lbl.min(),lbl.max()\n # first_report2 = False\n\n # set value\n x_.set_value(vid, borrow=True)\n #t_.set_value(skeleton_feature, borrow=True)\n y_.set_value(lbl, borrow=True)", "def convert_uint16_to_array(value):\n return [\n (value >> 0 & 0xFF),\n (value >> 8 & 0xFF)\n ]", "def getC3DFrameFeats(model, srcVideoPath, onGPU, gpu_id, depth, i):\n # get the VideoCapture object\n cap = cv2.VideoCapture(srcVideoPath)\n \n # if the videoCapture object is not opened then exit without traceback\n if not cap.isOpened():\n print(\"Error reading the video file !!\")\n return None\n \n W, H = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n totalFrames = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n frameCount = 0\n features_current_file = []\n #ret, prev_frame = cap.read()\n assert cap.isOpened(), \"Capture object does not return a frame!\"\n #prev_frame = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)\n X = [] # input, initially a list, after first 16 frames converted to ndarray\n # Iterate over the entire video to get the optical flow features.\n while(cap.isOpened()):\n \n ret, curr_frame = cap.read() # H x W x C\n if not ret:\n break\n \n # resize to 180 X 320 and taking centre crop of 112 x 112\n curr_frame = cv2.resize(curr_frame, (W/2, H/2), cv2.INTER_AREA)\n (h, w) = curr_frame.shape[:2]\n # size is 112 x 112 x 3\n curr_frame = curr_frame[(h/2-56):(h/2+56), (w/2-56):(w/2+56), :]\n \n if frameCount < (depth-1): # append to list till first 16 frames\n X.append(curr_frame)\n else: # subsequent frames\n if type(X)==list: # For exactly first 16 frames, convert to np.ndarray \n X.append(curr_frame)\n X = np.stack(X)\n X = np.float32(X)\n X = torch.from_numpy(X)\n if onGPU:\n X = X.cuda(gpu_id)\n else: # sliding the window (taking 15 last frames and append next)\n # Adding a new dimension and concat on first axis\n curr_frame = np.float32(curr_frame)\n curr_frame = torch.from_numpy(curr_frame)\n if onGPU:\n curr_frame = curr_frame.cuda(gpu_id)\n #X = np.concatenate((X[1:], curr_frame[None, :]), axis=0)\n X = torch.cat([X[1:], curr_frame[None, :]])\n \n # TODO: Transpose once, and concat on first axis for subsequent frames\n # passing the matrix X to the C3D model\n # X is (depth, H, W, Ch)\n #input_mat = X.transpose(3, 0, 1, 2) # ch, depth, H, W\n input_mat = X.permute(3, 0, 1, 2) # transpose a 4D torch Tensor\n #input_mat = np.expand_dims(input_mat, axis=0)\n input_mat = input_mat.unsqueeze(0) # expand dims on Tensor\n #input_mat = np.float32(input_mat)\n \n # Convert to Variable\n #input_mat = torch.from_numpy(input_mat)\n input_mat = Variable(input_mat)\n \n # get the prediction after passing the input to the C3D model\n prediction = model(input_mat)\n # convert to numpy vector\n prediction = prediction.data.cpu().numpy()\n features_current_file.append(prediction)\n \n frameCount +=1\n if onGPU and (frameCount%1000)==0:\n print \"Video : {} :: Frame : {} / {}\".format((i+1), frameCount, totalFrames)\n\n # When everything done, release the capture\n cap.release()\n #return features_current_file\n return np.array(features_current_file) # convert to (N-depth+1) x 1 x 4096", "def prepare_chunk(\n self,\n file: AudioFile,\n chunk: Segment,\n duration: float = None,\n ) -> Tuple[np.ndarray, np.ndarray, List[Text]]:\n\n X, _ = self.model.audio.crop(\n file,\n chunk,\n mode=\"center\",\n fixed=self.duration if duration is None else duration,\n )\n\n introspection = self.model.introspection\n\n if 
self.is_multi_task:\n # this assumes that all tasks share the same model introspection.\n # this is a reasonable assumption for now.\n any_task = next(iter(introspection.keys()))\n num_frames, _ = introspection[any_task](X.shape[1])\n else:\n num_frames, _ = introspection(X.shape[1])\n\n annotation = file[\"annotation\"].crop(chunk)\n labels = annotation.labels() if self.chunk_labels is None else self.chunk_labels\n\n y = np.zeros((num_frames, len(labels)), dtype=np.int8)\n frames = SlidingWindow(\n start=chunk.start,\n duration=self.duration / num_frames,\n step=self.duration / num_frames,\n )\n for label in annotation.labels():\n try:\n k = labels.index(label)\n except ValueError:\n warnings.warn(\n f\"File {file['uri']} contains unexpected label '{label}'.\"\n )\n continue\n\n segments = annotation.label_timeline(label)\n for start, stop in frames.crop(segments, mode=\"center\", return_ranges=True):\n y[start:stop, k] += 1\n\n # handle corner case when the same label is active more than once\n y = np.minimum(y, 1, out=y)\n\n return X, y, labels", "def _create_chunks(opts, inputs, idx1, idx2):\n # idx2 = 75\n # idx1 = 71\n num_batch = idx2 - idx1\n # img1 = torch.zeros(num_batch, 1, 10, 224, 224)\n # img2 = torch.zeros(num_batch, 1, 10, 224, 224)\n # labels = torch.zeros(num_batch)\n\n feat1_list = []\n label_list = []\n for i in range(num_batch):\n curr_idx = i + idx1\n frames = range(curr_idx - 5, curr_idx + 5)\n temp1 = _load_chunk(opts, inputs, frames)\n feat1_list.append(temp1)\n\n temp_label = inputs[1][curr_idx, :].nonzero()\n if len(temp_label.size()) == 0:\n temp_label = 6\n else:\n if temp_label.size()[0] != 0:\n temp_label = temp_label[0][0]\n label_list.append(temp_label)\n\n feat1 = torch.cat(feat1_list, dim=0)\n labels = torch.LongTensor(label_list)\n return feat1, labels", "def _convert_to_multi_segment(self):\n\n self.header['nb_segment'] = [self.info['n_episodes']]\n\n # drop repeated signal headers\n self.header['signal_channels'] = \\\n self.header['signal_channels'].reshape(\n self.info['n_episodes'], -1)[0]\n\n # reshape signal memmap list\n new_sig_memmaps = []\n n_channels = len(self.header['signal_channels'])\n sig_memmaps = self._raw_signals[0]\n for first_index in np.arange(0, len(sig_memmaps), n_channels):\n new_sig_memmaps.append(\n sig_memmaps[first_index:first_index + n_channels])\n self._raw_signals = new_sig_memmaps\n\n self.logger.debug('New number of segments: {}'.format(\n self.info['n_episodes']))\n\n return", "def _load(self, filepath):\n import subprocess as sp\n command = ['ffmpeg',\n '-i', filepath,\n '-f', 's16le',\n '-acodec', 'pcm_s16le',\n '-ac', '1'] # channels: 2 for stereo, 1 for mono\n if self.sampling_rate != SAMPLING_RATE:\n command.extend(['-ar', str(self.sampling_rate)])\n command.append('-')\n # 30s at 44.1 kHz ~= 1.3e6\n proc = sp.run(command, stdout=sp.PIPE, bufsize=10**7, stderr=sp.DEVNULL, check=True)\n\n return np.fromstring(proc.stdout, dtype=\"int16\")", "def collate_frame_gru_fn(data):\n # Sort a data list by caption length\n if len(data[0]) == 10:\n if data[0][1] is not None:\n data.sort(key=lambda x: len(x[1]), reverse=True)\n videos, captions, cap_bows, idxs, cap_ids, video_ids, videos_target, video_ids_target, cap_tensor_target, cap_bow_target= zip(*data)\n\n # Merge videos (convert tuple of 1D tensor to 4D tensor)\n video_lengths = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos]\n\n frame_vec_len = len(videos[0][0])\n vidoes = torch.zeros(len(videos), max(video_lengths), frame_vec_len)\n videos_origin = 
torch.zeros(len(videos), frame_vec_len)\n vidoes_mask = torch.zeros(len(videos), max(video_lengths))\n for i, frames in enumerate(videos):\n end = video_lengths[i]\n vidoes[i, :end, :] = frames[:end,:]\n videos_origin[i,:] = torch.mean(frames,0)\n vidoes_mask[i,:end] = 1.0\n\n video_lengths_target = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos_target]\n\n frame_vec_len = len(videos_target[0][0])\n vidoes_target = torch.zeros(len(videos_target), max(video_lengths_target), frame_vec_len)\n videos_origin_target = torch.zeros(len(videos_target), frame_vec_len)\n vidoes_mask_target = torch.zeros(len(videos_target), max(video_lengths_target))\n for i, frames in enumerate(videos_target):\n end = video_lengths_target[i]\n vidoes_target[i, :end, :] = frames[:end,:]\n videos_origin_target[i,:] = torch.mean(frames,0)\n vidoes_mask_target[i,:end] = 1.0\n \n if captions[0] is not None:\n # Merge captions (convert tuple of 1D tensor to 2D tensor)\n lengths = [len(cap) for cap in captions]\n target = torch.zeros(len(captions), max(lengths)).long()\n words_mask = torch.zeros(len(captions), max(lengths))\n for i, cap in enumerate(captions):\n end = lengths[i]\n target[i, :end] = cap[:end]\n words_mask[i, :end] = 1.0\n else:\n target = None\n lengths = None\n words_mask = None\n\n if cap_tensor_target[0] is not None:\n # Merge captions (convert tuple of 1D tensor to 2D tensor)\n lengths_target = [len(cap) for cap in cap_tensor_target]\n target_target = torch.zeros(len(cap_tensor_target), max(lengths_target)).long()\n words_mask_target = torch.zeros(len(cap_tensor_target), max(lengths_target))\n for i, cap in enumerate(cap_tensor_target):\n end = lengths_target[i]\n target_target[i, :end] = cap[:end]\n words_mask_target[i, :end] = 1.0\n else:\n target_target = None\n lengths_target = None\n words_mask_target = None\n\n\n cap_bows = torch.stack(cap_bows, 0) if cap_bows[0] is not None else None\n\n cap_bow_target = torch.stack(cap_bow_target, 0) if cap_bow_target[0] is not None else None\n\n video_data = (vidoes, videos_origin, video_lengths, vidoes_mask)\n text_data = (target, cap_bows, lengths, words_mask)\n text_data_target = (target_target, cap_bow_target, lengths_target, words_mask_target)\n video_data_target = (vidoes_target, videos_origin_target, video_lengths_target, vidoes_mask_target)\n\n return video_data, text_data, idxs, cap_ids, video_ids, video_ids_target, video_data_target, text_data_target\n\n elif len(data[0]) == 14:\n if data[0][1] is not None:\n data.sort(key=lambda x: len(x[1]), reverse=True)\n videos, captions, cap_bows, idxs, cap_ids, video_ids, videos_target, video_ids_target, cap_tensor_target, cap_bow_target, videos_source2, video_ids_source2, cap_tensor_source2, cap_bow_source2= zip(*data)\n\n # Merge videos (convert tuple of 1D tensor to 4D tensor)\n video_lengths = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos]\n\n frame_vec_len = len(videos[0][0])\n vidoes = torch.zeros(len(videos), max(video_lengths), frame_vec_len)\n videos_origin = torch.zeros(len(videos), frame_vec_len)\n vidoes_mask = torch.zeros(len(videos), max(video_lengths))\n for i, frames in enumerate(videos):\n end = video_lengths[i]\n vidoes[i, :end, :] = frames[:end,:]\n videos_origin[i,:] = torch.mean(frames,0)\n vidoes_mask[i,:end] = 1.0\n\n video_lengths_target = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos_target]\n\n frame_vec_len = len(videos_target[0][0])\n vidoes_target = torch.zeros(len(videos_target), max(video_lengths_target), frame_vec_len)\n videos_origin_target = 
torch.zeros(len(videos_target), frame_vec_len)\n vidoes_mask_target = torch.zeros(len(videos_target), max(video_lengths_target))\n for i, frames in enumerate(videos_target):\n end = video_lengths_target[i]\n vidoes_target[i, :end, :] = frames[:end,:]\n videos_origin_target[i,:] = torch.mean(frames,0)\n vidoes_mask_target[i,:end] = 1.0\n\n video_lengths_source2 = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos_source2]\n\n frame_vec_len = len(videos_source2[0][0])\n vidoes_source2 = torch.zeros(len(videos_source2), max(video_lengths_source2), frame_vec_len)\n videos_origin_source2 = torch.zeros(len(videos_source2), frame_vec_len)\n vidoes_mask_source2 = torch.zeros(len(videos_source2), max(video_lengths_source2))\n for i, frames in enumerate(videos_source2):\n end = video_lengths_source2[i]\n vidoes_source2[i, :end, :] = frames[:end,:]\n videos_origin_source2[i,:] = torch.mean(frames,0)\n vidoes_mask_source2[i,:end] = 1.0\n\n if captions[0] is not None:\n # Merge captions (convert tuple of 1D tensor to 2D tensor)\n lengths = [len(cap) for cap in captions]\n target = torch.zeros(len(captions), max(lengths)).long()\n words_mask = torch.zeros(len(captions), max(lengths))\n for i, cap in enumerate(captions):\n end = lengths[i]\n target[i, :end] = cap[:end]\n words_mask[i, :end] = 1.0\n else:\n target = None\n lengths = None\n words_mask = None\n\n if cap_tensor_target[0] is not None:\n # Merge captions (convert tuple of 1D tensor to 2D tensor)\n lengths_target = [len(cap) for cap in cap_tensor_target]\n target_target = torch.zeros(len(cap_tensor_target), max(lengths_target)).long()\n words_mask_target = torch.zeros(len(cap_tensor_target), max(lengths_target))\n for i, cap in enumerate(cap_tensor_target):\n end = lengths_target[i]\n target_target[i, :end] = cap[:end]\n words_mask_target[i, :end] = 1.0\n else:\n target_target = None\n lengths_target = None\n words_mask_target = None\n\n if cap_tensor_source2[0] is not None:\n # Merge captions (convert tuple of 1D tensor to 2D tensor)\n lengths_source2 = [len(cap) for cap in cap_tensor_source2]\n target_source2 = torch.zeros(len(cap_tensor_source2), max(lengths_source2)).long()\n words_mask_source2 = torch.zeros(len(cap_tensor_source2), max(lengths_source2))\n for i, cap in enumerate(cap_tensor_source2):\n end = lengths_source2[i]\n target_source2[i, :end] = cap[:end]\n words_mask_source2[i, :end] = 1.0\n else:\n target_source2 = None\n lengths_source2 = None\n words_mask_source2 = None\n\n cap_bows = torch.stack(cap_bows, 0) if cap_bows[0] is not None else None\n cap_bow_target = torch.stack(cap_bow_target, 0) if cap_bow_target[0] is not None else None\n cap_bow_source2 = torch.stack(cap_bow_source2, 0) if cap_bow_source2[0] is not None else None\n\n video_data = (vidoes, videos_origin, video_lengths, vidoes_mask)\n text_data = (target, cap_bows, lengths, words_mask)\n text_data_target = (target_target, cap_bow_target, lengths_target, words_mask_target)\n video_data_target = (vidoes_target, videos_origin_target, video_lengths_target, vidoes_mask_target)\n text_data_source2 = (target_source2, cap_bow_source2, lengths_source2, words_mask_source2)\n video_data_source2 = (vidoes_source2, videos_origin_source2, video_lengths_source2, vidoes_mask_source2)\n\n\n return video_data, text_data, idxs, cap_ids, video_ids, video_ids_target, video_data_target, text_data_target, video_ids_source2, video_data_source2, text_data_source2\n\n\n else:\n if data[0][1] is not None:\n data.sort(key=lambda x: len(x[1]), reverse=True)\n videos, captions, cap_bows, 
idxs, cap_ids, video_ids = zip(*data)\n\n # Merge videos (convert tuple of 1D tensor to 4D tensor)\n video_lengths = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos]\n frame_vec_len = len(videos[0][0])\n vidoes = torch.zeros(len(videos), max(video_lengths), frame_vec_len)\n videos_origin = torch.zeros(len(videos), frame_vec_len)\n vidoes_mask = torch.zeros(len(videos), max(video_lengths))\n for i, frames in enumerate(videos):\n end = video_lengths[i]\n vidoes[i, :end, :] = frames[:end,:]\n videos_origin[i,:] = torch.mean(frames,0)\n vidoes_mask[i,:end] = 1.0\n \n if captions[0] is not None:\n # Merge captions (convert tuple of 1D tensor to 2D tensor)\n lengths = [len(cap) for cap in captions]\n target = torch.zeros(len(captions), max(lengths)).long()\n words_mask = torch.zeros(len(captions), max(lengths))\n for i, cap in enumerate(captions):\n end = lengths[i]\n target[i, :end] = cap[:end]\n words_mask[i, :end] = 1.0\n else:\n target = None\n lengths = None\n words_mask = None\n\n\n cap_bows = torch.stack(cap_bows, 0) if cap_bows[0] is not None else None\n\n video_data = (vidoes, videos_origin, video_lengths, vidoes_mask)\n text_data = (target, cap_bows, lengths, words_mask)\n\n return video_data, text_data, idxs, cap_ids, video_ids", "def split_and_upload(filepath,\n # upload parameters\n project_name=None, project_id=None, dataset_name=None, dataset_id=None, remote_path=None,\n # split parameters\n split_seconds=None, split_chunks=None, split_pairs=None,\n loglevel='panic'):\n try:\n import ffmpeg\n except ImportError:\n logger.error(\n 'Import Error! Cant import ffmpeg. '\n 'Annotations operations will be limited. import manually and fix errors')\n raise\n # https://www.ffmpeg.org/ffmpeg-formats.html#Examples-9\n\n if not os.path.isfile(filepath):\n raise IOError('File doesnt exists: {}'.format(filepath))\n logger.info('Extracting video information...')\n # call to ffmpeg to get frame rate\n probe = Videos.get_info(filepath)\n fps = eval(probe['streams'][0]['avg_frame_rate'])\n n_frames = eval(probe['streams'][0]['nb_frames'])\n video_length = eval(probe['streams'][0]['duration'])\n logger.info('Video frame rate: {}[fps]'.format(fps))\n logger.info('Video number of frames: {}'.format(n_frames))\n logger.info('Video length in seconds: {}[s]'.format(video_length))\n\n # check split params and calc split params for ffmpeg\n if split_seconds is not None:\n # split by seconds\n split_length = split_seconds\n if split_length <= 0:\n raise ValueError('\"split_length\" can\\'t be 0')\n split_count = int(np.ceil(video_length / split_length))\n list_frames_to_split = [fps * split_length * n for n in range(1, split_count)]\n elif split_chunks is not None:\n # split for known number of chunks\n split_count = split_chunks\n if split_chunks <= 0:\n raise ValueError('\"split_chunks\" size can\\'t be 0')\n split_length = int(np.ceil(video_length / split_chunks))\n list_frames_to_split = [fps * split_length * n for n in range(1, split_count)]\n elif split_pairs is not None:\n if not isinstance(split_pairs, list):\n raise ValueError('\"split_times\" must be a list of tuples to split at.')\n if not (isinstance(split_pairs[0], list) or isinstance(split_pairs[0], tuple)):\n raise ValueError('\"split_times\" must be a list of tuples to split at.')\n list_frames_to_split = [fps * split_second for segment in split_pairs for split_second in segment]\n split_count = len(list_frames_to_split)\n else:\n raise ValueError('Must input one split option (\"split_chunks\", \"split_time\" or \"split_pairs\")')\n if 
split_count == 1:\n raise ValueError('Video length is less than the target split length.')\n # to integers\n list_frames_to_split = [int(i) for i in list_frames_to_split]\n # remove 0 if in the first segmetn\n if list_frames_to_split[0] == 0:\n list_frames_to_split.pop(0)\n # add last frames if not exists\n if list_frames_to_split[-1] != n_frames:\n list_frames_to_split = list_frames_to_split + [n_frames]\n logger.info('Splitting to %d chunks' % split_count)\n\n basename, ext = os.path.splitext(filepath)\n output_regex = os.path.join(basename, '%%03d.mp4')\n # create folder\n if not os.path.exists(basename):\n os.makedirs(basename, exist_ok=True)\n # run ffmpeg\n try:\n stream = ffmpeg.input(filepath, **{'loglevel': loglevel}).output(output_regex,\n **{'x264opts': 'bframes=0',\n 'f': 'segment',\n 'reset_timestamps': '1',\n 'map': '0',\n 'segment_frames': ','.join(\n [str(i) for i in\n list_frames_to_split])\n })\n ffmpeg.overwrite_output(stream).run(capture_stdout=True)\n except Exception:\n logger.exception('ffmpeg error in disassemble:')\n raise\n\n # split_cmd = 'ffmpeg -y -i \"%s\" -b 0 -f mp4 -reset_timestamps 1 -map 0 -f segment -segment_frames %s \"%s\"' % (\n # filepath, ','.join([str(int(i)) for i in list_frames_to_split]), output_regex)\n # logger.info('About to run: %s' % split_cmd)\n # subprocess.check_call(shlex.split(split_cmd), universal_newlines=True)\n\n # rename\n list_frames_to_split = [0] + list_frames_to_split\n filenames = list()\n for n in range(split_count):\n old_filename = output_regex.replace('%03d', '%03d' % n)\n new_filename = output_regex.replace('%03d', '%s__%s' %\n (time.strftime('%H_%M_%S', time.gmtime(list_frames_to_split[n] / fps)),\n time.strftime('%H_%M_%S',\n time.gmtime(list_frames_to_split[n + 1] / fps))))\n filenames.append(new_filename)\n # rename to informative name\n if os.path.isfile(new_filename):\n logger.warning('File already exists. 
Overwriting!: {}'.format(new_filename))\n os.remove(new_filename)\n os.rename(old_filename, new_filename)\n # check if in pairs, if not - delete\n if split_pairs is not None:\n start_frames = [pair[0] for pair in split_pairs]\n end_frames = [pair[1] for pair in split_pairs]\n if (list_frames_to_split[n] // fps) in start_frames and (\n list_frames_to_split[n + 1] // fps) in end_frames:\n # keep video\n pass\n else:\n os.remove(new_filename)\n Videos.upload_to_platform(project_name=project_name,\n project_id=project_id,\n dataset_name=dataset_name,\n dataset_id=dataset_id,\n remote_path=remote_path,\n local_path=basename)", "def make_chunks(self, audio_segment, chunk_length):\r\n\t\tnumber_of_chunks = math.ceil(len(audio_segment) / float(chunk_length))\r\n\t\treturn [audio_segment[i * chunk_length:(i + 1) * chunk_length]\r\n\t\t\t\tfor i in range(int(number_of_chunks))]", "def num_43():\n \n def block(a, r=3, cs=3, row_order=True):\n \"\"\"Block slice an array using a window of (rs, cs) size\n \"\"\"\n lenr = a.shape[0]//rs\n lenc = a.shape[1]//cs\n if row_order:\n iter = [(i, j) for (i, j) in np.ndindex(lenr, lenc)]\n else:\n iter = [(j, i) for (i, j) in np.ndindex(lenr, lenc)]\n b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] for (i,j) in iter])\n #b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (i, j) in np.ndindex(lenr, lenc)])\n return b\n r = 6\n c = 6\n a = np.arange(r*c).reshape(r, c)\n vs = np.array(np.vsplit(a, 2))\n hs = np.array(np.hsplit(a, 2))\n #a.squeeze(axis=(2,3))\n rs = 3\n cs = 4\n #lenr = a.shape[0]//rs\n #lenc = a.shape[1]//cs\n #b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (i, j) in np.ndindex(lenr, lenc)])\n #b1 = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (j, i) in np.ndindex(lenr, lenc)])\n e = block(a, 3, 4, row_first=False)\n b = block(a, rs, cs, True)\n b1 = block(a, rs, cs, False)\n c = np.array([np.vsplit(i, 2) for i in np.hsplit(a, 2)])\n d = np.array([np.hsplit(i, 2) for i in np.vsplit(a, 2)])\n #c = c.reshape(lenr*lenc, rs, cs) \n return a, b, b1, c, d, e", "def stats_from_data(data,reverse_data):\n start = -1\n end = -1\n for num, pix in enumerate(data):\n if (pix[0]+pix[1]+pix[2]) < (192*3):\n start = num\n break\n for num, pix in enumerate(reverse_data):\n if (pix[0]+pix[1]+pix[2]) < (192*3):\n end = len(reverse_data) - (num + 1)\n break\n totalr, totalg, totalb = 0,0,0\n minr, ming, minb = 255,255,255\n maxr, maxg, maxb = 0,0,0\n count = 0\n dark = True\n tcount = 0\n if start == -1 or end == -1 or start==(end-1):\n return (start,end,(end-start)+1, 0,0,0, 0,0,0, 0,0,0, 0)\n for pix in data[start:end+1]:\n totalr += pix[0]\n totalg += pix[1]\n totalb += pix[2]\n if pix[0]<minr: minr=pix[0]\n if pix[1]<ming: ming=pix[1]\n if pix[2]<minb: minb=pix[2]\n if pix[0]>maxr: maxr=pix[0]\n if pix[1]>maxg: maxg=pix[1]\n if pix[2]>maxb: maxb=pix[2]\n#Light to dark transitions will be those in which, following a value above\n#208, the value drops to 184 or below on any color channel.\n if (pix[0]+pix[1]+pix[2])<=(184*3) and not dark:\n dark = True\n tcount += 1\n elif (pix[0]+pix[1]+pix[2])>(208*3) and dark:\n dark = False\n count += 1\n meanr = int(round(float(totalr*10)/count))\n meang = int(round(float(totalg*10)/count))\n meanb = int(round(float(totalb*10)/count))\n return (start, end, (end-start)+1, \n tcount, \n minr,maxr,meanr, \n ming,maxg,meang, \n minb,maxb,meanb)", "def Motion_estimate_compute(data,block_size=16):\n\n nb_blocks = width//block_size*height//block_size\n nb_frames = data.size//frame_size\n frames = 
np.array(data).reshape(nb_frames,frame_size)\n symbols_stream = [DCT_compute(frames[0],offset=128)]\n print(symbols_stream[-1].shape)\n\n for frame_index in range(1,nb_frames-1,2):\n # I\n symbols_stream.append(DCT_compute(frames[frame_index+1],offset=128))\n print(symbols_stream[-1].shape)\n # P\n P_frame = Motion_estimate_compute_1frame(frames[frame_index-1],\n frames[frame_index+1],\n frames[frame_index],\n block_size=block_size)\n \n print(P_frame[-1].shape)\n symbols_stream.append(P_frame)\n\n # Extra I if there is an odd number of frames\n if nb_frames%2 == 0:\n symbols_stream.append(np.array([-1]))\n symbols_stream.append(DCT_compute(frames[-1],offset=128))\n print(symbols_stream[-1].shape)\n symbols_stream = np.concatenate(symbols_stream)\n \n print(symbols_stream[17870:17890])\n return symbols_stream", "def cut_all_data_and_labels_on_chunks(data: Data_dict_type, labels: Labels_dict_type,\n window_size: float, window_step: float) -> Tuple[\n Data_dict_type, Labels_dict_type]:\n for key, item in data.items():\n # extract data and sample rate of videofile\n data_array, sample_rate = item\n # calculate size of window in units (indexes)\n window_size_in_units = int(np.round(window_size * sample_rate))\n window_step_in_units = int(np.round(window_step * sample_rate))\n try:\n # try to cut data on chunks with defined window\n data_array = cut_data_on_chunks(data_array, window_size_in_units, window_step_in_units)\n data_array = np.concatenate([x[np.newaxis, ...] for x in data_array], axis=0)\n except AttributeError:\n # if size of window or step of window are more than length of data, takes full data as one window.\n data_array = data_array[np.newaxis, ...]\n data[key] = (data_array, sample_rate)\n # labels cutting, everything the same as with data cutting\n labels_dataframe = labels[key]\n try:\n labels_dataframe = cut_data_on_chunks(labels_dataframe.values, window_size_in_units, window_step_in_units)\n labels_dataframe = np.concatenate([x[np.newaxis, ...] 
for x in labels_dataframe], axis=0)\n except AttributeError:\n # labels now will be saved in np.ndarray format\n labels_dataframe = labels_dataframe.values[np.newaxis, ...]\n labels[key] = labels_dataframe\n return data, labels", "def features_combine():\n\n\n\t# PROCESSING AUDIO", "def parse_decoder_outputs(self, mel_outputs):\n mel_outputs = torch.stack(mel_outputs).transpose(0, 1).contiguous()\n # decouple frames per step\n mel_outputs = mel_outputs.view(\n mel_outputs.size(0), -1, self.n_mel_channels)\n # (B, T_out, n_mel_channels) -> (B, n_mel_channels, T_out)\n mel_outputs = mel_outputs.transpose(1, 2)\n\n return mel_outputs", "def match_chunks(*arrays):\n target = arrays[0].datashape\n result = []\n for a in arrays:\n ds = a.datashape\n for i, j in zip(reversed(list(range(a.ndim))),\n reversed(list(range(target.ndim)))):\n ds = change_axis_schema(ds, i, chunk=target.chunk_size[j],\n overlap=target.chunk_overlap[j])\n if a.datashape.schema != ds.schema:\n a = a.redimension(ds.schema)\n result.append(a)\n\n return tuple(result)", "def extract_data(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)\n data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)\n# data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\n data = data.reshape(num_images, IMAGE_SIZE * IMAGE_SIZE)\n return data", "def multimodal_encode_data_generator(data_root=\"MP4_download\", shuffle_data=False,\n txt_encoder_filename_prefix='text_encoder', txt_maxlen=25,\n max_video_frame_number=None, video_width=640, video_height=360):\n\n if not os.path.exists(txt_encoder_filename_prefix + \".tokens\"):\n print(\"Create text_encoder from txt\")\n text_list = get_text_list_from_raw_txt_file(data_root)\n text_encoder = tfds_text_encoder(text_list)\n else:\n print(\"TokenTextEncoder.load_from_file(txt_encoder_filename_prefix)\")\n text_encoder = tfds.features.text.TokenTextEncoder.load_from_file(txt_encoder_filename_prefix)\n\n def process_video(video_file_path, max_video_frame_number=None, video_width=640, video_height=360):\n videoCapture = cv2.VideoCapture(video_file_path)\n success, frame = videoCapture.read()\n frame_list = []\n frame_number = 0\n while success:\n if frame is None:\n break\n if isinstance(max_video_frame_number, int) and frame_number == max_video_frame_number:\n break\n image_np = frame\n resize_image_np = cv2.resize(image_np, dsize=(video_width, video_height))\n resize_image_np_expanded = np.expand_dims(resize_image_np, axis=0)\n frame_list.append(resize_image_np_expanded)\n frame_number += 1\n success, frame = videoCapture.read()\n encode_video = np.concatenate(frame_list, axis=0)\n return encode_video\n\n def process_image_data(label):\n encode_label = video_label_to_id[label]\n return encode_label\n\n def process_txt_data(txt_file_path, txt_maxlen=25):\n description_information_dict = get_description_information(txt_file_path)\n encode_txt = text_encoder.encode(description_information_dict['mp4_txt_brief'])\n encode_txt = keras.preprocessing.sequence.pad_sequences(\n [encode_txt], maxlen=txt_maxlen, dtype='int32', padding='post', truncating='post', value=0.0)\n return encode_txt[0]\n\n for mp4_file_path, jpeg_file_path, txt_file_path, label in multimodal_data_path_generator(data_root, shuffle_data):\n encode_video = process_video(mp4_file_path, max_video_frame_number, video_width, video_height)\n image_file_path = jpeg_file_path\n encode_label = 
process_image_data(label)\n encode_txt = process_txt_data(txt_file_path, txt_maxlen)\n yield encode_video, image_file_path, encode_txt, encode_label", "def reformat(dataset):\n x = dataset[:, 1] \n x = np.stack(x) # reshape to (n, mel bands, timesteps)\n x = np.expand_dims(np.moveaxis(x, 1, -1), axis=3) # reformat x to (n, timesteps, mel bands, 1) \n y = dataset[:, 2] \n y = np.moveaxis(np.stack(y), 1, -1) # reformat y to (n, timesteps, 8)\n return x, y", "def extract_vob(in_vob, guid):\n\t#Detect interlacing.\n\tmediainfo_command = \"mediainfo --Inform='Video;%ScanType%,%ScanOrder%' \" + in_vob\n\tprint(mediainfo_command)\n\tprocess = subprocess.Popen(mediainfo_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0:\n\t\traise Exception(\"Calling Mediainfo on {in_vob} failed with exit code {exit_code}.\".format(in_vob=in_vob, exit_code=exit_code))\n\tmediainfo_parts = cout.decode(\"utf-8\").split(\",\")\n\tis_interlaced = mediainfo_parts[0] == \"Interlaced\"\n\tfield_order = mediainfo_parts[1].lower().strip()\n\tprint(\"Interlace detection:\", is_interlaced, field_order, \"(\", mediainfo_parts, \")\")\n\n\tffmpeg_command = [\"ffmpeg\", \"-i\", in_vob]\n\tprint(ffmpeg_command)\n\tprocess = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\tprocess.wait() #Ignore the exit code. It always fails.\n\tvobinfo = cerr.decode(\"utf-8\")\n\ttracks = []\n\tfor match in re.finditer(r\" Stream #0:(\\d+)\\[0x[0-9a-f]+\\]: (\\w+): ([^\\n]+)\", vobinfo):\n\t\ttrack_nr = match.group(1)\n\t\ttrack_type = match.group(2)\n\t\ttrack_codec = match.group(3)\n\t\tnew_track = track.Track()\n\t\tnew_track.from_vob(track_nr, track_type, track_codec, is_interlaced, field_order)\n\t\tnew_track.file_name = guid + \"-T\" + str(new_track.track_nr) + \".\" + new_track.codec\n\t\tif new_track.type != \"unknown\":\n\t\t\ttracks.append(new_track)\n\n\t#Generate the parameters to pass to ffmpeg.\n\ttrack_params = [\"-i\", in_vob]\n\tfor track_metadata in tracks:\n\t\ttrack_params.append(\"-map\")\n\t\ttrack_params.append(\"0:\" + str(track_metadata.track_nr))\n\t\ttrack_params.append(\"-c\")\n\t\ttrack_params.append(\"copy\")\n\t\ttrack_params.append(track_metadata.file_name)\n\n\t#Extract all tracks.\n\tprint(\"---- Extracting tracks...\")\n\tffmpeg(*track_params)\n\n\treturn tracks", "def divide_with_stride(arr: np.ndarray) -> List[np.ndarray]:\n\n result_list: List[np.ndarray] = []\n # slice by z axis\n for z in range(0, z_len := arr.shape[0], 16):\n if z + 31 >= z_len:\n z = z_len - 16\n z_arr: np.ndarray = arr[z:z+16]\n\n # slice by y axis\n for y in range(0, y_len := arr.shape[1], 16):\n y_arr: np.ndarray = z_arr[:, y:y+16]\n\n # slice by x axis\n for x in range(0, x_len := arr.shape[2], 16):\n x_arr: np.ndarray = y_arr[:, :, x:x+16]\n if len(set(x_arr.shape)) == 1 and x_arr.shape[0] == 16:\n result_list.append(x_arr)\n \n return result_list", "def get_chunks(sequence, ck_size):\n \n list_chunk = []\n i=1\n l = len(sequence)\n if l < 4*ck_size:\n raise ValueError(\"Chunk size should be of 4 at least \")\n for i in range(1, l):\n if i*ck_size < l:\n list_chunk.append(sequence[i*ck_size-ck_size:i*ck_size])\n #while(i*ck_size < l):\n #list_chunk.append(sequence[i*ck_size-ck_size:i*ck_size])\n #i += 1\n return list_chunk", "def _read_adsc_chunk(self, chunk):\n try:\n (a_size, self._nchannels, ansamples, acquifreq,\n sampwidth, highest, lowest, 
zero,\n reccode, recver) = unpack(\n '<L' # a_size 4\n 'H' # _nchannels 2 (nch)\n 'L' # ansamples 4\n 'L' # acquifreq 4\n 'H' # sampwidth 2 (bps)\n 'i' # highest 4\n 'i' # lowest 4\n 'i' # zero 4\n 'H' # reccode 2\n 'H', # recver 2\n chunk.read(32)\n )\n except struct.error:\n raise EOFError from None\n self._sampwidth = (sampwidth + 7) // 8\n if not self._sampwidth:\n raise Error('bad sample width')\n if not self._nchannels:\n raise Error('bad # of channels')\n self._framesize = self._nchannels * self._sampwidth\n self._comptype = 'NONE'\n self._compname = 'not compressed'", "def convertFromBytes(byteStr: bytes, sampleWidth: int) -> Tuple[int, ...]:\n byteCode = sampleWidthDict[sampleWidth]\n actualNumFrames = int(len(byteStr) / float(sampleWidth))\n audioFrameList = struct.unpack(\"<\" + byteCode * actualNumFrames, byteStr)\n\n return audioFrameList", "def broadcast_offset_param(data):\n return np.array([[data[int(i / 16)][(j + i * 32) % 128]\n for j in range(32)] \n for i in range(32)])", "def modulate(data):\n\n wave = ''\n levels = ('\\x00', '\\x55', '\\xaa', '\\xff')\n \n for frame in data:\n next_num = frame\n for grp in range(4):\n wave += levels[next_num % 4]\n next_num /= 4\n\n return wave", "def BCH_decode(self,encoded_parts,parameter,correcting_capability):\n code = komm.BCHCode(parameter,correcting_capability)\n decoded_parts = []\n for i in range (0, len(encoded_parts)):\n decoded_part = code.decode(encoded_parts[i])\n decoded_parts.append(decoded_part)\n \n decoded_parts = np.array(decoded_parts)\n decoded_parts = np.concatenate(decoded_parts)\n if(len(self.image_bits)%code.dimension != 0):\n for i in range(0,self.calculate_zeros_addition_BCH(parameter,correcting_capability)):\n decoded_parts = np.delete(decoded_parts,len(decoded_parts)-1)\n \n return decoded_parts", "def num_44():\n def block_array(a, rows=3, cols=4, col_first=True, nodata=-1):\n \"\"\" a variant on array_split\n requires a N*m array\n \"\"\"\n s = np.array(a.shape)\n w = np.array([rows, cols])\n m = divmod(s, w)\n new_shape = w*m[0] + w*(m[1]!=0)\n ypad, xpad = new_shape - a.shape \n b = np.pad(a, pad_width=((0, ypad),(0, xpad)), \n mode='constant', \n constant_values=((nodata, nodata),(nodata, nodata)))\n rn, cn = new_shape\n x_s = np.arange(0, cn+cols, cols)[1:] #.tolist()\n y_s = np.arange(0, rn+rows, rows)[1:] #.tolist()\n print(\"x_s {}\\ny_s {}\".format(x_s, y_s))\n #c = np.array([i for i in np.hsplit(b, x_s) if len(i) > 0])\n c = np.array([i for i in np.split(b, x_s, axis=1) if len(i) > 0])\n d = np.array([i for i in np.split(c, y_s, axis=1) if len(i) > 0])\n e = d.swapaxes(0, 1)\n ix = np.in1d(e.ravel(), nodata).reshape(e.shape)\n f = np.ma.array(e, mask=ix, fill_value=-1)\n return b, c, d, e, f\n y, x = 9, 11\n a = np.arange(x*y).reshape(y,x)\n b, c, d, e, f = block_array(a)\n print(\"\\n{}\".format(num_44.__doc__)) \n for i in [a, b, c, d, e, f]:\n _f(i)\n return a, b, c, d, e, f", "def recv_chunk(self, data):", "def get_signal_gwgds1072au(a_signal_packed: bytes, a_scale : float ) -> list:\n the_return = None\n the_signal_packed=a_signal_packed\n the_scale=a_scale\n the_signal_sequence=[]\n the_signal=0.0 #TODO reminder check this before allowing it\n the_info=[]\n n=4\n bla=0\n blb=bla+n\n print(the_signal_packed)\n JX=unpack('>%sh' % 2 ,the_signal_packed[bla:blb])\n for ii in range(0,2003):\n the_info.append(unpack('>%sh' % 2 ,the_signal_packed[bla:blb])[0])\n bla=bla+n\n blb=blb+n\n #TODO get the potential scale\n #TODO get the offset\n #TODO get the time scale\n\n return the_info", "def 
segment(data):", "def test_split_data(self):\n Xlists = tuple([[np.zeros((200,9)) for b in range(14)] for c in range(9)])\n ybinarylists = [np.zeros((14,12)) for c in range(9)]\n indices = slice(7, 9)\n x_test, y_test = tutorial_pamap2.split_data(Xlists, ybinarylists, \\\n indices)\n test = y_test[0].shape == (12,) and x_test[0].shape == (200, 9)\n assert test", "def images16(self, first, last, shape, validfirst, validlast):\n size = shape[0] * shape[1] * (1 + last - first)\n array = np.ascontiguousarray(np.zeros(size, dtype=np.int16))\n self.lib.GetImages16(ct.c_long(first), ct.c_long(last),\n array.ctypes.data_as(ct.POINTER(ct.c_int16)),\n ct.c_ulong(size),\n ct.pointer(ct.c_long(validfirst)),\n ct.pointer(ct.c_long(validlast)))\n\n return array.reshape(-1, shape[0], shape[1])", "def get_beat_sync_chroma_and_spectrum(audio, sr=None, bpm=None):\n if not isinstance(audio, np.ndarray):\n sr = 44100\n y = std.MonoLoader(filename=audio, samplerate=44100)()\n else:\n y = audio\n eql_y = std.EqualLoudness()(y)\n tempo, framed_dbn = self_tempo_estimation(y, sr, tempo=bpm)\n if framed_dbn.shape[0] % 4 == 0:\n framed_dbn = np.append(framed_dbn, np.array(len(y)/sr))\n band1 = (0, 220)\n band2 = (220, 1760)\n band3 = (1760, sr / 2)\n band1list = []\n band2list = []\n band3list = []\n chromas = []\n for i in range(1, len(framed_dbn)):\n fft_eq = abs(np.fft.fft(eql_y[int(framed_dbn[i - 1] * sr):int(framed_dbn[i] * sr)]))\n freqs = np.fft.fftfreq(len(fft_eq), 1 / sr)\n band1list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band1[0], freqs < band1[1]))]**2))))\n band2list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band2[0], freqs < band2[1]))]**2))))\n band3list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band3[0], freqs < band3[1]))]**2))))\n stft = abs(core.stft(y[int(framed_dbn[i - 1] * sr):int(framed_dbn[i] * sr)]))\n chroma = np.mean(feature.chroma_stft(y=None, S=stft ** 2), axis=1)\n chromas.append(chroma)\n chromas = np.array(chromas).transpose()\n band1list = np.array(band1list).transpose()\n band2list = np.array(band2list).transpose()\n band3list = np.array(band3list).transpose()\n return (chromas, np.vstack([band1list, band2list, band3list]))", "def preprocess(self, data):\n (w,h,f) = self.rawinputformat()\n dt = numpy.dtype(numpy.uint8)\n nb = numpy.frombuffer(data,dt,-1,0)\n actual_stream_width = (w&1)+w # hack, rather get this from the app sink\n if(actual_stream_width != self.reqsize):\n nb = nb.reshape(h,actual_stream_width,3)\n nb = nb[0:h,0:w,0:3] # crop to network input size\n else:\n nb = nb.reshape((actual_stream_width,actual_stream_width,3))\n img = nb.astype('float32')\n #Preprocess image\n #for i in range(3):\n # img[:,:,i] = (img[:,:,i] - self.mean[i]) * self.std[i]\n #img = resize(img/255.0,(w,h),1)\n img = img/255.0\n print(img.shape)\n #print(img[0,0,:])\n return img.astype(numpy.float16)" ]
[ "0.6967046", "0.61986065", "0.6189992", "0.61811805", "0.6017552", "0.60136306", "0.59860003", "0.5980767", "0.58180374", "0.5771538", "0.5734991", "0.5357844", "0.53522146", "0.53145593", "0.52839905", "0.52355176", "0.5201002", "0.5169548", "0.51465833", "0.5143027", "0.5133002", "0.5122566", "0.5120537", "0.5119742", "0.51016587", "0.5089576", "0.5088515", "0.50796115", "0.50706935", "0.50619346", "0.5030261", "0.5006572", "0.49920025", "0.49897307", "0.49808773", "0.4979582", "0.49608722", "0.49547094", "0.4945711", "0.492646", "0.49230003", "0.492056", "0.49050325", "0.49017027", "0.48770514", "0.48661903", "0.48507974", "0.48495483", "0.4847109", "0.48439074", "0.4827938", "0.48204294", "0.48139858", "0.48073152", "0.47975194", "0.47958487", "0.4793128", "0.47825608", "0.47772178", "0.47718418", "0.4754642", "0.4750353", "0.47372094", "0.47357064", "0.4731322", "0.47273743", "0.4722708", "0.4721016", "0.47093922", "0.47066846", "0.4698875", "0.46963775", "0.4694109", "0.46929583", "0.4681099", "0.4679503", "0.46728", "0.4664424", "0.4663927", "0.4658792", "0.4657783", "0.46563065", "0.46527782", "0.4652146", "0.4650875", "0.46508077", "0.46481523", "0.46481466", "0.46454555", "0.4640538", "0.46391714", "0.46390164", "0.46376395", "0.46353027", "0.4634678", "0.46323377", "0.4630429", "0.46243998", "0.4616323", "0.46155223" ]
0.6670928
1
Get item from self.human_data.
def prepare_raw_data(self, idx: int): info = super().prepare_raw_data(idx) if self.cache_reader is not None: self.human_data = self.cache_reader.get_item(idx) idx = idx % self.cache_reader.slice_size if 'smplx' in self.human_data: smplx_dict = self.human_data['smplx'] info['has_smplx'] = 1 else: smplx_dict = {} info['has_smplx'] = 0 if 'global_orient' in smplx_dict: info['smplx_global_orient'] = smplx_dict['global_orient'][idx] info['has_smplx_global_orient'] = 1 else: info['smplx_global_orient'] = np.zeros((3), dtype=np.float32) info['has_smplx_global_orient'] = 0 if 'body_pose' in smplx_dict: info['smplx_body_pose'] = smplx_dict['body_pose'][idx] info['has_smplx_body_pose'] = 1 else: info['smplx_body_pose'] = np.zeros((21, 3), dtype=np.float32) info['has_smplx_body_pose'] = 0 if 'right_hand_pose' in smplx_dict: info['smplx_right_hand_pose'] = smplx_dict['right_hand_pose'][idx] info['has_smplx_right_hand_pose'] = 1 else: info['smplx_right_hand_pose'] = np.zeros((15, 3), dtype=np.float32) info['has_smplx_right_hand_pose'] = 0 if 'left_hand_pose' in smplx_dict: info['smplx_left_hand_pose'] = smplx_dict['left_hand_pose'][idx] info['has_smplx_left_hand_pose'] = 1 else: info['smplx_left_hand_pose'] = np.zeros((15, 3), dtype=np.float32) info['has_smplx_left_hand_pose'] = 0 if 'jaw_pose' in smplx_dict: info['smplx_jaw_pose'] = smplx_dict['jaw_pose'][idx] info['has_smplx_jaw_pose'] = 1 else: info['smplx_jaw_pose'] = np.zeros((3), dtype=np.float32) info['has_smplx_jaw_pose'] = 0 if 'betas' in smplx_dict: info['smplx_betas'] = smplx_dict['betas'][idx] info['has_smplx_betas'] = 1 else: info['smplx_betas'] = np.zeros((self.num_betas), dtype=np.float32) info['has_smplx_betas'] = 0 if 'expression' in smplx_dict: info['smplx_expression'] = smplx_dict['expression'][idx] info['has_smplx_expression'] = 1 else: info['smplx_expression'] = np.zeros((self.num_expression), dtype=np.float32) info['has_smplx_expression'] = 0 return info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_item(self):\n return self.item", "def get_item(self):\n return self.item", "def __getitem__(self, item):\n return self._data[item]", "def __getitem__(self, item):\n return self.data[item]", "def __getitem__(self, item):\n return self.data[item]", "def __getitem__(self, item):\n return self.data[item]", "def getItem(self):\n return self.getItem(0)", "def __getitem__(self, item):\n return self._state[\"data\"].get(item, None)", "def __getitem__(self, item):\n return self._state[\"data\"].get(item, None)", "def __getitem__(self, item):\n return self._state[\"data\"].get(item, None)", "def GetItem(self):\r\n \r\n return self._item", "def __getitem__(self, item):\n return self._metadata[item]", "def __getitem__(self, item):\r\n return self._state[\"data\"].get(item, None)", "def __getitem__(self, item):\r\n return self._state[\"data\"].get(item, None)", "def __getitem__(self, item):\r\n\r\n return self.data.__getitem__(item)", "def get_item(self):\n raise NotImplementedError", "def __getitem__(self, item):\n return self.hdus[item]", "def __getattr__(self, item):\n return self._state[\"data\"].get(item, None)", "def __getattr__(self, item):\n return self._state[\"data\"].get(item, None)", "def __getattr__(self, item):\n return self._state[\"data\"].get(item, None)", "def __getitem__(self, name):\n return self._items[name.lower()][1]", "def __getitem__(self, index):\n item = self.data[index]\n return item", "def __getitem__(self, item):\n return self.get_data(stock=item)", "def __getitem__(self, item):\n return self.__dict__[item]", "def __getattr__(self, item):\r\n return self._state[\"data\"].get(item, None)", "def __getattr__(self, item):\r\n return self._state[\"data\"].get(item, None)", "def __getitem__(self, item):\n return self._recordings[item]", "def get_item_from_label(self, label):\n idx = self.labels.index(label)\n item = self[idx][0]\n return item", "def get(self, item, default=None):\n return self._data.get(item, default)", "def __getitem__(self, item):\n return self.default_dataset[item]", "def get(self, item):\r\n raise NotImplementedError", "def get(self, item, default=None):\n\n return self._data.get(item, default)", "def __getitem__(self, item):\n return self.elements[item]", "def get(self, item):\n return self._attributes[item]", "def get_item(self, call_number):\n return self.item_list.get(call_number)", "def __getitem__(self, item):\n return self.row[item]", "def get_human(root, _info, id):\n return human_data.get(id)", "def __getitem__(self, key):\n return self.data[key]", "def __getitem__(self, key):\n return self.data[key]", "def __getitem__(self, key):\n return self.data[key]", "def get_item(self, index: int) -> _T:\n return self.index_to_item[index]", "def __getitem__(self, name):\n if name in self.data: return self.data[name]", "def __getitem__(self, item):\n return getattr(self, item)", "def get_item(self, item_id): # pragma: no cover\n raise NotImplementedError", "def __getitem__(self, key):\n return self.__data[key]", "def __getitem__(self, key):\r\n return self.data[key]", "def __getitem__(self, idx):\n return self.items[idx]", "def get(self, item_name):\n if isinstance(item_name, BaseItem):\n return item_name\n return self.all_items.get(item_name)", "def __getitem__(self, item):\n return self.fields[item]", "def __getitem__(self, index):\n return self.data[index]", "def __getitem__(self, index):\n return self.data[index]", "def __getitem__(self, index):\r\n return self._items[index]", "def __getattr__(self, item):\n return self.__dict__[item] if item in 
self.__dict__ else self.data.get(item)", "def __getitem__(self, key):\n return self.data[key]\n # pass", "def __getitem__(self, i):\n return self.data[i]", "def __getitem__(self, index):\n # attempt to\n try:\n # cast {index} to an integer\n index = int(index)\n # if this fails\n except TypeError:\n # ask my tile do the rest\n value = self.data[self.tile.offset(index)]\n # otherwise\n else:\n # retrieve the item directly from my container\n value = self.data[index]\n # all done\n return value", "def get_data_item(self):\n raise exceptions.NotImplemented", "def __getitem__(self, item):\n u, v = item\n return self.__getitem(u, v)", "def __getitem__(self, item):\n return self._object_names[item]", "def __getitem__(self, item):\n return self.get(sighash=item)", "def __getitem__(self, where):\n return self._data[where]", "def __getitem__(self,i):\n return self._items[i]", "def get_item(self, name: str) -> Optional[Item]:\n item = self.filter_items(name, limit=1)\n return item[0] if item else None", "def __getitem__(self, item) -> SurveyRow:\n return self.rows[item]", "def get(self, item):\n return self.graph._get(self.parent.handle,\n **{self.type: item})", "def get_item(self, index):\n if index == 0:\n raise IndexError(\"<{0}> Index start as 1\".format(type(self).__name__))\n index = self.get_index(index)\n res = self.get_item_type()()\n self.get_Item(index, res)\n return res", "def __getitem__(self, key):\n return self.data.get(key, '')", "def __getitem__(self, idx):\n return self._data[idx]", "def __getitem__(self, item):\n return self.top[item]", "def get_item(self, key):\n search_slot = self.count_hash(key, len(self.slots))\n\n if self.slots[search_slot] == key:\n data = self.data[search_slot]\n elif isinstance(self.slots[search_slot], tuple):\n index_tuple = (self.slots[search_slot].index(key))\n data = (self.data[search_slot][index_tuple])\n else:\n data = None\n\n return data", "def read(self):\n\n return self.__items__[0]", "def __getitem__(self, item):\n if not hasattr(self, 'hdu_list'):\n self.update_hdu_list()\n\n ext, ver, ver_sent = self._extract_item(item)\n\n try:\n # if it is an int\n hdu = self.hdu_list[ext]\n except Exception:\n # might be a string\n ext = mks(ext)\n if not self.case_sensitive:\n mess = '(case insensitive)'\n ext = ext.lower()\n else:\n mess = '(case sensitive)'\n\n if ver > 0:\n key = '%s-%s' % (ext, ver)\n if key not in self.hdu_map:\n raise IOError(\"extension not found: %s, \"\n \"version %s %s\" % (ext, ver, mess))\n hdu = self.hdu_map[key]\n else:\n if ext not in self.hdu_map:\n raise IOError(\"extension not found: %s %s\" % (ext, mess))\n hdu = self.hdu_map[ext]\n\n return hdu", "def __getitem__(self, item):\n return self.cube[item]", "def __getitem__(self, item):\r\n return item.get_value(borrow=True)", "def __getitem__(self, name):\n return self.entry[name]", "def __getitem__(self, item):\n result = self.get(item)\n if not result:\n raise KeyError(item)\n else:\n return result", "def __getitem__(self, item):\n if isinstance(item, str):\n item = [i for i, v in enumerate(self.list) if item == v.name]\n if len(item) > 0:\n item = item[0]\n return self.list[item]", "def __getitem__(self, index):\n\t\treturn self.data[index]", "def GetItem(self,index):\r\n return self.itemId_item[self.gList.GetItemData(index)]", "def __getitem__(self, position):\n\n return self.data[position]", "def __getitem__(self, item):\n try:\n return self._values[item]\n except KeyError:\n raise FactInvalidIndex(str(item))", "def __getitem__(self, item):\n return 
foreign_get(self.vars, item)", "def __getitem__(self, x):\n return self.data[self.name][x]", "def __getitem__(self, name):\n return self.get(name)", "def getItem(self) -> Optional[items.Item]:\n return None if self.__itemRef is None else self.__itemRef()", "def __getitem__(self, v):\r\n return self.unif.get(v, (v, None))[0]", "def get_at_index(self, index: int) -> object:\n return self.data[index]", "def __getitem__(self, index):\n return self.dataset[index]", "def __getitem__(self, key):\n return self._user_data.get(key)", "def getItem(self, column, position):\n return self.data[column, position]", "def __getitem__(self, item):\n return self.children[item]", "def __getitem__(self, key):\r\n return self.items[bytes(key)]", "def getItem(self, id):\n path = 'item/' + id\n return self.sendRestRequest('GET', path)", "def __getitem__(self, item):\n return self.notes[item]", "def __getattr__(self, item):\n return getattr(self.desc, item)", "def __getitem__(self, item):\n # type (Any) -> Any\n value = self.data[item]\n if isinstance(value, pa.ChunkedArray):\n return type(self)(value)\n else:\n return value", "def __getitem__(self, item) -> Union[MoleculeDatapoint, List[MoleculeDatapoint]]:\n if self.preload:\n return self.data_ram[item]\n else:\n return self.data[item]", "def __getattr__(self, item):\n return getattr(self._get_storage(), item)", "def __getitem__(self, key):\n return self._get(key)", "def get_item(self, item_type):\n if item_type not in self._internal_type_mapping:\n return None\n else:\n return self._internal_type_mapping[item_type]", "def __getitem__(self, key):\n return self.get(key)" ]
[ "0.7406864", "0.7406864", "0.73144776", "0.7309078", "0.7309078", "0.7309078", "0.7219982", "0.71420383", "0.71420383", "0.71420383", "0.70934916", "0.70712644", "0.7018385", "0.7018385", "0.70036954", "0.70014024", "0.7000123", "0.6842417", "0.6842417", "0.6842417", "0.67811406", "0.6779614", "0.6776245", "0.67547727", "0.67446536", "0.67446536", "0.66754264", "0.6662596", "0.6649086", "0.66304904", "0.6584274", "0.6559228", "0.6552894", "0.6524472", "0.6520897", "0.6518981", "0.6496489", "0.6469004", "0.6469004", "0.6469004", "0.6451205", "0.64141136", "0.6398231", "0.63836324", "0.6379117", "0.63711023", "0.6370926", "0.6369705", "0.63630897", "0.6360366", "0.6360366", "0.6357872", "0.63194215", "0.63001746", "0.62951314", "0.6291649", "0.62872714", "0.6276656", "0.6248052", "0.6247183", "0.62435514", "0.6240407", "0.6231174", "0.6225249", "0.6219874", "0.6219135", "0.62083185", "0.6205918", "0.6195078", "0.61920387", "0.61854357", "0.6171022", "0.61623", "0.61454135", "0.6143507", "0.6143408", "0.61355597", "0.6126249", "0.6124652", "0.6105049", "0.6080463", "0.607544", "0.607299", "0.6044679", "0.6042689", "0.6040721", "0.6040526", "0.6039687", "0.60377884", "0.60367644", "0.60302716", "0.60248464", "0.6023421", "0.60214156", "0.60036343", "0.6000597", "0.60005647", "0.59971577", "0.5993775", "0.59926826", "0.59896606" ]
0.0
-1
compute the 3DRMSE between a predicted 3D face shape and the 3D ground truth scan.
def _report_3d_rmse(self, res_file): pred_vertices, gt_vertices, _ = self._parse_result( res_file, mode='vertice') pred_keypoints3d, gt_keypoints3d, _ = self._parse_result( res_file, mode='keypoint') errors = [] for pred_vertice, gt_vertice, pred_points, gt_points in zip( pred_vertices, gt_vertices, pred_keypoints3d, gt_keypoints3d): error = fg_vertices_to_mesh_distance(gt_vertice, gt_points, pred_vertice, self.body_model.faces, pred_points) errors.append(error) error = np.array(errors).mean() name_value_tuples = [('3DRMSE', error)] return name_value_tuples
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_3d(self, box_point_3d, instance):\n azimuth_error, polar_error = self.evaluate_viewpoint(box_point_3d, instance)\n iou = self.evaluate_iou(box_point_3d, instance)\n return azimuth_error, polar_error, iou", "def get_3d_points(preds_3d):\n for i,p in enumerate(preds_3d):\n preds_3d[i] = preds_3d[i] - preds_3d[i].mean(0)*np.ones((16,1));\n return preds_3d;", "def calc_RMSE(pred, gt, mask, percentage=False, model='', index=None):\n if model[:6] == 'conv3d':\n #true_mask[:, :, 1:-1] = mask[:, :, 1:-1]\n mask[:, :, 0] = 0\n mask[:, :, -1] = 0\n if index is not None:\n index = index - 1\n\n if index is not None:\n pred = pred[:, :, index:index+1]\n gt = gt[:, :, index:index+1]\n mask = mask[:, :, index:index+1]\n\n pred = pred[mask > 0]\n gt = gt[mask > 0]\n\n if percentage:\n return np.sqrt((((pred - gt) / (gt + 1e-7)) ** 2).mean())\n else:\n return np.sqrt(((pred - gt) ** 2).mean())", "def calculate_potential_3D(true_csd, ele_xx, ele_yy, ele_zz, \n csd_x, csd_y, csd_z):\n xlin = csd_x[:,0,0]\n ylin = csd_y[0,:,0]\n zlin = csd_z[0,0,:]\n xlims = [xlin[0], xlin[-1]]\n ylims = [ylin[0], ylin[-1]]\n zlims = [zlin[0], zlin[-1]]\n sigma = 1.0\n pots = np.zeros(len(ele_xx))\n tic = time.time()\n for ii in range(len(ele_xx)):\n pots[ii] = integrate_3D(ele_xx[ii], ele_yy[ii], ele_zz[ii],\n xlims, ylims, zlims, true_csd, \n xlin, ylin, zlin, \n csd_x, csd_y, csd_z)\n print 'Electrode:', ii\n pots /= 4*np.pi*sigma\n toc = time.time() - tic\n print toc, 'Total time taken - series, sims'\n return pots", "def test_unet_3d(self):\n for model_class in [UNet3D, UNetPlus3D]:\n b, d, h, w = 4, 8, 64, 64\n in_channel, out_channel = 1, 3\n x = torch.rand(b, in_channel, d, h, w)\n model = model_class(block_type='residual', in_channel=in_channel,\n out_channel=out_channel, pooling=True)\n out = model(x)\n self.assertTupleEqual(tuple(out.shape), (b, out_channel, d, h, w))\n\n b, d, h, w = 4, 9, 65, 65\n in_channel, out_channel = 1, 2\n x = torch.rand(b, in_channel, d, h, w)\n model = model_class(block_type='residual_se', in_channel=in_channel,\n out_channel=out_channel, pooling=False)\n out = model(x)\n self.assertTupleEqual(tuple(out.shape), (b, out_channel, d, h, w))\n\n b, d, h, w = 1, 65, 65, 65\n in_channel, out_channel = 1, 2\n x = torch.rand(b, in_channel, d, h, w)\n model = model_class(block_type='residual_se', in_channel=in_channel,\n out_channel=out_channel, pooling=False, is_isotropic=True)\n out = model(x)\n self.assertTupleEqual(tuple(out.shape), (b, out_channel, d, h, w))", "def val_real_eigen_dataset(self):\n self.set_eval()\n\n pred_disps = []\n\n print('Start running monoDEVSNet model on KITTI-Eigen validation set : ', end=' ')\n for batch_idx, inputs in tqdm(enumerate(self.real_eigen_val_loader, 0)):\n\n # Move all available tensors to GPU memory\n for key, ipt in inputs.items():\n if type(key) == tuple or key == \"depth_gt\":\n inputs[key] = ipt.to(self.device)\n\n outputs, losses = {}, {}\n with torch.no_grad():\n features, raw_hrnet_features = self.models[\"encoder\"](inputs[\"color_aug\", 0, 0])\n outputs = self.models[\"depth_decoder\"](features)\n pred_disp, _ = disp_to_depth(outputs[(\"disp\", 0)], self.opt.min_depth, self.opt.max_depth)\n pred_disp = pred_disp.cpu()[:, 0].numpy()\n pred_disps.append(pred_disp)\n\n pred_disps = np.concatenate(pred_disps)\n\n errors_rsf, errors_asf = [], []\n ratios = []\n pred_depths_copy, gt_depths_copy = [], []\n for i in range(pred_disps.shape[0]):\n\n gt_depth = self.gt_depths[i]\n gt_height, gt_width = gt_depth.shape[:2]\n\n pred_disp = 
pred_disps[i]\n pred_disp = cv2.resize(pred_disp, (gt_width, gt_height))\n pred_depth = 1 / pred_disp\n\n if self.opt.eval_split == \"eigen\":\n mask = np.logical_and(gt_depth > self.opt.min_depth, gt_depth < self.opt.max_depth)\n\n crop = np.array([0.40810811 * gt_height, 0.99189189 * gt_height,\n 0.03594771 * gt_width, 0.96405229 * gt_width]).astype(np.int32)\n crop_mask = np.zeros(mask.shape)\n crop_mask[crop[0]:crop[1], crop[2]:crop[3]] = 1\n mask = np.logical_and(mask, crop_mask)\n else:\n mask = gt_depth > 0\n\n pred_depth_asf = pred_depth.copy() * self.opt.syn_scaling_factor\n ratio = np.median(gt_depth[mask]) / np.median(pred_depth[mask])\n ratios.append(ratio)\n pred_depth_rsf = pred_depth.copy() * ratio\n\n # Create a copy\n pred_depths_copy.append(np.expand_dims(pred_depth_asf.copy(), axis=0))\n gt_depths_copy.append(np.expand_dims(gt_depth.copy(), axis=0))\n\n # Choose only valid pixels - using mask\n pred_depth_rsf = pred_depth_rsf[mask]\n pred_depth_asf = pred_depth_asf[mask]\n gt_depth = gt_depth[mask]\n\n # Clamping the min and max depth\n pred_depth_rsf[pred_depth_rsf < self.opt.min_depth] = self.opt.min_depth\n pred_depth_rsf[pred_depth_rsf > self.opt.max_depth] = self.opt.max_depth\n pred_depth_asf[pred_depth_asf < self.opt.min_depth] = self.opt.min_depth\n pred_depth_asf[pred_depth_asf > self.opt.max_depth] = self.opt.max_depth\n\n # Compute depth metric error values for individual and constant scaling factor\n errors_rsf.append(compute_errors(gt_depth, pred_depth_rsf))\n errors_asf.append(compute_errors(gt_depth, pred_depth_asf))\n\n print(\"\\n \\n KITTI Eigen Validation Split: {}\".format(self.gt_depths.__len__()))\n med_std = [np.inf, np.inf]\n ratios = np.array(ratios)\n med = np.median(ratios)\n print(\" Scaling ratios | med: {:0.3f} | std: {:0.3f}\".format(med, np.std(ratios / med)))\n med_std = [med, np.std(ratios / med)]\n\n mean_errors_rsf = np.array(errors_rsf).mean(0)\n mean_errors_asf = np.array(errors_asf).mean(0)\n print(\"\\n rsf \\n \" + (\"{:>8} | \" * 7).format(\"abs_rel\", \"sq_rel\", \"rmse\", \"rmse_log\", \"a1\", \"a2\", \"a3\"))\n print((\"&{: 8.3f} \" * 7).format(*mean_errors_rsf.tolist()) + \"\\\\\\\\\")\n print(\"\\n asf \\n \" + (\"{:>8} | \" * 7).format(\"abs_rel\", \"sq_rel\", \"rmse\", \"rmse_log\", \"a1\", \"a2\", \"a3\"))\n print((\"&{: 8.3f} \" * 7).format(*mean_errors_asf.tolist()) + \"\\\\\\\\\")\n\n self.log_evaluation('val_real_eigen', mean_errors_rsf, med_std, mean_errors_asf)\n self.set_train()\n\n return mean_errors_rsf, mean_errors_asf", "def getAverageErr(self):\n TotalErr = np.zeros(self.pts3D.shape[1])\n\n for view in range(self.nViews):\n # Weights: 1 for points that appear in the image, zero otherwise\n idx_valid = self.getValidPtsInView(view)\n # Project 3D points onto the image plane\n proj_pts2D = utils.ProjectPts(self.pts3D[:, idx_valid], \\\n self.cam_poses[:, view], \\\n self.cam.KK)\n # Reprojection error for each point\n ErrView = np.sqrt( np.sum( ( self.pts2D[:, idx_valid, view] - \\\n proj_pts2D )**2, axis = 0 ))\n TotalErr[idx_valid] += ErrView\n \n # Count how many 2D views a pts3D appears\n num_views = self.getNumViews() \n\n self.avg_err = TotalErr / num_views.astype(float)\n # Average error per view\n return self.avg_err", "def euclidean_distance_3D(y_true, y_pred):\n ed3D = K.flatten(K.sqrt(K.sum(K.pow(y_true - y_pred, 2), axis=1)))\n return K_nanmean_infmean(ed3D)", "def mse_r2(true, predicted):\n # Reshaping set of images\n # n_imgs, nx, ny = true.shape\n # true = np.reshape(true, (n_imgs, nx*ny))\n # 
predicted = np.reshape(predicted, (n_imgs, nx*ny))\n nx = 33\n ny = 33\n\n # Compute MSE\n se = np.sum((true - predicted)**2, axis=1)\n mse = se*(nx*ny)**-1\n\n # Compute R squared\n mean = np.mean(true, axis=1)\n r2 = 1 - se*np.sum((true - np.expand_dims(mean, axis=1))**2, axis=1)**-1\n\n return mse, r2", "def sky_error_se(profile, sky_adj, fit_name):\n\tfit_data = profile.fits[fit_name]\n\tP = copy_params(fit_data[0].params, False)\n\tdiff = {}\n\tprofile.I += sky_adj*1.\n\tfit_sersic_exp(profile, fit_data[1], store=False)\n\toutup = find_confs_se(profile, fit_name)\n\tprofile.I -= 1.*sky_adj\n\toutdown = find_confs_se(profile, fit_name)\n\tprofile.I += 1.*sky_adj\n\tfor key, val in outup.iteritems():\n\t\tup = outup[key][0] - P[key].value\n\t\tdown = P[key].value - outdown[key][0]\n\t\tif up > 0 and down > 0:\n\t\t\tup = max((up, down))\n\t\t\tdown = 0.\n\t\telif up < 0 and down < 0:\n\t\t\tup = 0.\n\t\t\tdown = min((up, down))\n\t\telif up < 0 and down > 0:\n\t\t\tup, down = down, up\n\t\tdiff.update({key: [up, -down]})\n\tprofile.sky_fit[fit_name] = diff\n\treturn diff", "def k3(self) -> float:\n return self.distortion_coefficients[2]", "def feed(self, jt_uvd_pred, jt_xyz_gt, center_xyz, M, cube, jt_vis=0, skip_check=False):\n if not skip_check:\n jt_uvd_pred = np.squeeze(jt_uvd_pred).astype(np.float32)\n jt_xyz_gt = np.squeeze(jt_xyz_gt).astype(np.float32)\n jt_vis = np.squeeze(jt_vis).astype('bool')\n center_xyz = np.squeeze(center_xyz).astype(np.float32)\n M = np.squeeze(M).astype(np.float32)\n cube = np.squeeze(cube).astype(np.float32)\n\n assert len(jt_uvd_pred.shape) == 2\n assert len(jt_xyz_gt.shape) == 2\n\n try:\n M_inv = np.linalg.inv(M)\n except:\n print('Inverse matrix does not exist.')\n\n jt_uvd_pred[:, :2] = (jt_uvd_pred[:, :2] + 1) * self.img_size / 2.\n jt_uvd_pred[:, 2] = jt_uvd_pred[:, 2] * cube[2] / 2. + center_xyz[2]\n jt_uvd_trans = np.hstack([jt_uvd_pred[:, :2], np.ones((jt_uvd_pred.shape[0], 1))])\n jt_uvd_pred[:, :2] = np.dot(M_inv, jt_uvd_trans.T).T[:, :2]\n self.jt_uvd_pred.append(jt_uvd_pred)\n jt_xyz_pred = uvd2xyz(jt_uvd_pred, self.paras, self.flip)\n\n jt_xyz_gt = jt_xyz_gt * (cube / 2.) 
+ center_xyz\n\n # calc euclidean distance\n diff = jt_xyz_gt - jt_xyz_pred\n euclidean_dist = np.sqrt(np.sum(np.square(diff), axis=1))\n self.diff.append(diff.mean(axis=0))\n\n num_kp = jt_xyz_gt.shape[0]\n for i in range(num_kp):\n if jt_vis == 0:\n self.data[i].append(euclidean_dist[i])\n else:\n if jt_vis[i]:\n self.data[i].append(euclidean_dist[i])", "def test_std_3d(self):\r\n inp3d = array( # 2,2,3\r\n [[[0, 2, 2],\r\n [3, 4, 5]],\r\n\r\n [[1, 9, 0],\r\n [9, 10, 1]]])\r\n exp3d = ( # for axis None, 0, 1, 2: calc from scipy.stats.std\r\n 3.63901418552,\r\n array([[0.70710678, 4.94974747, 1.41421356],\r\n [4.24264069, 4.24264069, 2.82842712]]),\r\n array([[2.12132034, 1.41421356, 2.12132034],\r\n [5.65685425, 0.70710678, 0.70710678]]),\r\n array([[1.15470054, 1.],\r\n [4.93288286, 4.93288286]]))\r\n res = tuple(std(inp3d, ax) for ax in [None, 0, 1, 2])\r\n for obs, exp in zip(res, exp3d):\r\n testing.assert_almost_equal(obs, exp)", "def frame3dlin_KeMe(E,G,Kv1,Kv2,A1,A2,Iy1,Iy2,Iz1,Iz2,L,me1,me2,R=None):\n # --- Stifness matrix\n ke = np.array([\n [((A2+A1)*E)/(2*L) , 0 , 0 , 0 , 0 , 0 , -((A2+A1)*E)/(2*L) , 0 , 0 , 0 , 0 , 0] , \n [0 , ((6*Iz2+6*Iz1)*E)/L**3 , 0 , 0 , 0 , ((2*Iz2+4*Iz1)*E)/L**2 , 0 , -((6*Iz2+6*Iz1)*E)/L**3 , 0 , 0 , 0 , ((4*Iz2+2*Iz1)*E)/L**2] , \n [0 , 0 , ((6*Iy2+6*Iy1)*E)/L**3 , 0 , -((2*Iy2+4*Iy1)*E)/L**2 , 0 , 0 , 0 , -((6*Iy2+6*Iy1)*E)/L**3 , 0 , -((4*Iy2+2*Iy1)*E)/L**2 , 0] , \n [0 , 0 , 0 , ((Kv2+Kv1)*G)/(2*L) , 0 , 0 , 0 , 0 , 0 , -((Kv2+Kv1)*G)/(2*L) , 0 , 0] , \n [0 , 0 , -((2*Iy2+4*Iy1)*E)/L**2 , 0 , ((Iy2+3*Iy1)*E)/L , 0 , 0 , 0 , ((2*Iy2+4*Iy1)*E)/L**2 , 0 , ((Iy2+Iy1)*E)/L , 0] , \n [0 , ((2*Iz2+4*Iz1)*E)/L**2 , 0 , 0 , 0 , ((Iz2+3*Iz1)*E)/L , 0 , -((2*Iz2+4*Iz1)*E)/L**2 , 0 , 0 , 0 , ((Iz2+Iz1)*E)/L] , \n [-((A2+A1)*E)/(2*L) , 0 , 0 , 0 , 0 , 0 , ((A2+A1)*E)/(2*L) , 0 , 0 , 0 , 0 , 0] , \n [0 , -((6*Iz2+6*Iz1)*E)/L**3 , 0 , 0 , 0 , -((2*Iz2+4*Iz1)*E)/L**2 , 0 , ((6*Iz2+6*Iz1)*E)/L**3 , 0 , 0 , 0 , -((4*Iz2+2*Iz1)*E)/L**2] , \n [0 , 0 , -((6*Iy2+6*Iy1)*E)/L**3 , 0 , ((2*Iy2+4*Iy1)*E)/L**2 , 0 , 0 , 0 , ((6*Iy2+6*Iy1)*E)/L**3 , 0 , ((4*Iy2+2*Iy1)*E)/L**2 , 0] , \n [0 , 0 , 0 , -((Kv2+Kv1)*G)/(2*L) , 0 , 0 , 0 , 0 , 0 , ((Kv2+Kv1)*G)/(2*L) , 0 , 0] , \n [0 , 0 , -((4*Iy2+2*Iy1)*E)/L**2 , 0 , ((Iy2+Iy1)*E)/L , 0 , 0 , 0 , ((4*Iy2+2*Iy1)*E)/L**2 , 0 , ((3*Iy2+Iy1)*E)/L , 0] , \n [0 , ((4*Iz2+2*Iz1)*E)/L**2 , 0 , 0 , 0 , ((Iz2+Iz1)*E)/L , 0 , -((4*Iz2+2*Iz1)*E)/L**2 , 0 , 0 , 0 , ((3*Iz2+Iz1)*E)/L]\n ])\n # --- Mass matrix\n me = np.array([\n [(me2+3*me1)/12 , 0 , 0 , 0 , 0 , 0 , (me2+me1)/12 , 0 , 0 , 0 , 0 , 0] , \n [0 , (3*me2+10*me1)/35 , 0 , 0 , 0 , (7*L*me2+15*L*me1)/420 , 0 , (9*me2+9*me1)/140 , 0 , 0 , 0 , -(6*L*me2+7*L*me1)/420] , \n [0 , 0 , (3*me2+10*me1)/35 , 0 , -(7*L*me2+15*L*me1)/420 , 0 , 0 , 0 , (9*me2+9*me1)/140 , 0 , (6*L*me2+7*L*me1)/420 , 0] , \n [0 , 0 , 0 , (me2+3*me1)/12 , 0 , 0 , 0 , 0 , 0 , (me2+me1)/12 , 0 , 0] , \n [0 , 0 , -(7*L*me2+15*L*me1)/420 , 0 , (3*L**2*me2+5*L**2*me1)/840 , 0 , 0 , 0 , -(7*L*me2+6*L*me1)/420 , 0 , -(L**2*me2+L**2*me1)/280 , 0] , \n [0 , (7*L*me2+15*L*me1)/420 , 0 , 0 , 0 , (3*L**2*me2+5*L**2*me1)/840 , 0 , (7*L*me2+6*L*me1)/420 , 0 , 0 , 0 , -(L**2*me2+L**2*me1)/280] , \n [(me2+me1)/12 , 0 , 0 , 0 , 0 , 0 , (3*me2+me1)/12 , 0 , 0 , 0 , 0 , 0] , \n [0 , (9*me2+9*me1)/140 , 0 , 0 , 0 , (7*L*me2+6*L*me1)/420 , 0 , (10*me2+3*me1)/35 , 0 , 0 , 0 , -(15*L*me2+7*L*me1)/420] , \n [0 , 0 , (9*me2+9*me1)/140 , 0 , -(7*L*me2+6*L*me1)/420 , 0 , 0 , 0 , (10*me2+3*me1)/35 , 0 , (15*L*me2+7*L*me1)/420 , 0] , \n [0 , 0 
, 0 , (me2+me1)/12 , 0 , 0 , 0 , 0 , 0 , (3*me2+me1)/12 , 0 , 0] , \n [0 , 0 , (6*L*me2+7*L*me1)/420 , 0 , -(L**2*me2+L**2*me1)/280 , 0 , 0 , 0 , (15*L*me2+7*L*me1)/420 , 0 , (5*L**2*me2+3*L**2*me1)/840 , 0] , \n [0 , -(6*L*me2+7*L*me1)/420 , 0 , 0 , 0 , -(L**2*me2+L**2*me1)/280 , 0 , -(15*L*me2+7*L*me1)/420 , 0 , 0 , 0 , (5*L**2*me2+3*L**2*me1)/840]\n ])\n\n if (R is not None):\n RR = scipy.linalg.block_diag(R,R,R,R)\n me = np.transpose(RR).dot(me.dot(RR))\n ke = np.transpose(RR).dot(ke.dot(RR))\n\n return ke, me", "def test_el3_vs_original():\n # store computations from original implementation\n # from florian_ell3_paper import el3 as el30\n # N = 10000\n # x11 = np.random.rand(N)*5\n # kc11 = (np.random.rand(N)-.5)*10\n # p11 = (np.random.rand(N)-.5)*10\n # result0 = np.array([el30(x, kc, p) for x,kc,p in zip(x11,kc11,p11)])\n # np.save('data_test_el3', np.array([result0,x11,kc11,p11]))\n\n # load data from orginal implementation\n data = np.load(\"tests/testdata/testdata_el3.npy\")\n res0, x11, kc11, p11 = data\n\n # compare to vectorized\n resv = el3v(x11, kc11, p11)\n assert np.allclose(res0, resv)\n\n # compare to modified original\n res1 = np.array([el30(x, kc, p) for x, kc, p in zip(x11, kc11, p11)])\n assert np.allclose(res0, res1)", "def evaluate_batches( sess, model,\n data_mean_3d, data_std_3d, dim_to_use_3d, dim_to_ignore_3d,\n data_mean_2d, data_std_2d, dim_to_use_2d, dim_to_ignore_2d,\n current_step, encoder_inputs, decoder_outputs, current_epoch=0 ):\n\n n_joints = len( data_utils.DIMENSIONS_TO_USE ) - len( data_utils.ROOT_POSITIONS)\n nbatches = len( encoder_inputs )\n\n # Loop through test examples\n all_dists, start_time, loss = [], time.time(), 0.\n log_every_n_batches = 20\n diff_coordwise = []\n for i in range(nbatches):\n\n if current_epoch > 0 and (i+1) % log_every_n_batches == 0:\n print(\"Working on test epoch {0}, batch {1} / {2}\".format( current_epoch, i+1, nbatches) )\n\n enc_in, dec_out = encoder_inputs[i], decoder_outputs[i]\n dp = 1.0 # dropout keep probability is always 1 at test time\n step_loss, loss_summary, poses3d = model.step( sess, enc_in, dec_out, dp, isTraining=False )\n loss += step_loss\n\n # denormalize\n enc_in = data_utils.unNormalize_batch( enc_in, data_mean_2d, data_std_2d, dim_to_use_2d )\n dec_out = data_utils.unNormalize_batch( dec_out, data_mean_3d, data_std_3d, dim_to_use_3d )\n poses3d = data_utils.unNormalize_batch( poses3d, data_mean_3d, data_std_3d, dim_to_use_3d )\n\n # Keep only the relevant dimensions\n dec_out = dec_out[:, dim_to_use_3d]\n poses3d = poses3d[:, dim_to_use_3d]\n\n assert dec_out.shape[0] == FLAGS.batch_size\n assert poses3d.shape[0] == FLAGS.batch_size\n\n # Compute Euclidean distance error per joint\n diff_coordwise.append(np.abs(poses3d - dec_out))\n sqerr = (poses3d - dec_out)**2 # Squared error between prediction and expected output\n dists = np.zeros( (sqerr.shape[0], n_joints) ) # Array with L2 error per joint in mm\n dist_idx = 0\n for k in np.arange(0, n_joints*3, 3):\n # Sum across X,Y, and Z dimenstions to obtain L2 distance\n dists[:,dist_idx] = np.sqrt( np.sum( sqerr[:, k:k+3], axis=1 ))\n dist_idx += 1\n\n all_dists.append(dists)\n assert sqerr.shape[0] == FLAGS.batch_size\n \n step_time = (time.time() - start_time) / nbatches\n loss = loss / nbatches\n\n all_dists = np.vstack( all_dists )\n\n # Error per joint and total for all passed batches\n joint_err = np.mean( all_dists, axis=0 )\n coordwise_err = np.mean(np.vstack(diff_coordwise).reshape((-1,3)), axis=0)\n total_err = np.mean( all_dists )\n\n 
return total_err, coordwise_err, joint_err, step_time, loss", "def mse(self):\n xs, ys = self.R.nonzero()\n predicted = self.full_matrix()\n error = 0\n for x, y in zip(xs, ys):\n # print(predicted[x, y], self.R[x, y] )\n error += pow(self.R[x, y] - predicted[x, y], 2)\n return np.sqrt(error)", "def get_error(self, params):\n return self.endog - self.predict(params)", "def calc_skylevel(self, skylevstdp = DEFAULT_SKYLEVELSTD):\n if self.data is None or self.skylevstd == skylevstdp:\n return\n fimagedata = self.data.flatten()\n skymask = fimagedata - self.meanval <= skylevstdp * self.stdval\n fimagedata = fimagedata[skymask]\n if len(fimagedata) < 100:\n raise RemFitsErr(\"No possible sky in file\")\n self.skylev = fimagedata.mean()\n self.skystd = fimagedata.std()\n self.skylevstd = skylevstdp", "def _residual_edp(self, params):\n data = self.F**2\n model = np.absolute(self._model())**2\n sigma = self.sigma\n return (data[self.mask]-model[self.mask]) / sigma[self.mask] \n \n # The following three lines do not reproduce Sun's results, which proves\n # that the fits were done through intensity, not form factor.\n #data = self.F\n #model = np.absolute(self._model())\n #return (data - model) ", "def train():\n\n # Load camera parameters\n rcams = cameras.load_cameras()\n\n # Load 3d data and 2d projections\n full_train_set_3d, full_test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d =\\\n data_utils.read_3d_data( FLAGS.camera_frame, rcams, FLAGS.origin_bc, FLAGS.augment_data,\n FLAGS.procrustes, FLAGS.lowpass )\n \n # Read stacked hourglass 2D predictions\n full_train_set_2d, full_test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = \\\n data_utils.read_2d_predictions( FLAGS.origin_bc, FLAGS.augment_data )\n \n print(\"\\n[+] done reading and normalizing data\")\n # Getting the number of training and test subjects\n tr_subj = 0\n for v in full_train_set_3d.values():\n tr_subj += v.shape[0]\n te_subj = 0\n for v in full_test_set_3d.values():\n te_subj += v.shape[0]\n print(\"{0} training subjects, {1} test subjects\".format(tr_subj, te_subj))\n print(dim_to_use_2d)\n print(dim_to_use_3d)\n # Un-normalizing data for visualizations\n unNorm_ftrs2d = data_utils.unNormalize_dic(full_train_set_2d, data_mean_2d, data_std_2d, dim_to_use_2d)\n unNorm_ftrs3d = data_utils.unNormalize_dic(full_train_set_3d, data_mean_3d, data_std_3d, dim_to_use_3d)\n unNorm_ftes3d = data_utils.unNormalize_dic(full_test_set_3d, data_mean_3d, data_std_3d, dim_to_use_3d)\n # Visualize the data\n viz.visualize_train_sample(unNorm_ftrs2d, unNorm_ftrs3d, FLAGS.camera_frame)\n viz.visualize_files_oneatatime(unNorm_ftrs3d, unNorm_ftes3d)\n\n # Getting only the dimensions to use (get rid of body coxas, other limb, antennas, abdomen\n train_set_3d, train_set_2d, test_set_3d, test_set_2d = {}, {}, {}, {}\n for k in full_train_set_3d:\n (f, c) = k\n train_set_3d[k] = full_train_set_3d[k][:, dim_to_use_3d]\n train_set_2d[(f, data_utils.CAMERA_TO_USE)] =\\\n full_train_set_2d[(f, data_utils.CAMERA_TO_USE)][:, dim_to_use_2d]\n for k in full_test_set_3d:\n (f, c) = k\n test_set_3d[k] = full_test_set_3d[k][:, dim_to_use_3d]\n test_set_2d[(f, data_utils.CAMERA_TO_USE)] =\\\n full_test_set_2d[(f, data_utils.CAMERA_TO_USE)][:, dim_to_use_2d]\n \n print(\"3D data mean:\")\n print(data_mean_3d)\n print(\"3D data std:\")\n print(data_std_3d)\n\n print(\"2D data mean:\")\n print(data_mean_2d)\n print(\"2D data std:\")\n print(data_std_2d)\n \n input(\"Press Enter to continue...\")\n\n # Avoid using the 
GPU if requested\n device_count = {\"GPU\": 0} if FLAGS.use_cpu else {\"GPU\": 1}\n with tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(\n device_count=device_count,\n allow_soft_placement=True )) as sess:\n\n # === Create the model ===\n print(\"[*] creating %d bi-layers of %d units.\" % (FLAGS.num_layers, FLAGS.linear_size))\n model = create_model( sess, FLAGS.batch_size )\n model.train_writer.add_graph( sess.graph )\n print(\"[+] model created\")\n \n #=== This is the training loop ===\n step_time, loss, val_loss = 0.0, 0.0, 0.0\n current_step = 0 if FLAGS.load <= 0 else FLAGS.load + 1\n previous_losses = []\n\n step_time, loss = 0, 0\n current_epoch = 0\n log_every_n_batches = 100\n losses, errors, joint_errors = [], [], []\n for _ in range( FLAGS.epochs ):\n current_epoch = current_epoch + 1\n\n # === Load training batches for one epoch ===\n encoder_inputs, decoder_outputs =\\\n model.get_all_batches( train_set_2d, train_set_3d, FLAGS.camera_frame, training=True )\n nbatches = len( encoder_inputs )\n print(\"[*] there are {0} train batches\".format( nbatches ))\n start_time, loss = time.time(), 0.\n # === Loop through all the training batches ===\n for i in range( nbatches ):\n\n if (i+1) % log_every_n_batches == 0:\n # Print progress every log_every_n_batches batches\n print(\"Working on epoch {0}, batch {1} / {2}...\".format( current_epoch, i+1, nbatches),end=\"\" )\n\n enc_in, dec_out = encoder_inputs[i], decoder_outputs[i]\n step_loss, loss_summary, lr_summary, _ =\\\n model.step( sess, enc_in, dec_out, FLAGS.dropout, isTraining=True )\n\n if (i+1) % log_every_n_batches == 0:\n # Log and print progress every log_every_n_batches batchespixels = pixels / pixels[2,:]\n model.train_writer.add_summary( loss_summary, current_step )\n model.train_writer.add_summary( lr_summary, current_step )\n step_time = (time.time() - start_time)\n start_time = time.time()\n print(\"done in {0:.2f} ms\".format( 1000*step_time / log_every_n_batches ) )\n\n loss += step_loss\n current_step += 1\n # === end looping through training batches ===\n\n loss = loss / nbatches\n losses.append(loss)\n print(\"=============================\\n\"\n \"Global step: %d\\n\"\n \"Learning rate: %.2e\\n\"\n \"Train loss avg: %.4f\\n\"\n \"=============================\" % (model.global_step.eval(),\n model.learning_rate.eval(), loss) )\n # === End training for an epoch ===\n\n # === Testing after this epoch ===\n isTraining = False\n \n n_joints = len(data_utils.DIMENSIONS_TO_USE)\n if FLAGS.origin_bc:\n n_joints -= len(data_utils.ROOT_POSITIONS)\n\n encoder_inputs, decoder_outputs =\\\n model.get_all_batches( test_set_2d, test_set_3d, FLAGS.camera_frame, training=False)\n\n total_err, coordwise_err, joint_err, step_time, loss = evaluate_batches( sess, model,\n data_mean_3d, data_std_3d, dim_to_use_3d, dim_to_ignore_3d,\n data_mean_2d, data_std_2d, dim_to_use_2d, dim_to_ignore_2d,\n current_step, encoder_inputs, decoder_outputs, current_epoch )\n\n print(\"=============================\\n\"\n \"Step-time (ms): %.4f\\n\"\n \"Val loss avg: %.4f\\n\"\n \"Val error avg (mm): %.2f (%.2f, %.2f, %.2f)\\n\"\n \"=============================\" % ( 1000*step_time, loss, total_err,\n coordwise_err[0], coordwise_err[1], coordwise_err[2] ))\n\n for i in range(n_joints):\n # 6 spaces, right-aligned, 5 decimal places\n print(\"Error in joint {0:02d} (mm): {1:>5.2f}\".format(i+1, joint_err[i]))\n print(\"=============================\")\n errors.append(coordwise_err)\n joint_errors.append(joint_err)\n # Log the error to 
tensorboard\n summaries = sess.run( model.err_mm_summary, {model.err_mm: total_err} )\n model.test_writer.add_summary( summaries, current_step )\n\n # Save the model\n print( \"Saving the model... \", end=\"\" )\n start_time = time.time()\n model.saver.save(sess, os.path.join(train_dir, 'checkpoint'), global_step=current_step )\n print( \"done in {0:.2f} ms\".format(1000*(time.time() - start_time)) )\n\n # Reset global time and loss\n step_time, loss = 0, 0\n\n sys.stdout.flush()\n # Save losses for future plots\n def print_list_tofile(l, filename):\n with open(filename, 'wb') as f:\n pickle.dump(l, f)\n print_list_tofile(losses, train_dir+\"/losses.pkl\")\n print_list_tofile(errors, train_dir+\"/errors.pkl\")\n print_list_tofile(joint_errors, train_dir+\"/joint_errors.pkl\")", "def compute_error(data, user_features, item_features, nz):\n sum_err = 0\n for d, n in nz:\n err = data[d,n] - np.dot(item_features[d,:],user_features[:,n])\n sum_err += err**2\n rmse = 0.5*sum_err/len(nz)\n return rmse", "def dist3d(self, endroit3D: Endroit3D) -> float:\n\n return self.p3ddict.dist3d(endroit3D.p3ddict)", "def make_female_3D_model\\\n (TABLE_info, m1_female_crvs, m2_female_left_crvs, m2_female_right_crvs,\\\n m3_female_left_crvs, m3_female_right_crvs, m4_female_crvs):\n \"\"\"\n 1 Get t_m from TABLE_info\n \"\"\"\n width = TABLE_info[0]\n height = TABLE_info[1]\n t_m = TABLE_info[2]\n\n \"\"\"\n 2 Get crvs from list.\n \"\"\"\n # m1\n m1_female_upper_crv = m1_female_crvs[0]\n m1_female_middle_crv = m1_female_crvs[1]\n m1_female_lower_crv = m1_female_crvs[2]\n\n # m2\n m2_female_left_upper_crv = m2_female_left_crvs[0]\n m2_female_left_middle_crv = m2_female_left_crvs[1]\n m2_female_left_lower_crv = m2_female_left_crvs[2]\n\n m2_female_right_upper_crv = m2_female_right_crvs[0]\n m2_female_right_middle_crv = m2_female_right_crvs[1]\n m2_female_right_lower_crv = m2_female_right_crvs[2]\n\n # m3\n m3_female_left_upper_crv = m3_female_left_crvs[0]\n m3_female_left_middle_crv = m3_female_left_crvs[1]\n m3_female_left_lower_crv = m3_female_left_crvs[2]\n\n m3_female_right_upper_crv = m3_female_right_crvs[0]\n m3_female_right_middle_crv = m3_female_right_crvs[1]\n m3_female_right_lower_crv = m3_female_right_crvs[2]\n\n # m4\n m4_female_upper_crv = m4_female_crvs[0]\n m4_female_middle_crv = m4_female_crvs[1]\n m4_female_lower_crv = m4_female_crvs[2]\n\n \"\"\"\n 3 Make 3D.\n \"\"\"\n # path\n start = (0, 0, 0)\n end = (0, 0, t_m)\n path = rs.AddLine(start, end)\n\n # m1\n m1_female_upper_model = rs.ExtrudeCurve(m1_female_upper_crv, path)\n m1_female_middle_model = rs.ExtrudeCurve(m1_female_middle_crv, path)\n m1_female_lower_model = rs.ExtrudeCurve(m1_female_lower_crv, path)\n\n rs.CapPlanarHoles(m1_female_upper_model)\n rs.CapPlanarHoles(m1_female_middle_model)\n rs.CapPlanarHoles(m1_female_lower_model)\n\n # m2 left\n m2_female_left_upper_model = rs.ExtrudeCurve(m2_female_left_upper_crv, path)\n m2_female_left_middle_model = rs.ExtrudeCurve(m2_female_left_middle_crv, path)\n m2_female_left_lower_model = rs.ExtrudeCurve(m2_female_left_lower_crv, path)\n\n rs.CapPlanarHoles(m2_female_left_upper_model)\n rs.CapPlanarHoles(m2_female_left_middle_model)\n rs.CapPlanarHoles(m2_female_left_lower_model)\n\n # m2 right\n m2_female_right_upper_model = rs.ExtrudeCurve(m2_female_right_upper_crv, path)\n m2_female_right_middle_model = rs.ExtrudeCurve(m2_female_right_middle_crv, path)\n m2_female_right_lower_model = rs.ExtrudeCurve(m2_female_right_lower_crv, path)\n\n rs.CapPlanarHoles(m2_female_right_upper_model)\n 
rs.CapPlanarHoles(m2_female_right_middle_model)\n rs.CapPlanarHoles(m2_female_right_lower_model)\n\n # m3 left\n m3_female_left_upper_model = rs.ExtrudeCurve(m3_female_left_upper_crv, path)\n m3_female_left_middle_model = rs.ExtrudeCurve(m3_female_left_middle_crv, path)\n m3_female_left_lower_model = rs.ExtrudeCurve(m3_female_left_lower_crv, path)\n\n rs.CapPlanarHoles(m3_female_left_upper_model)\n rs.CapPlanarHoles(m3_female_left_middle_model)\n rs.CapPlanarHoles(m3_female_left_lower_model)\n\n # m3 right\n m3_female_right_upper_model = rs.ExtrudeCurve(m3_female_right_upper_crv, path)\n m3_female_right_middle_model = rs.ExtrudeCurve(m3_female_right_middle_crv, path)\n m3_female_right_lower_model = rs.ExtrudeCurve(m3_female_right_lower_crv, path)\n\n rs.CapPlanarHoles(m3_female_right_upper_model)\n rs.CapPlanarHoles(m3_female_right_middle_model)\n rs.CapPlanarHoles(m3_female_right_lower_model)\n\n # m4\n m4_female_upper_model = rs.ExtrudeCurve(m4_female_upper_crv, path)\n m4_female_middle_model = rs.ExtrudeCurve(m4_female_middle_crv, path)\n m4_female_lower_model = rs.ExtrudeCurve(m4_female_lower_crv, path)\n\n rs.CapPlanarHoles(m4_female_upper_model)\n rs.CapPlanarHoles(m4_female_middle_model)\n rs.CapPlanarHoles(m4_female_lower_model)\n\n female_upper_models =\\\n [m1_female_upper_model, m2_female_left_upper_model, m2_female_right_upper_model,\\\n m3_female_left_upper_model, m3_female_right_upper_model, m4_female_upper_model]\n\n female_middle_models =\\\n [m1_female_middle_model, m2_female_left_middle_model, m2_female_right_middle_model,\\\n m3_female_left_middle_model, m3_female_right_middle_model, m4_female_middle_model]\n\n female_lower_models =\\\n [m1_female_lower_model, m2_female_left_lower_model, m2_female_right_lower_model,\\\n m3_female_left_lower_model, m3_female_right_lower_model, m4_female_lower_model]\n\n # move objects\n trans_upper = (0, 0, 2 * t_m)\n trans_middle = (0, 0, t_m)\n rs.MoveObjects(female_upper_models, trans_upper)\n rs.MoveObjects(female_middle_models, trans_middle)\n\n\n # deploy models\n O = (0, 0, 0)\n angle = 90\n rs.RotateObjects(female_upper_models, O, angle, None, False)\n rs.RotateObjects(female_middle_models, O, angle, None, False)\n rs.RotateObjects(female_lower_models, O, angle, None, False)\n\n axis = (1, 0, 0)\n rs.RotateObjects(female_upper_models, O, angle, axis, False)\n rs.RotateObjects(female_middle_models, O, angle, axis, False)\n rs.RotateObjects(female_lower_models, O, angle, axis, False)\n\n rs.RotateObjects(female_upper_models, O, angle, None, False)\n rs.RotateObjects(female_middle_models, O, angle, None, False)\n rs.RotateObjects(female_lower_models, O, angle, None, False)\n\n trans = (-2 * width - 3 * t_m / 2, width / 2 - 3 * t_m / 2, 0)\n rs.MoveObjects(female_upper_models, trans)\n rs.MoveObjects(female_middle_models, trans)\n rs.MoveObjects(female_lower_models, trans)\n\n rs.DeleteObject(path)\n\n female_models = [female_upper_models, female_middle_models, female_lower_models]", "def is3_d(self):\n return self.container['is3_d']", "def fz3d_2_ndhwc_compute(self):\n tik_instance = self.set_tik_instance()\n branch = self.check_branch()\n\n if branch == \"c_align_small\":\n tik_instance = self.c_align_small(tik_instance)\n elif branch == \"c_align_split_n\":\n tik_instance = self.c_align_split_n(tik_instance)\n elif branch == \"c_not_align_small_fp16\":\n tik_instance = self.c_not_align_small_fp16(tik_instance)\n elif branch == \"c_not_align_split_n_fp32\":\n tik_instance = self.c_not_align_split_n_fp32(tik_instance)\n\n return 
tik_instance", "def mid3d(self, endroit3D: Endroit3D) -> dict:\n\n return {\"point3D\": self.p3ddict.mid3d(endroit3D.p3ddict).getDict()}", "def get_state_prediction(self):\n if self.dynamics_object.standardizer_x is None:\n return (self.dynamics_object.C@self.cur_z.T).T\n else:\n return self.dynamics_object.standardizer_x.inverse_transform(self.dynamics_object.C@self.cur_z.T).T", "def mse(real, predicted):\n # Calculate the mse\n N = len(real)\n mse = (1 / N) * np.sum((real - predicted) ** 2)\n return mse", "def ThreeDTest(SMethod,IMethod,Fraction,Plot = False):\r\n \r\n # Cylinder Parameters--------------------------------------------------------- \r\n CL = 100 # cylinder length\r\n Pt = 120 # number of points in each cylinder\r\n Cn = 50 # number of horizontal slices in cylinder\r\n\r\n x = np.zeros(Cn*Pt)\r\n y = np.zeros(Cn*Pt)\r\n z = np.zeros(Cn*Pt)\r\n # Generate cylinder-----------------------------------------------------------\r\n n = 0\r\n for i in range(Cn):\r\n for j in range(Pt):\r\n x[n] = np.cos((2*pi*j)/Pt)\r\n y[n] = np.sin((2*pi*j)/Pt)\r\n z[n] = i*(CL/Cn)\r\n n += 1\r\n \r\n YFull = (np.sin(2*pi*0.03*z))+(np.cos(2*pi*x+2*pi*x))\r\n XFull = np.column_stack((x,y,z))\r\n MFull = np.column_stack((x,y,z,YFull))\r\n\r\n # Randomise matrix and Generate sparse version of geometry--------------------\r\n split = int(np.ceil((MFull.shape[0])*Fraction)) \r\n np.random.shuffle(MFull)\r\n # Sparse Set\r\n XTrain = MFull[:split,:3]\r\n YTrain = MFull[:split,3]\r\n # Training set\r\n XStar = MFull[split:,:3]\r\n CStar = MFull[split:,3]\r\n\r\n # Reconstruct XFull's geometry using XTrain and YTrain------------------------\r\n YHat = ThreeDPointInter(XTrain,YTrain,XFull,SMethod,IMethod,10)\r\n mse = mseCalc(YFull,YHat)\r\n print('Mean Squared Error =',mse)\r\n # Plot whole data-----------------------------------------------------------\r\n if Plot:\r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(131, projection='3d')\r\n ax1.scatter(XFull[:,0],XFull[:,1],XFull[:,2],c=[float(i) for i in YFull],cmap='plasma')\r\n ax1.set_xlabel('x')\r\n ax1.set_ylabel('y')\r\n ax1.set_zlabel('z')\r\n # Plot training Data\r\n ax2 = fig.add_subplot(132, projection='3d')\r\n ax2.scatter(XTrain[:,0],XTrain[:,1],XTrain[:,2],c=[float(i) for i in YTrain],cmap='plasma')\r\n ax2.set_xlabel('XTrain1')\r\n ax2.set_ylabel('XTrain2')\r\n ax2.set_zlabel('XTrain3')\r\n # Plot Reconstruction of XFull\r\n ax3 = fig.add_subplot(133, projection='3d')\r\n ax3.scatter(XFull[:,0],XFull[:,1],XFull[:,2],c=[float(i) for i in YHat],cmap='plasma')\r\n ax3.set_xlabel('x')\r\n ax3.set_ylabel('y')\r\n ax3.set_zlabel('z')\r\n \r\n plt.show()\r\n\r\n return mse", "def evaluate(dataset, model, args):\n device = None\n if torch.cuda.is_available():\n device = torch.cuda.current_device()\n\n model.to(device)\n model.train(False)\n\n test_loader = DataLoader(dataset, num_workers=4)\n average_mse = 0\n average_psnr = 0\n\n for batch in tqdm(test_loader):\n # Preprocess the data\n image, camera_pose = batch\n batch_size = len(image)\n\n image = torch.as_tensor(image, device=device)\n image = image[..., :3].permute(0, 3, 1, 2)\n camera_pose = torch.as_tensor(camera_pose, device=device)\n camera_pose = camera_pose[:, :3, :]\n\n # Run the inference\n with torch.no_grad():\n prediction = model(camera_pose)\n predicted_pixels = prediction[\"rgb_map\"]\n target_pixels = image.reshape(batch_size, 3, -1).transpose(1, 2)\n\n mse = mse_loss(predicted_pixels[-1], target_pixels)\n average_mse += mse\n\n psnr = mse_to_psnr(mse)\n average_psnr += psnr\n\n 
average_mse /= len(dataset)\n average_psnr /= len(dataset)\n\n output = {\n \"average_mse\": float(average_mse),\n \"average_psnr\": float(average_psnr),\n }\n print(output)\n\n with open(args.output_file, \"w\") as json_file:\n json.dump(output, json_file)\n print(\"Saved results to:\", args.output_file)", "def df2dx3_func(self,X):\n result = (\n self.rj*self.rm*self.k_spr*self.b_spr * (\n np.exp(self.b_spr*(self.rm*X[2] - self.rj*X[0]))\n * ((self.rm*X[2] - self.rj*X[0])>=0)\n ) / self.Ij\n )\n return(result)", "def process_outputs(self, data, output, save=True):\n\n pred_spline = output['pred_polys']\n\n # preds = self.spline.sample_point(pred_spline)\n preds = pred_spline\n torch.cuda.synchronize()\n preds = preds.cpu().numpy()\n\n pred_spline = pred_spline.cpu()\n pred_spline = pred_spline.numpy()\n\n instances = data['instance']\n polys = []\n results = []\n for i, instance in enumerate(instances):\n detection = defaultdict(float)\n poly = preds[i]\n poly = poly * data['patch_w'][i]\n poly[:, 0] += data['starting_point'][i][0]\n poly[:, 1] += data['starting_point'][i][1]\n detection['image_id'] = instance['image_path']\n img_h, img_w = instance['height'], instance['width']\n\n detection['poly'] = poly\n detection['image_size'] = [img_w, img_h]\n # pred_sp = pred_spline[i]\n # pred_sp = pred_sp * data['patch_w'][i]\n # pred_sp[:, 0] += data['starting_point'][i][0]\n # pred_sp[:, 1] += data['starting_point'][i][1]\n #\n # instance['spline_pos'] = pred_sp.tolist()\n\n polys.append(poly)\n\n results.append(detection)\n\n\n # if save:\n\n # predicted_poly = []\n\n\n\n # pred_mask = np.zeros((img_h, img_w), dtype=np.uint8)\n # utils.draw_poly(pred_mask, poly.astype(np.int))\n # predicted_poly.append(poly.tolist())\n #\n # # gt_mask = utils.get_full_mask_from_instance(\n # # self.min_area,\n # # instance)\n #\n # instance['my_predicted_poly'] = predicted_poly\n # # instance_id = instance['instance_id']\n # image_id = instance['image_id']\n #\n # pred_mask_fname = os.path.join(self.output_dir, '{}_pred.png'.format(image_id))\n # instance['pred_mask_fname'] = os.path.relpath(pred_mask_fname, self.output_dir)\n #\n # # gt_mask_fname = os.path.join(self.output_dir, '{}_gt.png'.format(instance_id))\n # # instance['gt_mask_fname'] = os.path.relpath(gt_mask_fname, self.output_dir)\n #\n # instance['n_corrections'] = 0\n #\n # info_fname = os.path.join(self.output_dir, '{}_info.json'.format(image_id))\n #\n # with warnings.catch_warnings():\n # warnings.simplefilter(\"ignore\")\n # sio.imsave(pred_mask_fname, pred_mask)\n # # sio.imsave(gt_mask_fname, gt_mask)\n #\n # # print '==> dumping json'\n # with open(info_fname, 'w') as f:\n # json.dump(instance, f, indent=2)\n\n return results, polys", "def Has3d(self, *args):\n return _Adaptor3d.Adaptor3d_TopolTool_Has3d(self, *args)", "def calcS3env(Idc,I3, R, Tj, Genv):\n t = np.linspace(0, 1, 1000, endpoint=False)\n It = Idc[:, None]+I3*np.cos(2*np.pi*t[None, :])\n Dt = qnoise_fit.dSiidV(It, R, Tj)\n D3 = qnoise_fit.Fcoef(Dt, t, 1)[0]\n return Genv*D3", "def dynamics_prediction_variance_scorer(dynamics, episodes, window_size=1024):\n total_variances = []\n for episode in episodes:\n for batch in _make_batches(episode, window_size, dynamics.n_frames):\n pred = dynamics.predict(batch.observations, batch.actions, True)\n total_variances += pred[2].tolist()\n # smaller is better\n return -np.mean(total_variances)", "def test_2_2_3D_rec_splits(self):\n check = [(-3.0, -2.0, 0.0), (4.0, 10.0, 1.0), (4.0, -2.0, 0.0),\n (4.0, 10.0, 0.0), (4.0, -2.0, 1.0), 
(-3.0, 10.0, 0.0),\n (-3.0, 10.0, 1.0), (-3.0, -2.0, 1.0), (0.5, 4.0, 0.5),\n (-3.0, 4.0, 0.5), (-3.0, -2.0, 0.5), (-3.0, 4.0, 0.0),\n (0.5, -2.0, 0.5), (0.5, -2.0, 0.0), (0.5, 4.0, 0.0),\n (-1.25, 1.0, 0.25), (4.0, 4.0, 0.5), (4.0, 10.0, 0.5),\n (4.0, 4.0, 1.0), (0.5, 10.0, 0.5), (0.5, 10.0, 1.0),\n (0.5, 4.0, 1.0), (2.25, 7.0, 0.75), (4.0, -2.0, 0.5),\n (4.0, 4.0, 0.0), (2.25, 1.0, 0.25), (0.5, 10.0, 0.0),\n (2.25, 7.0, 0.25), (0.5, -2.0, 1.0), (2.25, 1.0, 0.75),\n (-3.0, 10.0, 0.5), (-1.25, 7.0, 0.25), (-3.0, 4.0, 1.0),\n (-1.25, 7.0, 0.75), (-1.25, 1.0, 0.75), (0.5, 1.0, 0.25),\n (0.5, 4.0, 0.25), (0.5, 1.0, 0.5), (-1.25, 4.0, 0.25),\n (-1.25, 4.0, 0.5), (-1.25, 1.0, 0.5), (-0.375, 2.5, 0.375),\n (-3.0, 1.0, 0.25), (-3.0, -2.0, 0.25), (-3.0, 1.0, 0.0),\n (-1.25, -2.0, 0.25), (-1.25, -2.0, 0.0), (-1.25, 1.0, 0.0),\n (-2.125, -0.5, 0.125), (-3.0, 4.0, 0.25), (-3.0, 1.0, 0.5),\n (-2.125, 2.5, 0.375), (-1.25, -2.0, 0.5),\n (-2.125, -0.5, 0.375), (-1.25, 4.0, 0.0), (-2.125, 2.5, 0.125),\n (0.5, -2.0, 0.25), (-0.375, -0.5, 0.375), (0.5, 1.0, 0.0),\n (-0.375, -0.5, 0.125), (-0.375, 2.5, 0.125), (0.5, 7.0, 0.75),\n (0.5, 4.0, 0.75), (0.5, 7.0, 0.5), (2.25, 4.0, 0.75),\n (2.25, 4.0, 0.5), (2.25, 7.0, 0.5), (1.375, 5.5, 0.625),\n (4.0, 7.0, 0.75), (4.0, 10.0, 0.75), (4.0, 7.0, 1.0),\n (2.25, 10.0, 0.75), (2.25, 10.0, 1.0), (2.25, 7.0, 1.0),\n (3.125, 8.5, 0.875), (4.0, 4.0, 0.75), (4.0, 7.0, 0.5),\n (3.125, 5.5, 0.625), (2.25, 10.0, 0.5), (3.125, 8.5, 0.625),\n (2.25, 4.0, 1.0), (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (1.375, 8.5, 0.625), (0.5, 7.0, 1.0), (1.375, 8.5, 0.875),\n (1.375, 5.5, 0.875), (2.25, 4.0, 0.25), (2.25, 1.0, 0.5),\n (1.375, 2.5, 0.375), (4.0, 1.0, 0.25), (4.0, -2.0, 0.25),\n (4.0, 1.0, 0.0), (2.25, -2.0, 0.25), (2.25, -2.0, 0.0),\n (2.25, 1.0, 0.0), (3.125, -0.5, 0.125), (4.0, 4.0, 0.25),\n (4.0, 1.0, 0.5), (3.125, 2.5, 0.375), (2.25, -2.0, 0.5),\n (3.125, -0.5, 0.375), (2.25, 4.0, 0.0), (3.125, 2.5, 0.125),\n (1.375, -0.5, 0.375), (1.375, -0.5, 0.125),\n (1.375, 2.5, 0.125), (0.5, 7.0, 0.25), (1.375, 5.5, 0.375),\n (4.0, 7.0, 0.25), (4.0, 10.0, 0.25), (4.0, 7.0, 0.0),\n (2.25, 10.0, 0.25), (2.25, 10.0, 0.0), (2.25, 7.0, 0.0),\n (3.125, 8.5, 0.125), (3.125, 5.5, 0.375), (3.125, 8.5, 0.375),\n (3.125, 5.5, 0.125), (0.5, 10.0, 0.25), (1.375, 8.5, 0.375),\n (0.5, 7.0, 0.0), (1.375, 8.5, 0.125), (1.375, 5.5, 0.125),\n (0.5, 1.0, 0.75), (1.375, 2.5, 0.625), (4.0, 1.0, 0.75),\n (4.0, -2.0, 0.75), (4.0, 1.0, 1.0), (2.25, -2.0, 0.75),\n (2.25, -2.0, 1.0), (2.25, 1.0, 1.0), (3.125, -0.5, 0.875),\n (3.125, 2.5, 0.625), (3.125, -0.5, 0.625), (3.125, 2.5, 0.875),\n (0.5, -2.0, 0.75), (1.375, -0.5, 0.625), (0.5, 1.0, 1.0),\n (1.375, -0.5, 0.875), (1.375, 2.5, 0.875), (-1.25, 7.0, 0.5),\n (-0.375, 5.5, 0.375), (-3.0, 7.0, 0.25), (-3.0, 10.0, 0.25),\n (-3.0, 7.0, 0.0), (-1.25, 10.0, 0.25), (-1.25, 10.0, 0.0),\n (-1.25, 7.0, 0.0), (-2.125, 8.5, 0.125), (-3.0, 7.0, 0.5),\n (-2.125, 5.5, 0.375), (-1.25, 10.0, 0.5), (-2.125, 8.5, 0.375),\n (-2.125, 5.5, 0.125), (-0.375, 8.5, 0.375),\n (-0.375, 8.5, 0.125), (-0.375, 5.5, 0.125), (-1.25, 4.0, 0.75),\n (-0.375, 5.5, 0.625), (-3.0, 7.0, 0.75), (-3.0, 10.0, 0.75),\n (-3.0, 7.0, 1.0), (-1.25, 10.0, 0.75), (-1.25, 10.0, 1.0),\n (-1.25, 7.0, 1.0), (-2.125, 8.5, 0.875), (-3.0, 4.0, 0.75),\n (-2.125, 5.5, 0.625), (-2.125, 8.5, 0.625), (-1.25, 4.0, 1.0),\n (-2.125, 5.5, 0.875), (-0.375, 8.5, 0.625),\n (-0.375, 8.5, 0.875), (-0.375, 5.5, 0.875),\n (-0.375, 2.5, 0.625), (-3.0, 1.0, 0.75), (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-1.25, -2.0, 0.75), 
(-1.25, -2.0, 1.0),\n (-1.25, 1.0, 1.0), (-2.125, -0.5, 0.875), (-2.125, 2.5, 0.625),\n (-2.125, -0.5, 0.625), (-2.125, 2.5, 0.875),\n (-0.375, -0.5, 0.625), (-0.375, -0.5, 0.875),\n (-0.375, 2.5, 0.875)]\n nn_checks = {(2.25, 7.0, 0.75): [(4.0, 7.0, 0.75), (2.25, 7.0, 1.0),\n (4.0, 7.0, 0.5), (4.0, 7.0, 1.0),\n (4.0, 4.0, 0.75), (1.375, 5.5, 0.875),\n (2.25, 4.0, 1.0), (2.25, 4.0, 0.5),\n (2.25, 4.0, 0.75), (3.125, 8.5, 0.875),\n (3.125, 8.5, 0.625), (4.0, 10.0, 0.75),\n (2.25, 10.0, 1.0), (2.25, 10.0, 0.75),\n (2.25, 10.0, 0.5), (1.375, 8.5, 0.625),\n (1.375, 8.5, 0.875), (0.5, 7.0, 0.75),\n (0.5, 7.0, 0.5), (3.125, 5.5, 0.625),\n (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (0.5, 7.0, 1.0), (0.5, 4.0, 0.75),\n (2.25, 7.0, 0.5), (1.375, 5.5, 0.625)],\n (4.0, -2.0, 0.5): [(4.0, -2.0, 0.75), (4.0, -2.0, 0.25),\n (2.25, 1.0, 0.5), (2.25, -2.0, 0.75),\n (2.25, -2.0, 0.5), (2.25, -2.0, 0.25),\n (4.0, 1.0, 0.25), (4.0, 1.0, 0.75),\n (4.0, 1.0, 0.5), (3.125, -0.5, 0.375),\n (3.125, -0.5, 0.625)],\n (-2.125, -0.5, 0.875): [(-1.25, 1.0, 1.0),\n (-1.25, 1.0, 0.75),\n (-1.25, -2.0, 0.75),\n (-1.25, -2.0, 1.0),\n (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-3, -2, 1),\n (-3.0, 1.0, 0.75)]}\n\n init_triangulation(3, 2, check, nn_checks, bounds=[(-3, 4), (-2, 10), (0, 1)])", "def get_mc_pred(output,eng,frame,nImg):\n preds_2d = [];\n preds_3d = [];\n \n c,s = ut.calc_cent_scale(frame);\n \n center = matlab.double([list(c)],(1,2));\n scale = matlab.double([s],(1,1));\n \n for i in tqdm(range(0,nImg)):\n preds_2d.append(eng.transformMPII(output[\"W_final\"][2*i:2*(i+1)],center,scale,matlab.double([64,64],(1,2)),1));\n preds_3d.append(output[\"S_final\"][3*i:3*i + 3]);\n \n print(\"Converting estimates to Python format...\");\n preds_2d = np.array(preds_2d);\n preds_3d = np.array(preds_3d);\n \n preds_2d = preds_2d.swapaxes(1, 2);\n preds_3d = preds_3d.swapaxes(1, 2);\n \n return preds_2d, preds_3d;", "def test_backward_injection_in_3D(self):\n self.fom = ModeMatch(monitor_name = 'figure_of_merit',\n mode_number = 1,\n direction = 'Backward',\n multi_freq_src = True,\n target_T_fwd = lambda wl: np.ones(wl.size),\n norm_p = 1)\n Optimization.set_source_wavelength(self.sim, 'source', self.fom.multi_freq_src, len(self.wavelengths))\n self.sim.fdtd.setnamed('FDTD','dimension','3D')\n self.sim.fdtd.setnamed('source', 'x', -self.sim.fdtd.getnamed('source','x'))\n self.sim.fdtd.setnamed('source','direction','Backward')\n self.sim.fdtd.setnamed('figure_of_merit','x', -self.sim.fdtd.getnamed('figure_of_merit','x'))\n self.fom.initialize(self.sim)\n self.fom.make_forward_sim(self.sim)\n self.sim.run(name = 'modematch_backward_injection_in_3D', iter = 1)\n FOM = self.fom.get_fom(self.sim)\n self.assertAlmostEqual(FOM, self.ref_fom, 4)", "def test_3d():\n dic, data = ng.bruker.read(os.path.join(DATA_DIR, \"bruker_3d\"))\n assert dic['FILE_SIZE'] == 91226112\n assert data.shape == (116, 128, 768)\n assert round(data[0, 0, 40].real, 2) == 18.0\n assert round(data[0, 0, 40].imag, 2) == -66.0\n assert round(data[5, 13, 91].real, 2) == 1138.0\n assert round(data[5, 13, 91].imag, 2) == 3482.0\n write_readback(dic, data)", "def rmse(actual: np.ndarray, predicted: np.ndarray):\n return np.sqrt(np.mean(np.square(_error(actual, predicted))))", "def Compute3d(self, *args):\n return _BRepAlgo.BRepAlgo_NormalProjection_Compute3d(self, *args)", "def predict(self, first_preprocessed_inputs, second_preprocessed_inputs,third_preprocessed_inputs):\r\n with slim.arg_scope([slim.conv2d, slim.fully_connected],\r\n 
activation_fn=tf.nn.relu):\r\n eyeLeft = first_preprocessed_inputs\r\n eyeRight = second_preprocessed_inputs\r\n face = third_preprocessed_inputs\r\n\r\n ###左右眼网络\r\n ##左眼网络\r\n\r\n eyeLeft_net = slim.conv2d(eyeLeft, 4, [7, 7], scope='eyeLeft_conv1')\r\n eyeLeft_net = slim.max_pool2d(eyeLeft_net, [2, 2], 2, scope='eyeLeft_pool1')\r\n\r\n eyeLeft_net = slim.conv2d(eyeLeft_net, 8, [5, 5], scope='eyeLeft_conv2')\r\n eyeLeft_net = slim.max_pool2d(eyeLeft_net, [2, 2], 2, scope='eyeLeft_pool2')\r\n\r\n eyeLeft_net = slim.conv2d(eyeLeft_net, 8, [5, 5], scope='eyeLeft_conv3')\r\n eyeLeft_net = slim.max_pool2d(eyeLeft_net, [2, 2], 2, scope='eyeLeft_pool3')\r\n\r\n eyeLeft_net = slim.conv2d(eyeLeft_net, 200, [5, 5], scope='eyeLeft_conv4')\r\n eyeLeft_net = slim.max_pool2d(eyeLeft_net, [2, 2], 2, scope='eyeLeft_pool4')\r\n\r\n ##右眼网络\r\n\r\n eyeRight_net = slim.conv2d(eyeRight, 4, [7, 7], scope='eyeRight_conv1')\r\n eyeRight_net = slim.max_pool2d(eyeRight_net, [2, 2], 2, scope='eyeRight_pool1')\r\n\r\n eyeRight_net = slim.conv2d(eyeRight_net, 8, [5, 5], scope='eyeRight_conv2')\r\n eyeRight_net = slim.max_pool2d(eyeRight_net, [2, 2], 2, scope='eyeRight_pool2')\r\n\r\n eyeRight_net = slim.conv2d(eyeRight_net, 8, [5, 5], scope='eyeRight_conv3')\r\n eyeRight_net = slim.max_pool2d(eyeRight_net, [2, 2], 2, scope='eyeRight_pool3')\r\n\r\n eyeRight_net = slim.conv2d(eyeRight_net, 200, [5, 5], scope='eyeRight_conv4')\r\n eyeRight_net = slim.max_pool2d(eyeRight_net, [2, 2], 2, scope='eyeRight_pool4')\r\n\r\n #------左右眼网络提取特征图拼接\r\n eyeLeft = tf.reshape(eyeLeft_net, [-1, int(np.prod(eyeLeft_net.get_shape()[1:]))])\r\n eyeRight = tf.reshape(eyeRight_net, [-1, int(np.prod(eyeRight_net.get_shape()[1:]))])\r\n eye_net = tf.concat([eyeLeft, eyeRight], 1)\r\n #------左右眼网络提取特征拼接\r\n\r\n #全连接特征融合\r\n eye_net = slim.fully_connected(eye_net, 128, scope='eye_fc1')\r\n eye_net = slim.dropout(eye_net, 0.5, scope='eye_dropout1')\r\n\r\n ##脸部网络\r\n\r\n face_net = slim.conv2d(face, 4, [7, 7], scope='face_conv1')\r\n face_net = slim.max_pool2d(face_net, [2, 2], 2, scope='face_pool1')\r\n\r\n face_net = slim.conv2d(face_net, 8, [5, 5], scope='face_conv2')\r\n face_net = slim.max_pool2d(face_net, [2, 2], 2, scope='face_pool1')\r\n\r\n face_net = slim.conv2d(face_net, 8, [5, 5], scope='face_conv3')\r\n face_net = slim.max_pool2d(face_net, [2, 2], 2, scope='face_pool1')\r\n\r\n face_net = slim.conv2d(face_net, 8, [5, 5], scope='face_conv4')\r\n face_net = slim.max_pool2d(face_net, [2, 2], 2, scope='face_pool1')\r\n #全连接层\r\n face_net = slim.fully_connected(face_net, 256, scope='face_fc1')\r\n face_net = slim.dropout(face_net, 0.5, scope='face_dropout1')\r\n face_net = slim.fully_connected(face_net, 128, scope='face_fc2')\r\n face_net = slim.dropout(face_net, 0.5, scope='face_dropout2')\r\n\r\n #-----左右眼网络加脸部网络\r\n face_net = tf.reshape(face_net, [-1, int(np.prod(face_net.get_shape()[1:]))])\r\n eyeFace_net = tf.concat([eye_net, face_net], 1)\r\n #-----左右眼网络加脸部网络\r\n\r\n eyeFace_net = slim.fully_connected(eyeFace_net, 128, scope='eyeFace_fc1')\r\n eyeFace_net = slim.dropout(eyeFace_net, 0.5, scope='eyeFace_dropout1')\r\n\r\n eyeFace_logits = slim.fully_connected(eyeFace_net, self.num_classes,\r\n biases_initializer=tf.zeros_initializer(),\r\n weights_initializer=trunc_normal(1 / 192.0),\r\n weights_regularizer=None,\r\n activation_fn=None,\r\n scope='eyeFace_logits')\r\n\r\n prediction_dict = {'eyeFace_logits': eyeFace_logits}\r\n return prediction_dict", "def __det3x3__(a):\r\n # val = +a[0,0] * ( a[1,1] * a[2,2] - a[2,1] * a[1,2] 
)\r\n # val += -a[0,1] * ( a[1,0] * a[2,2] - a[2,0] * a[1,2] )\r\n # val += +a[0,2] * ( a[1,0] * a[2,1] - a[2,0] * a[1,1] )\r\n val = +a[0] * (a[4] * a[8] - a[7] * a[5])\r\n val += -a[1] * (a[3] * a[8] - a[6] * a[5])\r\n val += +a[2] * (a[3] * a[7] - a[6] * a[4])\r\n return val", "def loss(data, y_pred):\n # TODO: Try using points other than the training data points for the divergence calculation.\n y_true = data[:,:2]\n p1 = data[:,2:5]\n p2 = data[:,5:8]\n p3 = data[:,8:11]\n p4 = data[:,11:14]\n\n ### Calculate divergence using model predictions:\n\n # Step 1: Use the model to calculate predicted wind field in the surrounding points p1, p2, p3 and p4.\n y_pred_p1 = model(p1)\n y_pred_p2 = model(p2)\n y_pred_p3 = model(p3)\n y_pred_p4 = model(p4)\n\n # Step 2: Calculate the partial derivatives with a three-point centered difference.\n scale_x = self.scaler_data.scale_[0] #scale-factor for x\n scale_y = self.scaler_data.scale_[1] #scale-factor for y\n\n dudx = (y_pred_p1[:, 0] - y_pred_p3[:, 0]) / (p1[:,0] - p3[:,0]) # <- pj = transformed data\n dvdy = (y_pred_p2[:, 1] - y_pred_p4[:, 1]) / (p2[:,1] - p4[:,1]) # <- pj = transformed data\n\n # Step 3: Calculate the divergence.\n divergence = ( dudx / scale_x + dvdy / scale_y ) * np.mean([scale_x, scale_y])\n #tf.print(K.mean(K.abs(divergence)))\n\n # Step 4: Calculate and return total loss.\n return K.mean(K.square(y_true - y_pred)) + gamma*K.mean(K.square(divergence))", "def run_3D_predictions(self, min_size=5000):\n cases = self.test_loader.dataset.im_ids\n assert len(cases) == len(self.test_loader)\n for (test_batch, case) in tqdm(zip(self.test_loader, cases), total=len(cases)):\n test_x = torch.squeeze(test_batch[0], dim=0)\n if self.pseudo_3D:\n pred, _, act, _ = self.model.predict_3D_pseudo3D_2Dconv(test_x,\n **self.pred_3D_params)\n else:\n pred, _, act, _ = self.model.predict_3D(test_x,\n **self.pred_3D_params)\n assert len(pred.shape) == 3\n assert len(act.shape) == 4\n pred = remove_3D_connected_components(pred, min_size=min_size)\n pred = self.post_process_stage1(pred)\n self.save_pred(pred, act, case)\n case_raw = Path(case).name\n bbox_coord = self.create_bbox_stage1(pred, case_raw)\n self.bbox_coords[case_raw] = bbox_coord\n self.save_bbox_coords()", "def centered_euclidean_distance_3D(y_true, y_pred):\n y_true = y_true - K.mean(y_true, axis=-1, keepdims=True)\n y_pred = y_pred - K.mean(y_pred, axis=-1, keepdims=True)\n\n ced3D = K.flatten(K.sqrt(K.sum(K.pow(y_true - y_pred, 2), axis=1)))\n return K_nanmean_infmean(ced3D)", "def compute_exam_p_and_r(y_true_3d, y_pred_3d, n_sents, n_words, silent=False):\n p_list = list()\n r_list = list()\n d_batch = y_true_3d.shape[0]\n\n for sample_idx in range(d_batch):\n n_sent = n_sents[sample_idx, 0]\n for sent_idx in range(n_sent):\n n_word = n_words[sample_idx, sent_idx]\n y_true = y_true_3d[sample_idx, sent_idx, :n_word]\n y_pred = y_pred_3d[sample_idx, sent_idx, :n_word]\n if not silent and not y_pred.any() == 1:\n logger.info('No pred is made for: {0}.{1}. 
y_pred: {2}'.format(sample_idx, sent_idx, y_pred))\n\n p_list.append(precision_score(y_true, y_pred))\n r_list.append(recall_score(y_true, y_pred))\n\n return p_list, r_list", "def get3Dcoordinates(self, skel_2d):\n skel_2d = skel_2d.to(self.device)\n z_out = self.net(skel_2d)\n z_out = z_out.detach().cpu().numpy()\n z_out = z_out.reshape(-1)\n return z_out", "def _compute_densepose_part_and_coordinate_losses(\n self, input_height, input_width, part_predictions,\n surface_coord_predictions):\n gt_dp_num_points_list = self.groundtruth_lists(\n fields.BoxListFields.densepose_num_points)\n gt_dp_part_ids_list = self.groundtruth_lists(\n fields.BoxListFields.densepose_part_ids)\n gt_dp_surface_coords_list = self.groundtruth_lists(\n fields.BoxListFields.densepose_surface_coords)\n gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)\n\n assigner = self._target_assigner_dict[DENSEPOSE_TASK]\n batch_indices, batch_part_ids, batch_surface_coords, batch_weights = (\n assigner.assign_part_and_coordinate_targets(\n height=input_height,\n width=input_width,\n gt_dp_num_points_list=gt_dp_num_points_list,\n gt_dp_part_ids_list=gt_dp_part_ids_list,\n gt_dp_surface_coords_list=gt_dp_surface_coords_list,\n gt_weights_list=gt_weights_list))\n\n part_prediction_loss = 0\n surface_coord_loss = 0\n classification_loss_fn = self._densepose_params.classification_loss\n localization_loss_fn = self._densepose_params.localization_loss\n num_predictions = float(len(part_predictions))\n num_valid_points = tf.math.count_nonzero(batch_weights)\n num_valid_points = tf.cast(tf.math.maximum(num_valid_points, 1), tf.float32)\n for part_pred, surface_coord_pred in zip(part_predictions,\n surface_coord_predictions):\n # Potentially upsample the feature maps, so that better quality (i.e.\n # higher res) groundtruth can be applied.\n if self._densepose_params.upsample_to_input_res:\n part_pred = tf.keras.layers.UpSampling2D(\n self._stride, interpolation=self._densepose_params.upsample_method)(\n part_pred)\n surface_coord_pred = tf.keras.layers.UpSampling2D(\n self._stride, interpolation=self._densepose_params.upsample_method)(\n surface_coord_pred)\n # Compute the part prediction loss.\n part_pred = cn_assigner.get_batch_predictions_from_indices(\n part_pred, batch_indices[:, 0:3])\n part_prediction_loss += classification_loss_fn(\n part_pred[:, tf.newaxis, :],\n batch_part_ids[:, tf.newaxis, :],\n weights=batch_weights[:, tf.newaxis, tf.newaxis])\n # Compute the surface coordinate loss.\n batch_size, out_height, out_width, _ = _get_shape(\n surface_coord_pred, 4)\n surface_coord_pred = tf.reshape(\n surface_coord_pred, [batch_size, out_height, out_width, -1, 2])\n surface_coord_pred = cn_assigner.get_batch_predictions_from_indices(\n surface_coord_pred, batch_indices)\n surface_coord_loss += localization_loss_fn(\n surface_coord_pred,\n batch_surface_coords,\n weights=batch_weights[:, tf.newaxis])\n part_prediction_loss = tf.reduce_sum(part_prediction_loss) / (\n num_predictions * num_valid_points)\n surface_coord_loss = tf.reduce_sum(surface_coord_loss) / (\n num_predictions * num_valid_points)\n return part_prediction_loss, surface_coord_loss", "def test_3D_m6_2k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n 
Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)", "def keypoint_3d_loss(self, pred_keypoints_3d, gt_keypoints_3d, has_pose_3d):\n pred_keypoints_3d = pred_keypoints_3d[:, 25:, :]\n conf = gt_keypoints_3d[:, :, -1].unsqueeze(-1).clone()\n gt_keypoints_3d = gt_keypoints_3d[:, :, :-1].clone()\n gt_keypoints_3d = gt_keypoints_3d[has_pose_3d == 1]\n conf = conf[has_pose_3d == 1]\n pred_keypoints_3d = pred_keypoints_3d[has_pose_3d == 1]\n if len(gt_keypoints_3d) > 0:\n gt_pelvis = (gt_keypoints_3d[:, 2,:] + gt_keypoints_3d[:, 3,:]) / 2\n gt_keypoints_3d = gt_keypoints_3d - gt_pelvis[:, None, :]\n pred_pelvis = (pred_keypoints_3d[:, 2,:] + pred_keypoints_3d[:, 3,:]) / 2\n pred_keypoints_3d = pred_keypoints_3d - pred_pelvis[:, None, :]\n return (conf * self.criterion_keypoints(pred_keypoints_3d, gt_keypoints_3d)).mean()\n else:\n return torch.FloatTensor(1).fill_(0.).to(self.device)", "def evaluate(self, batch):\n images, labels, projs, planes = [], [], [], []\n for serialized in batch:\n example = tf.train.Example.FromString(serialized)\n image, label = self.encoder.parse_example(example)\n images.append(image)\n labels.append(label)\n proj, _ = self.encoder.parse_camera(example)\n projs.append(proj)\n plane = self.encoder.parse_plane(example)\n planes.append(plane)\n\n\n #pred = self.model.predict(np.asarray(images), batch_size=len(batch))\n results = self.predict(np.asarray(images), batch_size=len(batch))\n \n # Creating some fake results for testing as well as example of what the \n # the results should look like.\n # results = []\n # for label in labels:\n # instances = label['2d_instance']\n # instances_3d = label['3d_instance']\n # boxes = []\n # for i in range(len(instances)):\n # point_2d = np.copy(instances[i])\n # point_3d = np.copy(instances_3d[i])\n # for j in range(9):\n # # Translating the box in 3D, this will have a large impact on 3D IoU.\n # point_3d[j] += np.array([0.01, 0.02, 0.5])\n # boxes.append((point_2d, point_3d))\n # results.append(boxes)\n\n for boxes, label, plane in zip(results, labels, planes): \n instances = label['2d_instance']\n instances_3d = label['3d_instance']\n visibilities = label['visibility']\n num_instances = 0\n for instance, instance_3d, visibility in zip(\n instances, instances_3d, visibilities):\n if (visibility > self._vis_thresh and\n self._is_visible(instance[0]) and instance_3d[0, 2] < 0):\n num_instances += 1\n # We don't have negative examples in evaluation.\n if num_instances == 0:\n continue\n\n iou_hit_miss = metrics.HitMiss(self._iou_thresholds)\n azimuth_hit_miss = metrics.HitMiss(self._azimuth_thresholds)\n polar_hit_miss = metrics.HitMiss(self._polar_thresholds)\n pixel_hit_miss = metrics.HitMiss(self._pixel_thresholds)\n\n num_matched = 0\n for box in boxes:\n box_point_2d, box_point_3d = box\n index = self.match_box(box_point_2d, instances, visibilities)\n if index >= 0:\n num_matched += 1\n pixel_error = self.evaluate_2d(box_point_2d, instances[index])\n\n # If you only compute the 3D bounding boxes from RGB images, \n # your 3D keypoints may be upto scale. However the ground truth\n # is at metric scale. 
There is a hack to re-scale your box using \n # the ground planes (assuming your box is sitting on the ground).\n # However many models learn to predict depths and scale correctly.\n #scale = self.compute_scale(box_point_3d, plane)\n #box_point_3d = box_point_3d * scale\n azimuth_error, polar_error, iou = self.evaluate_3d(box_point_3d, instances_3d[index])\n iou_hit_miss.record_hit_miss(iou)\n pixel_hit_miss.record_hit_miss(pixel_error, greater=False)\n azimuth_hit_miss.record_hit_miss(azimuth_error, greater=False)\n polar_hit_miss.record_hit_miss(polar_error, greater=False)\n\n if num_matched > 0:\n self._iou_ap.append(iou_hit_miss, num_instances)\n self._pixel_ap.append(pixel_hit_miss, num_instances)\n self._azimuth_ap.append(azimuth_hit_miss, num_instances)\n self._polar_ap.append(polar_hit_miss, num_instances)\n self._matched += num_matched", "def calcVariance(self,output=None):\n self.uu = self.U[0,:,:,:]**2\n self.vv = self.U[1,:,:,:]**2\n self.ww = self.U[2,:,:,:]**2\n self.uu_tavg = np.mean(self.uu,0) # time averages\n self.vv_tavg = np.mean(self.vv,0)\n self.ww_tavg = np.mean(self.ww,0)\n self.uu_mean = np.mean( self.uu_tavg ) # space/time average\n self.vv_mean = np.mean( self.vv_tavg )\n self.ww_mean = np.mean( self.ww_tavg )\n\n print('Spatial average of <u\\'u\\'>, <v\\'v\\'>, <w\\'w\\'> :',self.uu_mean,self.vv_mean,self.ww_mean)\n\n if output is not None:\n with open(output,'w') as f:\n f.write('Spatial average of <u\\'u\\'>, <v\\'v\\'>, <w\\'w\\'> : {} {} {}\\n'.format(self.uu_mean,self.vv_mean,self.ww_mean))\n f.write('\\n Height Standard deviation at grid points for the u component:\\n')\n for i,zi in enumerate(self.z):\n f.write('z= {:.1f} : {}\\n'.format(zi,np.sqrt(self.uu_tavg[:,i])))\n f.write('\\n Height Standard deviation at grid points for the v component:\\n')\n for i,zi in enumerate(self.z):\n f.write('z= {:.1f} : {}\\n'.format(zi,np.sqrt(self.vv_tavg[:,i])))\n f.write('\\n Height Standard deviation at grid points for the w component:\\n')\n for i,zi in enumerate(self.z):\n f.write('z= {:.1f} : {}\\n'.format(zi,np.sqrt(self.ww_tavg[:,i])))\n print('Wrote out',output)", "def fun(params, n_cameras, n_points, camera_indices, point_indices, points_2d, theta):\n \n camera_params = params[:n_cameras * 9].reshape((n_cameras, 9))\n points_3d = params[n_cameras * 9:].reshape((n_points, 3))\n points_proj = project(points_3d[point_indices], camera_params[camera_indices], theta)\n print(\"Residual is: \", (points_proj - points_2d).ravel())\n return (points_proj - points_2d).ravel()", "def calcS3eq(I3, R, T, G_env, G_lin):\n t = np.linspace(0, 1, 1000, endpoint=False)\n It = I3[:, None]*np.cos(2*np.pi*t[None, :])\n Dt = qnoise_fit.dSiidV(It, R, T)\n D3 = qnoise_fit.Fcoef(Dt, t, 1)[0]\n return G_env*D3+G_lin*I3", "def stack_red_detect(self):\n self.redundancy_pool.clear()\n\n for nslice in np.arange(self.nz-1):\n self._red_detect_(nslice, thresh = 1.0)\n\n # OK, let's check the the size of the pool and remove them one by one.\n dist_3d = np.zeros((0, 4)) # create an empty array to save z, y, x, f\n\n\n for sl_key, sl_value in self.redundancy_pool.items():\n z_start = sl_value.z_marker # where does the z_marker starts\n z_list = np.array(sl_value.list) # convert it into a 2d array\n z_key = 's_' + format(z_start, '03d')\n zframe_0 = self.z_dense[z_key]\n z_identifier = int(sl_key[3:]) - z_start*1000 # which cell?\n\n pz = self.z_step*np.inner(z_list[:,0], z_list[:,1])/z_list[:,1].sum() # weighted average estimation\n py, px = zframe_0[z_identifier, 0:2] # The x-y coordinates\n 
pf = zframe_0[z_identifier, 4] # the fluorescence\n\n\n new_entry = np.array([[pz, py, px, pf]])\n dist_3d = np.concatenate((dist_3d, new_entry), axis = 0)\n\n ord_z = np.argsort(dist_3d[:,0], axis = 0)\n # sort in the order of Z.\n\n\n self.dist_3d = dist_3d[ord_z, :]\n\n return dist_3d", "def evaluate_acc(\n model,\n ds\n):\n n = 0\n correct = 0\n for batch_x, batch_y in ds:\n batch_pred = get_model_prediction(model, batch_x)\n correct += tf.math.reduce_sum(\n tf.cast(batch_pred == batch_y, dtype=tf.int32)\n )\n n += batch_y.shape[0]\n return correct / n", "def error_function(prediction_dict, use_example_flags):\n\n predicted_flux_matrix_w_m02 = numpy.mean(\n prediction_dict[prediction_io.SCALAR_PREDICTIONS_KEY][\n use_example_flags, ...\n ],\n axis=-1\n )\n actual_flux_matrix_w_m02 = prediction_dict[\n prediction_io.SCALAR_TARGETS_KEY\n ][use_example_flags, :]\n\n predicted_net_flux_matrix_w_m02 = (\n predicted_flux_matrix_w_m02[:, 0] -\n predicted_flux_matrix_w_m02[:, 1]\n )\n actual_net_flux_matrix_w_m02 = (\n actual_flux_matrix_w_m02[:, 0] -\n actual_flux_matrix_w_m02[:, 1]\n )\n\n net_flux_sse_w2_m04 = numpy.sum(\n (predicted_net_flux_matrix_w_m02 - actual_net_flux_matrix_w_m02)\n ** 2\n )\n raw_flux_sse_w2_m04 = numpy.sum(\n (predicted_flux_matrix_w_m02 - actual_flux_matrix_w_m02) ** 2\n )\n\n num_examples = actual_flux_matrix_w_m02.shape[0]\n flux_mse_w_m02 = (\n (net_flux_sse_w2_m04 + raw_flux_sse_w2_m04) / (3 * num_examples)\n )\n\n predicted_hr_matrix_k_day01 = numpy.mean(\n prediction_dict[prediction_io.VECTOR_PREDICTIONS_KEY][\n use_example_flags, ...\n ],\n axis=-1\n )\n actual_hr_matrix_k_day01 = prediction_dict[\n prediction_io.VECTOR_TARGETS_KEY\n ][use_example_flags, ...]\n\n weight_matrix_k_day01 = numpy.maximum(\n numpy.absolute(predicted_hr_matrix_k_day01),\n numpy.absolute(actual_hr_matrix_k_day01)\n )\n heating_rate_dwmse_k3_day03 = numpy.mean(\n weight_matrix_k_day01 *\n (predicted_hr_matrix_k_day01 - actual_hr_matrix_k_day01) ** 2\n )\n\n return (\n scaling_factor_for_dwmse * heating_rate_dwmse_k3_day03 +\n scaling_factor_for_flux_mse * flux_mse_w_m02\n )", "def test_3D_m6_2k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)", "def softmax_error_mod3(X, Y, theta, temp_parameter):\n pred = predict(X, theta, temp_parameter)\n pred = np.mod(pred, 3)\n\n return 1- np.mean(pred == Y)", "def mse(observed, predicted):\n return np.sqrt(np.mean((observed - predicted)**2))", "def test_3d_plot(self):\n db = pd.HDFStore('test.h5')\n df_iv = db['iv']\n db.close()\n\n date = pd.to_datetime('2015-04-01')\n self.full_iv.get_data()\n df_date0 = self.full_iv.df_all.query('date == %r' % date)\n df_date1 = df_iv.query('date == %r' % date)\n df_date = pd.concat([df_date0, df_date1])\n \"\"\":type: pd.DataFrame\"\"\"\n\n x = df_date['dte']\n y = df_date['strike']\n z = df_date['impl_vol']\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n # noinspection PyUnresolvedReferences\n ax.plot_trisurf(x, y, z, cmap=cm.jet, linewidth=0.2)\n # ax.plot_wireframe(x, y, z, rstride=1, cstride=1)\n plt.show()", "def plot_3D_compare_voxels(Y_data_test, Y_pred_data, X_data_test, ref_shape):\n sample_len = Y_data_test.shape[0]\n for i 
in np.arange(0, sample_len):\n true_lab = Y_data_test[i, ]\n true_loc = np.where(true_lab == 1)\n pred_lab = Y_pred_data[i, ]\n pred_loc = np.where(pred_lab == 1)\n volume = X_data_test[i, ]\n voxels = ~(volume==0)\n fig = plt.figure(i)\n ax = plt.axes(projection=\"3d\")\n axl = plt.gca()\n axl.set_xlim3d([0, ref_shape[0]])\n axl.set_ylim3d([0, ref_shape[1]])\n axl.set_zlim3d([0, ref_shape[2]])\n vx = fig.gca(projection='3d')\n vx.voxels(voxels, facecolors=volume, edgecolor='k')\n ax.scatter3D(true_loc[0], true_loc[1], true_loc[2], marker=\".\", alpha=0.9)\n ax.scatter3D(pred_loc[0], pred_loc[1], pred_loc[2], marker=\".\", alpha=0.05)\n\n fig.set_facecolor('black')\n ax.set_facecolor('black')\n ax.grid(False)\n ax.w_xaxis.pane.fill = False\n ax.w_yaxis.pane.fill = False\n ax.w_zaxis.pane.fill = False\n\n ax.set_xlabel('Width', c='white')\n ax.set_ylabel('Depth', c='white')\n ax.set_zlabel('Height', c='white')\n\n plt.show()", "def is3D(data):\n return data.find(\"x3\") != -1 and data.find(\"y3\") != -1 and data.find(\"z3\") != -1", "def calculate_shape_decreases_3D_Net(self, input_crop_size):\n cropsize_x, cropsize_y, cropsize_z = input_crop_size\n input_crop = torch.ones((1, cropsize_z, cropsize_x, cropsize_y))\n net_output, _ = self.forward_net(input_crop)\n _, outsize_z, outsize_y, outsize_x = net_output.size()\n\n return cropsize_x-outsize_x, cropsize_y-outsize_y, cropsize_z-outsize_z", "def get_pred(self, pred):\n self.x_pred, self.y_pred = pred[0, 0], pred[0, 1]\n x_eye, y_eye = self.x_pred * 2 - 1, self.y_pred * 2 - 1\n self.display_pred(x_eye, y_eye)\n # x /= 1563\n # y /= 1093\n self.x_pred *= self.width\n self.y_pred *= self.height\n\n self.update_camera()\n\n # self.x_offset, self.y_offset = self.x_pred - self.last_x, - (self.y_pred - self.last_y)\n # self.last_x, self.last_y = self.x_pred, self.y_pred\n # self.x_offset *= self.sensitivity\n # self.y_offset *= self.sensitivity\n # self.yaw, self.pitch = self.yaw - self.x_offset, self.pitch + self.y_offset\n\n # Update the rotation and the View matrices\n # self.rot_y(self.yaw * np.pi / 180)\n # self.rot_x(self.pitch * np.pi / 180)\n # self.view = np.dot(self.rot_mat_y, self.rot_mat_x)\n # self.game_program['u_view'] = self.view.astype(np.float32)\n\n # self.update()", "def calculate_d3ct(self):\n data = deepcopy(self.ddct)\n data = data.set_index(['cell_line', 'replicate', 'Assay', 'time', 'treatment'])\n control = data.query('treatment == \"Control\"')#.reset_index(drop=True)\n tgfb = data.query('treatment == \"TGFb\"')#.reset_index(drop=True)\n control.index = control.index.droplevel(4)\n tgfb.index = tgfb.index.droplevel(4)\n return tgfb / control", "def ve(self) -> float:\n a = np.sum(np.abs(self.predicted - self.true))\n b = np.sum(self.true)\n return float(1 - (a / b))", "def func_d23_318(n, series):\n if series == \"3D3\":\n try: \n return np.sqrt((3*os_3D3[str(n)]*wl_3D3[str(n)]*1e-9*hbar*e**2)/(4*np.pi*m_e*c))\n except:\n return 0", "def RMSE(self, y_true, y_pred):\n try:\n l = y_true.shape[1]\n except:\n y_true.shape += (1,) # avoiding (l,) shape problem\n y_pred.shape += (1,) # avoiding (l,) shape problem\n l = 1\n\n rmse = np.zeros(l)\n\n for i in range(l):\n actual, forecast = self.filter(y_true[:,i],y_pred[:,i])\n rmse[i] = np.sqrt(np.mean((actual - forecast) ** 2))\n\n self.rmse = rmse\n return rmse", "def prediction_only(state_est_prev, cov_est_prev, delta_sr, delta_sl):\n\n theta = state_est_prev[2, 0]\n delta_s = (delta_sr + delta_sl) / 2\n delta_theta = (delta_sr - delta_sl) / b\n\n Fx = jacobianF_x(theta, 
delta_s, delta_theta)\n Fu = jacobianF_u(theta, delta_s, delta_theta)\n\n ## Prediciton step\n # estimated mean of the state\n state_est_a_priori = state_est_prev + np.array(\n [[delta_s * np.cos(theta + delta_theta / 2)], [delta_s * np.sin(theta + delta_theta / 2)], [delta_theta]])\n\n # Estimated covariance of the state\n cov_est_a_priori = np.dot(Fx, np.dot(cov_est_prev, Fx.T)) + np.dot(Fu, np.dot(R, Fu.T))\n\n return state_est_a_priori, cov_est_a_priori", "def faceDivz(self):\n if(self.dim < 3):\n return None\n if getattr(self, '_faceDivz', None) is None:\n # The number of cell centers in each direction\n n = self.vnC\n # Compute faceDivergence operator on faces\n D3 = kron3(ddx(n[2]), speye(n[1]), speye(n[0]))\n # Compute areas of cell faces & volumes\n S = self.r(self.area, 'F', 'Fz', 'V')\n V = self.vol\n self._faceDivz = sdiag(1/V)*D3*sdiag(S)\n return self._faceDivz", "def trap_depth_old(V,X,Y,Z,Im,Jm,Km,debug=False): \n from project_parameters import debug\n #from all_functions import sum_of_e_field\n def a(a,N):\n \"\"\"Shortcut function to convert array x into a row vector.\"\"\" \n a=np.ravel(a, order='F') # Same order\n return a\n def index_sort(y,x):\n \"\"\"Takes in two lists of the same length and returns y sorted by the indexing of x sorted.\"\"\"\n xs=np.sort(x)\n ix=np.argsort(x)\n ys=np.ones(len(y)) #Sorted by the sorting defined by f being sorted. \n for i in range(len(y)):\n j=ix[i]\n ys[i]=y[j]\n return ys\n if len(V.shape)!=3:\n return('Problem with find_saddle.py dimensionalities.\\n')\n N1,N2,N3=V.shape\n N=N1*N2*N3\n f=V\n [Ex,Ey,Ez]=np.gradient(f,abs(X[1]-X[0]),abs(Y[1]-Y[0]),abs(Z[1]-Z[0]))\n E=np.sqrt(Ex**2+Ey**2+Ez**2)\n fs,Es=a(f,N),a(E,N) # Convert 3D to 1D array\n fs,Es=np.real(fs),np.real(Es)\n # identify the escape position and height by checking each point\n minElectricField=max(fs) # initialize as maximum E field magnitude\n distance=0\n escapeHeight=1\n escapePosition=[0,0,0]\n for i in range(N1):\n for j in range(N2):\n for k in range(N3):\n if [i,j,k]==[Im,Jm,Km]:\n Vm=V[i,j,k]\n elif E[i,j,k]<minElectricField:\n minElectricField=E[i,j,k]\n escapeHeight=V[i,j,k]\n escapePosition=[i,j,k]\n distance=abs(Im+Jm+Km-i-j-k) \n if debug.trap_depth: # plot sortings of potential and electric field to view escape position\n plt.plot(np.sort(fs)) \n plt.title('sorted potential field')\n plt.show()\n plt.plot(np.sort(Es)) \n plt.title('sorted electric field')\n plt.show()\n q1=index_sort(fs,Es) \n plt.title('potential field sorted by sorted indexing of electric field')\n plt.plot(q1)\n plt.show()\n q2=index_sort(Es,fs) \n plt.title('electric field sorted by sorted indexing of potential field')\n plt.plot(q2)\n plt.show() \n check=1 \n if debug.trap_depth: \n print minElectricField,escapeHeight,escapePosition,distance \n if distance<check:\n print('trap_depth.py: Escape point too close to trap minimum. Improve grid resolution or extend grid.')\n if escapeHeight>0.2:\n print('trap_depth.py: Escape point parameter too high. Improve grid resolution or extend grid.')\n D=escapeHeight-Vm\n [Ie,Je,Ke]=escapePosition\n [Xe,Ye,Ze]=[X[Ie],Y[Je],Z[Ke]] \n return [D,Xe,Ye,Ze]", "def forward(self,y_out, y_truth): \n result = (np.square(np.subtract(y_out, y_truth)))\n #########################################################################\n # TODO: #\n # Implement the forward pass and return the output of the MSE loss. 
#\n #########################################################################\n\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n \n return result", "def modelStats(model,data, azSpacing=0.5,zenSpacing=0.5):\n az = np.linspace(0,360.,int(360./azSpacing)+1)\n zz = np.linspace(0,90,int(90./zenSpacing)+1)\n\n azCtr = 0\n iCtr = 0\n chi = 0\n SS_tot = 0\n SS_res = 0\n SS_reg = 0\n test_ctr = 0\n reg_ctr = 0\n\n for i in az:\n if(i - azSpacing/2. < 0) :\n criterion = (data[:,0] < (i + azSpacing/2.)) | (data[:,0] > (360. - azSpacing/2.) )\n else:\n criterion = (data[:,0] < (i + azSpacing/2.)) & (data[:,0] > (i - azSpacing/2.) )\n\n ind = np.array(np.where(criterion))\n \n if ind.size > 0:\n tmp = data[ind,:]\n tmp = tmp.reshape(tmp.shape[1],tmp.shape[2])\n jCtr = 0\n for j in zz:\n # disregard any observation above 80 in zenith, too noisy\n if j < 80. + zenSpacing:\n criterion = (tmp[:,1] < (j + zenSpacing/2.)) & (tmp[:,1] > (j - zenSpacing/2.) ) \n tmpZ = np.array( tmp[indZ[:],2] )\n if indZ.size > 3 and not np.isnan(model[iCtr,jCtr]): # and (model[iCtr,jCtr] > 0.00001 or model[iCtr,jCtr] < -0.00001):\n test_data = reject_outliers(reject_abs( tmp[indZ,2],70. ),5.)\n #print(i,j,test_data,model[iCtr,jCtr])\n if test_data.size > 0:\n y_mean = np.mean(test_data)\n SS_reg += (model[iCtr,jCtr] - y_mean)**2\n reg_ctr += 1\n for obs in test_data:\n #chi += (obs - model[iCtr,jCtr]) ** 2 / model[iCtr,jCtr]\n SS_tot += (obs - y_mean) ** 2\n SS_res += (obs - model[iCtr,jCtr])**2\n test_ctr += 1\n jCtr += 1\n iCtr += 1\n\n rr = 1. - SS_res/SS_tot\n\n rms = np.sqrt(SS_res) * 1./test_ctr\n\n # gives an indication of how different the models would be between the test and training data set \n rms2 = np.sqrt(SS_reg) * 1./reg_ctr\n\n # Used in matlab instead of rr\n norm_res = np.sqrt(SS_res)\n mse = 1./(2.*test_ctr) * SS_res\n return mse,rr,rms,rms2", "def runMT3D(self):\n \n # write mt3dms input\n self.__mt.write_input()\n # run mt3dms\n self.__mt.run_model()", "def epoch_diagnostics(self, train_loss, train_err, test_loss, test_err):\n m = self.nbatches\n logging.info(\"Epoch diagnostics computation\")\n\n layernum = 0\n layer_gradient_norm_sqs = []\n gavg_norm_acum = 0.0\n gavg_acum = []\n for group in self.param_groups:\n for p in group['params']:\n\n layer_gradient_norm_sqs.append([])\n gavg = self.state[p]['gavg'].cpu()\n gavg_acum.append(gavg.numpy())\n gavg_norm_acum += gavg.norm()**2 #torch.dot(gavg, gavg)\n layernum += 1\n\n gradient_norm_sqs = []\n vr_step_variance = []\n cos_acums = []\n variances = []\n\n for batch_id in range(m):\n norm_acum = 0.0\n ginorm_acum = 0.0\n vr_acum = 0.0\n layernum = 0\n cos_acum = 0.0\n var_acum = 0.0\n for group in self.param_groups:\n for p in group['params']:\n param_state = self.state[p]\n\n gktbl = param_state['gktbl']\n gavg = param_state['gavg'].type_as(p.data).cpu()\n\n gi = gktbl[batch_id, :]\n var_norm_sq = (gi-gavg).norm()**2 #torch.dot(gi-gavg, gi-gavg)\n norm_acum += var_norm_sq\n ginorm_acum += gi.norm()**2 #torch.dot(gi, gi)\n layer_gradient_norm_sqs[layernum].append(var_norm_sq)\n\n gktbl_old = param_state['gktbl_old']\n gavg_old = param_state['gavg_old'].type_as(p.data).cpu()\n gi_old = gktbl_old[batch_id, :]\n #pdb.set_trace()\n vr_step = gi - gi_old + gavg_old\n vr_acum += (vr_step - gavg).norm()**2 #torch.dot(vr_step - gavg, vr_step - gavg)\n cos_acum += torch.sum(gavg*gi)\n\n var_acum += (gi - gavg).norm()**2\n\n layernum += 
1\n gradient_norm_sqs.append(norm_acum)\n vr_step_variance.append(vr_acum)\n cosim = cos_acum/math.sqrt(ginorm_acum*gavg_norm_acum)\n #pdb.set_trace()\n cos_acums.append(cosim)\n variances.append(var_acum)\n\n variance = sum(variances)/len(variances)\n\n print(\"mean cosine: {}\".format(sum(cos_acums)/len(cos_acums)))\n\n #pdb.set_trace()\n\n with open('stats/{}fastdiagnostics_epoch{}.pkl'.format(self.test_name, self.epoch), 'wb') as output:\n pickle.dump({\n 'train_loss': train_loss,\n 'train_err': train_err,\n 'test_loss': test_loss,\n 'test_err': test_err,\n 'epoch': self.epoch,\n #'layer_gradient_norm_sqs': layer_gradient_norm_sqs,\n #'gradient_norm_sqs': gradient_norm_sqs,\n #'vr_step_variance': vr_step_variance,\n #'cosine_distances': cos_acums,\n #'variances': variances,\n 'variance': variance,\n #'gavg_norm': gavg_norm_acum,\n #'gavg': gavg_acum,\n #'iterate_distances': self.inrun_iterate_distances,\n #'grad_distances': self.inrun_grad_distances,\n }, output)\n print(\"Epoch diagnostics saved\")\n #pdb.set_trace()\n\n self.inrun_iterate_distances = []\n self.inrun_grad_distances = []", "def _get_max_preds_3d(heatmaps):\n assert isinstance(heatmaps, np.ndarray), 'heatmaps should be numpy.ndarray'\n assert heatmaps.ndim == 5, 'heatmaps should be 5-ndim'\n N, K, D, H, W = heatmaps.shape\n heatmaps_reshaped = heatmaps.reshape((N, K, -1))\n idx = np.argmax(heatmaps_reshaped, 2).reshape((N, K, 1))\n maxvals = np.amax(heatmaps_reshaped, 2).reshape((N, K, 1))\n preds = np.zeros((N, K, 3), dtype=np.float32)\n _idx = idx[..., 0]\n preds[..., 2] = _idx // (H * W)\n preds[..., 1] = _idx // W % H\n preds[..., 0] = _idx % W\n preds = np.where(maxvals > 0.0, preds, -1)\n return preds, maxvals", "def computeAmbient(self):\n maxMDP = -(1 + 1e-10)\n array_MDP = minkowskiArrayDot(self.examples, self.centroid)\n array_MDP[array_MDP > maxMDP] = maxMDP\n\n # multiplies last column of examples by-1\n dMDP_dcent = np.copy(self.examples)\n #dMDP_dcent[:, -1] *= -1\n\n distances = np.arccosh(-array_MDP)\n scales = (-2/len(distances)) * distances / np.sqrt((array_MDP ** 2) - 1)\n for row in range(len(dMDP_dcent)):\n dMDP_dcent[row, :] *= scales[row]\n grad_temp = np.sum(dMDP_dcent, axis=0)\n return grad_temp.reshape((grad_temp.shape[0], 1))\n # return np.matmul(dMDP_dcent.T, scales)", "def get_pred(sn, el, ns, set_id, step, compl):\n\n f = h5py.File(\"ref_%s%s_s%s.hdf5\" % (ns, set_id, step), 'r')\n\n print f.get('euler').shape\n\n euler = f.get('euler')[sn, ...]\n euler = euler.swapaxes(0, 1)\n\n print euler.shape\n\n et = np.zeros((el**3, 6))\n ep = np.zeros((el**3, 6))\n\n for ii in xrange(6):\n comp = compl[ii]\n tmp = f.get('r%s_epsilon_t' % comp)[sn, ...]\n et[:, ii] = tmp.reshape(el**3)\n\n tmp = f.get('r%s_epsilon_p' % comp)[sn, ...]\n ep[:, ii] = tmp.reshape(el**3)\n\n f.close()\n\n \"\"\"find the deviatoric strain tensor\"\"\"\n isdev = np.all(np.isclose(np.sum(et[:, 0:3]), np.zeros(el**3)))\n print \"is trace(et) == 0?: %s\" % isdev\n\n et_ = np.zeros(et.shape)\n et_[:, 0:3] = et[:, 0:3] - (1./3.)*np.expand_dims(np.sum(et[:, 0:3], 1), 1)\n et_[:, 3:] = et[:, 3:]\n\n isdev = np.all(np.isclose(np.sum(et_[:, 0:3]), np.zeros(el**3)))\n print \"is trace(et_) == 0?: %s\" % isdev\n\n \"\"\"find the norm of the tensors\"\"\"\n en = tensnorm(et_)\n\n print \"sn: %s\" % sn\n print \"min(en): %s\" % en.min()\n print \"max(en): %s\" % en.max()\n\n \"\"\"normalize the deviatoric strain tensor\"\"\"\n et_n = et_/np.expand_dims(en, 1)\n\n isnorm = np.all(np.isclose(tensnorm(et_n), np.ones(el**3)))\n print \"is 
norm(et_n) == 0?: %s\" % isnorm\n\n \"\"\"write the normalized deviatioric total strain and plastic strains\n in matrix form\"\"\"\n et_m = tens2mat(et_n)\n\n epn = tensnorm(ep)\n # epn_max = np.argmax(epn)\n orig = epn\n # print \"max(norm(ep)): %s\" % epn[epn_max]\n # print \"euler @ max(norm(ep)): %s\" % str(euler[epn_max, ...])\n # print et[epn_max, ...]\n # print et_[epn_max, ...]\n # print et_n[epn_max, ...]\n\n \"\"\"find the eigenvalues of the normalized tensor\"\"\"\n eigval_, g_p2s_ = LA.eigh(et_m)\n del et_m\n\n print \"eigval_ example (before sort): %s\" % str(eigval_[0, :])\n print \"g_p2s_ example (before sort):\"\n print g_p2s_[0, ...]\n\n \"\"\"sort the eigenvalues/vectors by highest to lowest magnitude\n eigenvalue\"\"\"\n esort = np.argsort(np.abs(eigval_))[:, ::-1]\n\n eigval = np.zeros(eigval_.shape)\n g_p2s = np.zeros(g_p2s_.shape)\n\n for ii in xrange(el**3):\n eigval[ii, :] = eigval_[ii, esort[ii, :]]\n for jj in xrange(3):\n g_p2s[ii, jj, :] = g_p2s_[ii, jj, esort[ii, :]]\n\n print \"eigval example (after sort): %s\" % str(eigval[0, :])\n print \"g_p2s example (after sort):\"\n print g_p2s[0, ...]\n\n \"\"\"find the deformation mode\"\"\"\n theta = np.arctan2(-2*eigval[:, 0]-eigval[:, 2], np.sqrt(3)*eigval[:, 2])\n theta += np.pi*(theta < 0)\n\n print \"min(theta): %s\" % np.str(theta.min()*180./np.pi)\n print \"mean(theta): %s\" % np.str(theta.mean()*180./np.pi)\n print \"max(theta): %s\" % np.str(theta.max()*180./np.pi)\n\n \"\"\"find g_p2c = g_p2s*g_s2c\"\"\"\n g_s2c = ef.bunge2g(euler[:, 0], euler[:, 1], euler[:, 2])\n\n \"\"\"this application of einsum is validated vs loop with np.dot()\"\"\"\n g_p2c = np.einsum('...ij,...jk', g_s2c, g_p2s)\n\n phi1, phi, phi2 = ef.g2bunge(g_p2c)\n\n X = np.vstack([phi1, phi, phi2]).T\n # X = np.array(ef.g2bunge(g_p2c)).T\n # X = np.array(ef.g2bunge(g_p2c.swapaxes(1, 2))).T\n # X = np.array(ef.g2bunge(g_p2s)).T\n # X = np.array(ef.g2bunge(g_p2s.swapaxes(1, 2))).T\n # X = np.array(ef.g2bunge(g_s2c)).T\n # X = np.array(ef.g2bunge(g_s2c.swapaxes(1, 2))).T\n\n del phi1, phi, phi2\n\n pred = rr.eval_func(theta, X, en).real\n\n print \"min(orig): %s\" % orig.min()\n print \"min(pred): %s\" % pred.min()\n print \"max(orig): %s\" % orig.max()\n print \"max(pred): %s\" % pred.max()\n\n return orig, pred", "def generate_training_data_3D():\n c11 = np.random.uniform(0.05, 1.50, 20)\n c12 = np.random.uniform(-1.50, 1.50, 20)\n c13 = np.random.uniform(-2.50, -0.05, 20)\n c21 = np.random.uniform(-1.50, -0.05, 20)\n c22 = np.random.uniform(-1.50, 1.50, 20)\n c23 = np.random.uniform(0.05, 2.50, 20)\n c1 = np.array([[i, j, k] for i, j, k in zip(c11, c12, c13)])\n c2 = np.array([[i, j, k] for i, j, k in zip(c21, c22, c23)])\n\n points = plt.figure()\n ax = points.add_subplot(111, projection='3d')\n ax.scatter(c1[:, 0], c1[:, 1], c1[:, 2], c='r', marker='^')\n ax.scatter(c2[:, 0], c2[:, 1], c2[:, 2], c='b', marker='*')\n plt.show()\n plt.close()\n\n return c1, c2", "def test_el3s():\n N = 999\n xs = (np.random.rand(N)) * 5\n kcs = (np.random.rand(N) - 0.5) * 10\n ps = (np.random.rand(N) - 0.5) * 10\n\n res0 = [el30(x, kc, p) for x, kc, p in zip(xs, kcs, ps)]\n res1 = el3v(xs, kcs, ps)\n res2 = el3(xs, kcs, ps)\n\n assert np.allclose(res0, res1)\n assert np.allclose(res1, res2)", "def norm3d(self) -> float:\n\n return self.v3ddict.norm3d()", "def K3(p, E):\n B, C, D = p\n K_ = B * E / ((C + E**2)**2 + D*E**2)\n K_ = K_*(K_>0)\n return K_", "def get_fairlead_force_3d(self, index):\n fx = c_double(-999.9)\n fy = c_double(-999.9)\n fz = 
c_double(-999.9)\n Map.lib.map_get_fairlead_force_3d( pointer(fx), pointer(fy), pointer(fz), self.f_type_d, index, self.status, pointer(self.ierr))\n return fx.value, fy.value, fz.value", "def r3d(**kwargs):\n\n return _video_resnet('r3d',\n block=BasicBlock,\n conv_makers=[Conv3DSimple] * 4,\n layers=[NUM_LAYER, NUM_LAYER, NUM_LAYER, NUM_LAYER],\n stem=BasicStem, **kwargs)", "def get_3d_valid(self, jnts=14):\n\n to_select, to_sort = dataset_indices(self.dataset_name, jnts)\n\n return self._data_valid['3d'][:, to_select, :][:, to_sort, :]", "def process_projection_3D(self, input_rays):\n if not bool(self.optical_system):\n return {}\n if self.dimension != 3:\n raise RuntimeError(\"Should not call 3D function on 2D system\")\n \n result = {\"rays\": {}}\n active_rays = {}\n input_ray_fields = input_rays.keys()\n \n intersect_result = self.optical_system.intersect(input_rays)\n \n # compile dead rays\n if self.compile_dead_rays:\n dead_rays = {}\n is_dead = tf.logical_not(intersect_result[\"valid\"])\n for field in input_ray_fields:\n dead_rays[field] = tf.boolean_mask(\n input_rays[field],\n is_dead\n )\n if bool(self.dead_ray_length):\n # change the length of the dead rays\n dead_rays[\"x_end\"] = dead_rays[\"x_start\"] + \\\n self.dead_ray_length * (dead_rays[\"x_end\"] - \n dead_rays[\"x_start\"])\n dead_rays[\"y_end\"] = dead_rays[\"y_start\"] + \\\n self.dead_ray_length * (dead_rays[\"y_end\"] - \n dead_rays[\"y_start\"])\n dead_rays[\"z_end\"] = dead_rays[\"z_start\"] + \\\n self.dead_ray_length * (dead_rays[\"z_end\"] - \n dead_rays[\"z_start\"])\n \n self._dead_rays.append(dead_rays)\n result[\"rays\"][\"dead\"] = dead_rays\n\n # update the input_rays with the projections\n input_rays[\"x_end\"] = tf.where(\n intersect_result[\"valid\"], intersect_result[\"x\"], input_rays[\"x_end\"]\n )\n input_rays[\"y_end\"] = tf.where(\n intersect_result[\"valid\"], intersect_result[\"y\"], input_rays[\"y_end\"]\n )\n input_rays[\"z_end\"] = tf.where(\n intersect_result[\"valid\"], intersect_result[\"z\"], input_rays[\"z_end\"]\n )\n \n # gather the boundary types\n boundary_type = tf.gather(\n self.optical_system._merged[\"catagory\"], \n intersect_result[\"gather_trig\"]\n )\n \n # compile active_rays\n active_rays = {}\n is_active = tf.logical_and(\n intersect_result[\"valid\"],\n tf.equal(boundary_type, OPTICAL)\n )\n for field in input_ray_fields:\n active_rays[field] = tf.boolean_mask(\n input_rays[field],\n is_active\n )\n if self.compile_active_rays:\n self._active_rays.append(active_rays)\n result[\"rays\"][\"active\"] = active_rays\n \n # compile finished_rays\n if self.compile_finished_rays:\n finished_rays = {}\n is_finished = tf.logical_and(\n intersect_result[\"valid\"],\n tf.equal(boundary_type, TARGET)\n )\n for field in input_ray_fields:\n finished_rays[field] = tf.boolean_mask(\n input_rays[field],\n is_finished\n )\n self._finished_rays.append(finished_rays)\n result[\"rays\"][\"finished\"] = finished_rays\n \n # compile stopped_rays\n if self.compile_stopped_rays:\n stopped_rays = {}\n is_stopped = tf.logical_and(\n intersect_result[\"valid\"],\n tf.equal(boundary_type, STOP)\n )\n for field in input_ray_fields:\n stopped_rays[field] = tf.boolean_mask(\n input_rays[field],\n is_stopped\n )\n self._stopped_rays.append(stopped_rays)\n result[\"rays\"][\"stopped\"] = stopped_rays\n \n # compile optical boundaries\n gather_optical = tf.boolean_mask(\n intersect_result[\"gather_trig\"],\n is_active\n )\n optical = {\n field: tf.gather(\n 
self.optical_system._amalgamated_optical[field],\n gather_optical\n )\n for field in self.optical_system._amalgamated_optical.keys()\n }\n optical_norm = tf.boolean_mask(\n intersect_result[\"norm\"], is_active\n )\n try:\n if optical_norm.shape[0] > 0:\n optical[\"norm\"] = optical_norm\n except(TypeError):\n optical[\"norm\"] = tf.zeros((0,))\n result[\"optical\"] = optical\n \n # compile techinical boundaries\n # This step is only performed if the both flags are set.\n if self.compile_technical_intersections and self.compile_stopped_rays:\n gather_stop = tf.boolean_mask(\n (\n intersect_result[\"gather_trig\"] - \n self.optical_system._optical_count\n ),\n is_stopped\n )\n stop = {\n field: tf.gather(\n self.optical_system._amalgamated_stop[field],\n gather_stop\n )\n for field in self.optical_system._amalgamated_stop.keys()\n }\n stop_norm = tf.boolean_mask(\n intersect_result[\"norm\"], is_stopped\n )\n try:\n if stop_norm.shape[0] > 0:\n stop[\"norm\"] = stop_norm\n except(TypeError):\n stop[\"norm\"] = tf.zeros((0,))\n result[\"stop\"] = stop\n \n if (self.compile_technical_intersections and \n self.compile_finished_rays\n ):\n gather_target = tf.boolean_mask(\n (\n intersect_result[\"gather_trig\"] - \n self.optical_system._optical_count -\n self.optical_system._stop_count\n ),\n is_finished\n )\n target = {\n field: tf.gather(\n self.optical_system._amalgamated_target[field],\n gather_target\n )\n for field in \\\n self.optical_system._amalgamated_target.keys()\n }\n finished_norm = tf.boolean_mask(\n intersect_result[\"norm\"], is_finished\n )\n try:\n if finished_norm.shape[0] > 0:\n target[\"norm\"] = finished_norm\n except(TypeError):\n target[\"norm\"] = tf.zeros((0,))\n result[\"target\"] = target\n \n return result", "def compute_rwse(self, traffic_density):\r\n # Ensure experiment has not beem done before\r\n np.random.seed(2020)\r\n file_names = os.listdir(self.dirName)\r\n if traffic_density+'rwse' in file_names:\r\n print(\"This experiment has been done already!\")\r\n return None\r\n elif traffic_density == '':\r\n print(\"select a traffic dinsity level!\")\r\n return None\r\n rwse_dict = {'vel_m':0,\r\n 'lat_vel':1,\r\n 'vel_y':2,\r\n 'vel_f':3,\r\n 'vel_fadj':4}\r\n\r\n pred_step_n = self.pred_h*10+1\r\n splits_n = 6 # number of splits across an entire trajectory\r\n pred_arrs = [np.zeros([self.episode_n*self.traj_n*6,\r\n pred_step_n]) for i in range(5)]\r\n truth_arrs = [np.zeros([self.episode_n*self.traj_n*6,\r\n pred_step_n]) for i in range(5)]\r\n _row = 0\r\n\r\n for episode_id in self.test_data.test_episodes[:self.episode_n]:\r\n # for episode_id in [1289]:\r\n st_seq, cond_seq, st_arr, targ_arr = self.episodeSetup(episode_id)\r\n self.gen_model.max_pc = max(st_arr[:, self.gen_model.indx_m['pc']])\r\n self.gen_model.min_pc = min(st_arr[:, self.gen_model.indx_m['pc']])\r\n if len(st_seq) >= 6:\r\n splits_n = 6\r\n else:\r\n splits_n = len(st_seq)\r\n obs_n = self.test_data.data_obj.obs_n\r\n traj_splits = np.random.choice(range(19, 19+len(st_seq)), splits_n, replace=False)\r\n # leave value of 19 to ensure scenarios remain consistent\r\n for split in traj_splits:\r\n st_seq_i, cond_seq_i, bc_der_i, _, st_i, targ_i = self.sceneSetup(st_seq,\r\n cond_seq,\r\n st_arr,\r\n targ_arr,\r\n current_step=split,\r\n pred_h=self.pred_h)\r\n targ_i.shape = (1, pred_step_n, 5)\r\n st_init = np.repeat(np.reshape(st_i[0,:], [1,17]), self.traj_n, axis=0)\r\n\r\n actions, _, _ = self.policy.get_actions([st_seq_i, cond_seq_i], bc_der_i,\r\n traj_n=self.traj_n, 
pred_h=self.pred_h)\r\n st_pred = self.gen_model.forwardSim(st_init, actions, pred_step_n)\r\n\r\n truth_arrs[0][_row:_row+self.traj_n, :] = \\\r\n st_i[:,self.gen_model.indx_m['vel']]\r\n pred_arrs[0][_row:_row+self.traj_n, :] = \\\r\n st_pred[:,:,self.gen_model.indx_m['vel']]\r\n\r\n truth_arrs[1][_row:_row+self.traj_n, :] = \\\r\n st_i[:,self.gen_model.indx_m['act_lat_p']]\r\n pred_arrs[1][_row:_row+self.traj_n, :] = \\\r\n st_pred[:,:,self.gen_model.indx_m['act_lat_p']]\r\n\r\n truth_arrs[2][_row:_row+self.traj_n, :] = \\\r\n st_i[:,self.gen_model.indx_y['vel']]\r\n pred_arrs[2][_row:_row+self.traj_n, :] = \\\r\n st_pred[:,:,self.gen_model.indx_y['vel']]\r\n\r\n truth_arrs[3][_row:_row+self.traj_n, :] = \\\r\n st_i[:,self.gen_model.indx_f['vel']]\r\n pred_arrs[3][_row:_row+self.traj_n, :] = \\\r\n st_pred[:,:,self.gen_model.indx_f['vel']]\r\n\r\n truth_arrs[4][_row:_row+self.traj_n, :] = \\\r\n st_i[:,self.gen_model.indx_fadj['vel']]\r\n pred_arrs[4][_row:_row+self.traj_n, :] = \\\r\n st_pred[:,:,self.gen_model.indx_fadj['vel']]\r\n\r\n\r\n _row += self.traj_n\r\n # return st_pred\r\n print('Episode ', episode_id, ' has been completed!')\r\n for key in rwse_dict.keys():\r\n rwse_dict[key] = self.root_weightet_sqr(truth_arrs[rwse_dict[key]], \\\r\n pred_arrs[rwse_dict[key]])\r\n\r\n with open(self.dirName+'/'+ traffic_density + 'rwse', \"wb\") as f:\r\n pickle.dump(rwse_dict, f)\r\n return rwse_dict", "def flow_pc3d(pcl_c3d, flow_grid, flow_mask_grid, K_cur, feat_comm_keys, use_normal, sparse_nml_opts=None, return_stat=False, timer=None):\n if timer is not None:\n timer.log(\"flow_pc3d start\", 1, True)\n\n batch_size = flow_grid.shape[0]\n\n ### compose the flow to xyz\n xyz_grid = pcl_c3d.grid.feature['xyz']\n xyz_flat = xyz_grid.reshape(batch_size, 3, -1)\n flow_flat = flow_grid.reshape(batch_size, 3, -1)\n flow_flat = torch.cat([flow_flat[:,:2].detach(), flow_flat[:, 2:]], dim=1) # detach the x and y dimension of the flow\n xyz_flowed_flat = xyz_flat.detach() + flow_flat # detach so that the flowed c3d loss only affects the flow gradient instead of both flow and depth. Otherwise depth could be confused. 
\n # logging.info(\"xyz_flat.detach(): %s\"%(xyz_flat.detach().requires_grad))\n\n ### mask out invalid pixels and project to image uv coordinate\n xyz_mask_grid = pcl_c3d.grid.mask\n # if False:\n if flow_mask_grid is not None:\n mask_grid = xyz_mask_grid & flow_mask_grid\n else:\n mask_grid = xyz_mask_grid \n mask_flat = mask_grid.reshape(batch_size, 1, -1)\n\n xyz_flowed_flat_list = [None]*batch_size\n uvb_list = [None]*batch_size\n new_nb = [None]*batch_size\n inview_mask_list = [None]*batch_size\n \n for ib in range(batch_size):\n if timer is not None:\n timer.log(\"uvb, inview_mask ib=%d\"%ib, 2, True)\n mask_vec = mask_flat[ib, 0]\n xyz_flowed_flat_cur = xyz_flowed_flat[[ib]][:,:,mask_vec] # 1*3*N\n\n uvb = torch.matmul(K_cur[ib], xyz_flowed_flat_cur) # 1*3*N\n uvb_1 = ( uvb / torch.clamp(torch.abs(uvb[:, [2]]), min=1e-6) ).round() #- 1 , commented because in dataset_read.py there is a K_mat2py() function converting K from matlab to python coordinate\n uvb_1[:, 2] = ib\n # uvb_list[ib] = uvb\n\n # assert (uvb[:,2] == xyz_flowed_flat_cur[:,2]).all(), \"{} {}\".format(uvb[0,2,0], xyz_flowed_flat_cur[0,2,0])\n # logging.info( \"{} {}\".format(uvb[0,2,0], xyz_flowed_flat_cur[0,2,0]) )\n ### check whether the new points are in the view of camera\n inview_mask = (uvb_1[0,0,:] > 0) & (uvb_1[0,0,:] < mask_grid.shape[3]) & (uvb_1[0,1,:] > 0) & (uvb_1[0,1,:] < mask_grid.shape[2]) & (xyz_flowed_flat_cur[0,2,:] > 0.1)\n inview_mask_list[ib] = inview_mask\n\n xyz_flowed_flat_cur = xyz_flowed_flat_cur[:,:,inview_mask]\n uvb_1 = uvb_1[:,:,inview_mask]\n # logging.info(\"diff between uvb2: {}, {}, {}\".format((uvb_1-uvb_2).max(), (uvb_1-uvb_2).min(), (uvb_1[:,:2]-uvb_2[:,:2]).mean()) )\n # logging.info(\"uvb_1.shape: {} {}\".format(uvb_1.shape, uvb.shape))\n xyz_flowed_flat_list[ib] = xyz_flowed_flat_cur\n uvb_list[ib] = uvb_1\n\n new_nb[ib] = uvb_1.shape[2]\n \n # print(\"new_nb:\", new_nb)\n if timer is not None:\n timer.log(\"cat xyz, uvb\", 1, True)\n\n xyz_flowed_flat = torch.cat(xyz_flowed_flat_list, dim=2)\n uvb_flat = torch.cat(uvb_list, dim=2)\n\n ### The occlusion check is the speed bottleneck (>0.4s), and the effect is similar to flow_mask_grid, therefore disabled\n # if timer is not None:\n # timer.log(\"occlu_mask\", 1, True)\n # ### find the duplicate points and filter out those not close to the camera\n # occlu_mask = torch.ones(uvb_flat.shape[2], dtype=torch.bool, device=mask_grid.device)\n\n # uvb_dim = [xyz_grid.shape[0], xyz_grid.shape[2], xyz_grid.shape[3]]\n # velo_proj_lin = sub2ind(uvb_dim, uvb_flat[0, 2, :], uvb_flat[0, 1, :], uvb_flat[0, 0, :] ) # B, H, W\n # dupe_proj_lin = [item for item, count in Counter(velo_proj_lin).items() if count > 1]\n # # print(\"# or dupe_proj_lin:\", len(dupe_proj_lin))\n # for dd in dupe_proj_lin:\n # pts = torch.where(velo_proj_lin == dd)[0] ### torch.where() [actually torch.nonzero(condition, as_tuple=True)] returns a tuple. 
[0] takes the array of the first dim.\n # z_min = 1e7\n # for pt_idx in pts:\n # z_cur = xyz_flowed_flat[0, 2, pt_idx]\n # if z_cur < z_min:\n # z_min = z_cur\n # min_idx = pt_idx\n # else:\n # occlu_mask[pts] = False\n # ib = uvb_flat[0, 2, pt_idx]\n # new_nb[ib] -= 1\n \n # # print(\"before occlu_mask:\", xyz_flowed_flat.shape[2])\n # xyz_flowed_flat = xyz_flowed_flat[:,:,occlu_mask]\n # uvb_flat = uvb_flat[:,:,occlu_mask]\n # # print(\"after occlu_mask:\", xyz_flowed_flat.shape[2])\n\n if timer is not None:\n timer.log(\"PCL_C3D_Flat\", 1, True)\n ### construct PCL_C3D_Flat\n flow_pcl_c3d_flat = PCL_C3D_Flat()\n flow_pcl_c3d_flat.uvb = uvb_flat\n flow_pcl_c3d_flat.feature['xyz'] = xyz_flowed_flat\n flow_pcl_c3d_flat.nb = new_nb\n\n ### need to exit early if empty, otherwise later processing will produce unpredicted result and failure in next iteration\n if any(n <= 0 for n in new_nb):\n return flow_pcl_c3d_flat, None\n # raise ValueError(\"empty pcl: {}\".format(new_nb))\n\n if timer is not None:\n timer.log(\"feat_flat\", 1, True)\n ### copy those shared features from original point cloud. Remember to apply the same masking.\n for feat in feat_comm_keys:\n feat_flat = pcl_c3d.grid.feature[feat].reshape(batch_size, 3, -1)\n feat_flat_list = [None]*batch_size\n for ib in range(batch_size):\n mask_vec = mask_flat[ib, 0]\n feat_flat_list[ib] = feat_flat[[ib]][:,:,mask_vec]\n\n ### filter out out-of-view points\n feat_flat_list[ib] = feat_flat_list[ib][:,:,inview_mask_list[ib]]\n\n feat_flat_concat = torch.cat(feat_flat_list, dim=2)\n ### filter out points duplicated on image\n # flow_pcl_c3d_flat.feature[feat] = feat_flat_concat[:,:,occlu_mask]\n flow_pcl_c3d_flat.feature[feat] = feat_flat_concat\n\n if timer is not None:\n timer.log(\"feat_grid\", 1, True)\n ### prepare xyz_grid of the flowed point cloud\n uvb_split = uvb_flat.to(dtype=torch.long).squeeze(0).transpose(0,1).split(1,dim=1) # a tuple of 3 elements of tensor N*1, only long/byte/bool tensors can be used as indices\n xyz_flowed_grid = grid_from_concat_flat_func(uvb_split, xyz_flowed_flat, xyz_grid.shape)\n mask_flowed_grid = (xyz_flowed_grid != 0).any(1, keepdim=True)\n\n if timer is not None:\n timer.log(\"calc_normal\", 1, True)\n ### calculate sparse normal\n if use_normal:\n if return_stat:\n normal_flat, nres_flat, dist_stat_flat = calc_normal(flow_pcl_c3d_flat.uvb, xyz_flowed_grid, mask_flowed_grid, sparse_nml_opts.normal_nrange, sparse_nml_opts.ignore_ib, sparse_nml_opts.min_dist_2, return_stat=return_stat)\n else:\n normal_flat, nres_flat = calc_normal(flow_pcl_c3d_flat.uvb, xyz_flowed_grid, mask_flowed_grid, sparse_nml_opts.normal_nrange, sparse_nml_opts.ignore_ib, sparse_nml_opts.min_dist_2, return_stat=return_stat)\n \n flow_pcl_c3d_flat.feature['normal'] = normal_flat\n flow_pcl_c3d_flat.feature['nres'] = nres_flat\n\n if return_stat:\n flow_pcl_c3d_flat.feature['dist_stat'] = dist_stat_flat\n\n if timer is not None:\n timer.log(\"PCL_C3D_Grid\", 1, True)\n ### construct PCL_C3D_Grid\n flow_pcl_c3d_grid = PCL_C3D_Grid()\n flow_pcl_c3d_grid.mask = mask_flowed_grid\n flow_pcl_c3d_grid.feature['xyz'] = xyz_flowed_grid\n\n for feat in feat_comm_keys:\n flow_pcl_c3d_grid.feature[feat] = grid_from_concat_flat_func(uvb_split, flow_pcl_c3d_flat.feature[feat], pcl_c3d.grid.feature[feat].shape)\n\n if use_normal:\n flow_pcl_c3d_grid.feature['normal'] = grid_from_concat_flat_func(uvb_split, flow_pcl_c3d_flat.feature['normal'], pcl_c3d.grid.feature['normal'].shape)\n flow_pcl_c3d_grid.feature['nres'] = 
grid_from_concat_flat_func(uvb_split, flow_pcl_c3d_flat.feature['nres'], pcl_c3d.grid.feature['nres'].shape)\n if return_stat:\n flow_pcl_c3d_grid.feature['dist_stat'] = grid_from_concat_flat_func(uvb_split, flow_pcl_c3d_flat.feature['dist_stat'], pcl_c3d.grid.feature['dist_stat'].shape) \n\n return flow_pcl_c3d_flat, flow_pcl_c3d_grid", "def test_3D_m4_2k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)", "def rmse(actual, predicted):\n rms = (actual-predicted)**2\n\n # Returning the sqaure root of the root mean square\n return float(np.sqrt(rms.mean()))", "def h_static_3d(E: float, normalize=False) -> Tuple[ndarray, ndarray]:\n # todo: Why don't we get a result if we fail to normalize here?\n # Normalize the radial part, not the whole thing; this gives us reasonable values,\n # without dealing with the asymptote near the origin.\n r, ψ = h_static(E, normalize=True)\n ψ = sqrt(ψ**2 / r**2)\n\n # Post-process by flipping between 0s, to make up for info lost\n # during square root.\n ε = 1e-3 # thresh for hit a 0.\n ψ_processed = np.copy(ψ)\n in_inversion = False\n slope_neg_prev = True\n\n for j in range(ψ.size):\n if j == 0: # We use slopes; don't mis-index\n ψ_processed[j] = ψ[j]\n continue\n\n slope_neg = ψ[j] < ψ[j-1]\n\n # Just started or ended an inversion.\n if ψ[j] <= ε and slope_neg != slope_neg_prev:\n in_inversion = not in_inversion\n\n if in_inversion:\n ψ_processed[j] = -ψ[j]\n else:\n ψ_processed[j] = ψ[j]\n\n slope_neg_prev = slope_neg\n\n if normalize:\n norm = simps(np.conj(ψ_processed) * ψ_processed, x=r)\n return r, ψ_processed / norm ** 0.5\n return r, ψ_processed", "def rmse(y_true, y_pred):\n\treturn backend.sqrt(backend.mean(backend.square(y_pred - y_true), axis=-1))", "def _pred_shape(self, X):\n return X.shape[:-1] # X has Euler angles, while output is scalar", "def run_rmse_check(self):\n\n # Shift the intensities to compare\n obs = self.obs.data - self.obs.data.mean()\n exp = self.exp.data - self.exp.data.mean()\n\n # Experimental RMSE2 in real space\n rmse_real = (obs - exp)\n rmse_real = rmse_real**2\n\n # Experimental RMSE2 in reciprocal space\n ftdiff = np.fft.fft2(obs.data,norm='ortho') - np.fft.fft2(exp.data,norm='ortho')\n rmse_fourier = np.real(np.abs(ftdiff)**2.)\n\n # Return\n rmse_ij = self.obs.deepcopy()\n rmse_ij.data = rmse_real\n\n rmse_kl = ModifiedImage(rmse_fourier)\n return rmse_ij, rmse_kl", "def test_3D_m6_1k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)", "def calc_error(opt, net, cuda, dataset, num_tests):\n if num_tests > len(dataset):\n num_tests = len(dataset)\n with torch.no_grad():\n erorr_arr, IOU_arr, prec_arr, recall_arr = [], [], [], []\n for idx in tqdm(range(num_tests)):\n\n # retrieve data for one frame (or multi-view)\n data = dataset[idx * len(dataset) // 
num_tests]\n image_tensor = data['img'].to(device=cuda) # (num_views, C, W, H) for 3x512x512 images, float -1. ~ 1.\n calib_tensor = data['calib'].to(device=cuda) # (num_views, 4, 4) calibration matrix\n sample_tensor = data['samples'].to(device=cuda).unsqueeze(0) # (1, 3, n_in + n_out), float XYZ coords are inside the 3d-volume of [self.B_MIN, self.B_MAX]\n if opt.num_views > 1:\n sample_tensor = reshape_sample_tensor(sample_tensor, opt.num_views) # (num_views, 3, n_in + n_out)\n label_tensor = data['labels'].to(device=cuda).unsqueeze(0) # (1, 1, n_in + n_out), float 1.0-inside, 0.0-outside\n deepVoxels_tensor = torch.zeros([label_tensor.shape[0]], dtype=torch.int32).to(device=cuda) # small dummy tensors\n if opt.deepVoxels_fusion != None: deepVoxels_tensor = data[\"deepVoxels\"].to(device=cuda)[None,:] # (B=1,C=8,D=32,H=48,W=32), np.float32, all >= 0.\n\n # forward pass\n res, error = net.forward(image_tensor, sample_tensor, calib_tensor, labels=label_tensor, deepVoxels=deepVoxels_tensor) # (1, 1, n_in + n_out), R\n if len(opt.gpu_ids) > 1: error = error.mean()\n\n # compute errors {IOU, prec, recall} based on the current set of query 3D points\n IOU, prec, recall = compute_acc(res, label_tensor) # R, R, R\n\n # print(\n # '{0}/{1} | Error: {2:06f} IOU: {3:06f} prec: {4:06f} recall: {5:06f}'\n # .format(idx, num_tests, error.item(), IOU.item(), prec.item(), recall.item()))\n erorr_arr.append(error.item())\n IOU_arr.append(IOU.item())\n prec_arr.append(prec.item())\n recall_arr.append(recall.item())\n\n return np.average(erorr_arr), np.average(IOU_arr), np.average(prec_arr), np.average(recall_arr)", "def dynamics_observation_prediction_error_scorer(dynamics,\n episodes,\n window_size=1024):\n total_errors = []\n for episode in episodes:\n for batch in _make_batches(episode, window_size, dynamics.n_frames):\n pred = dynamics.predict(batch.observations, batch.actions)\n errors = ((batch.next_observations - pred[0])**2).sum(axis=1)\n total_errors += errors.tolist()\n # smaller is better\n return -np.mean(total_errors)" ]
[ "0.57921743", "0.5499897", "0.5472451", "0.52560514", "0.5251211", "0.52303135", "0.51892173", "0.518625", "0.5166288", "0.5151563", "0.514771", "0.511312", "0.5067872", "0.50428575", "0.5011078", "0.49873042", "0.49712166", "0.49459213", "0.49200428", "0.4919663", "0.49184883", "0.49083573", "0.49080715", "0.48977944", "0.48971817", "0.48962042", "0.487182", "0.48699838", "0.484338", "0.48387176", "0.48194218", "0.4815768", "0.48134112", "0.48019618", "0.47942594", "0.47918463", "0.4790486", "0.4782371", "0.47750288", "0.47701478", "0.4768945", "0.4760177", "0.4759545", "0.47584644", "0.47503865", "0.47495165", "0.47446996", "0.47427142", "0.474086", "0.47404745", "0.4738917", "0.4737529", "0.47324806", "0.4726002", "0.47093076", "0.4706741", "0.47044295", "0.47040975", "0.47035766", "0.47005185", "0.4700287", "0.4699728", "0.46941352", "0.4685762", "0.46831834", "0.46830437", "0.46731478", "0.46709067", "0.4670526", "0.4667935", "0.46666354", "0.46653464", "0.46648282", "0.46619952", "0.46456176", "0.46451768", "0.4643405", "0.4633928", "0.4620695", "0.46179172", "0.46171916", "0.46120805", "0.46117908", "0.4611445", "0.4611068", "0.46085244", "0.46079272", "0.460647", "0.46051002", "0.46044648", "0.46033266", "0.45964184", "0.4595329", "0.4594627", "0.45930386", "0.45920628", "0.45915973", "0.4590086", "0.45829666", "0.45818353" ]
0.60341465
0
Evaluate 3D keypoint results.
def evaluate(self, outputs: list, res_folder: str, metric: Optional[Union[str, List[str]]] = 'pa-mpjpe', **kwargs: dict): metrics = metric if isinstance(metric, list) else [metric] for metric in metrics: if metric not in self.ALLOWED_METRICS: raise KeyError(f'metric {metric} is not supported') # for keeping correctness during multi-gpu test, we sort all results res_dict = {} for out in outputs: target_id = out['image_idx'] batch_size = len(out['keypoints_3d']) for i in range(batch_size): res_dict[int(target_id[i])] = dict( keypoints=out['keypoints_3d'][i], vertices=out['vertices'][i], ) keypoints, vertices = [], [] for i in range(self.num_data): keypoints.append(res_dict[i]['keypoints']) vertices.append(res_dict[i]['vertices']) keypoints = np.stack(keypoints) vertices = np.stack(vertices) res = dict(keypoints=keypoints, vertices=vertices) name_value_tuples = [] for index, _metric in enumerate(metrics): if 'body_part' in kwargs: body_parts = kwargs['body_part'][index] for body_part in body_parts: if _metric == 'pa-mpjpe': _nv_tuples = self._report_mpjpe( res, metric='pa-mpjpe', body_part=body_part) elif _metric == 'pa-pve': _nv_tuples = self._report_pve( res, metric='pa-pve', body_part=body_part) else: raise NotImplementedError name_value_tuples.extend(_nv_tuples) else: if _metric == 'mpjpe': _nv_tuples = self._report_mpjpe(res) elif _metric == 'pa-mpjpe': _nv_tuples = self._report_mpjpe(res, metric='pa-mpjpe') elif _metric == '3dpck': _nv_tuples = self._report_3d_pck(res) elif _metric == 'pa-3dpck': _nv_tuples = self._report_3d_pck(res, metric='pa-3dpck') elif _metric == '3dauc': _nv_tuples = self._report_3d_auc(res) elif _metric == 'pa-3dauc': _nv_tuples = self._report_3d_auc(res, metric='pa-3dauc') elif _metric == 'pve': _nv_tuples = self._report_pve(res) elif _metric == 'pa-pve': _nv_tuples = self._report_pve(res, metric='pa-pve') elif _metric == '3DRMSE': _nv_tuples = self._report_3d_rmse(res) else: raise NotImplementedError name_value_tuples.extend(_nv_tuples) name_value = OrderedDict(name_value_tuples) return name_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_3d(self, box_point_3d, instance):\n azimuth_error, polar_error = self.evaluate_viewpoint(box_point_3d, instance)\n iou = self.evaluate_iou(box_point_3d, instance)\n return azimuth_error, polar_error, iou", "def _get_data_on_3d_points(self, varname, record, points):\n if self.get_mesh_dimension() != 3:\n raise TelemacException(\"Action possible only on 3d mesh\")\n\n res = float('nan')*np.ones((len(points)), dtype=np.float64)\n for i, point in enumerate(points):\n elev = self.get_data_on_vertical_segment(\\\n 'ELEVATION Z', record, point[:-1])\n values = self.get_data_on_vertical_segment(\\\n varname, record, point[:-1])\n for plan in range(self.nplan-1):\n if elev[plan] <= point[-1] and point[-1] <= elev[plan+1]:\n shz = (point[-1]-elev[plan])/max((elev[plan+1]\\\n -elev[plan]), 1.e-6)\n res[i] = (1.0-shz)*values[plan]+shz*values[plan+1]\n return res", "def function_3d(point):\n return point[0]**2 + point[1]**2 + point[2]**2 - 1", "def keypoint_3d_loss(self, pred_keypoints_3d, gt_keypoints_3d, has_pose_3d):\n pred_keypoints_3d = pred_keypoints_3d[:, 25:, :]\n conf = gt_keypoints_3d[:, :, -1].unsqueeze(-1).clone()\n gt_keypoints_3d = gt_keypoints_3d[:, :, :-1].clone()\n gt_keypoints_3d = gt_keypoints_3d[has_pose_3d == 1]\n conf = conf[has_pose_3d == 1]\n pred_keypoints_3d = pred_keypoints_3d[has_pose_3d == 1]\n if len(gt_keypoints_3d) > 0:\n gt_pelvis = (gt_keypoints_3d[:, 2,:] + gt_keypoints_3d[:, 3,:]) / 2\n gt_keypoints_3d = gt_keypoints_3d - gt_pelvis[:, None, :]\n pred_pelvis = (pred_keypoints_3d[:, 2,:] + pred_keypoints_3d[:, 3,:]) / 2\n pred_keypoints_3d = pred_keypoints_3d - pred_pelvis[:, None, :]\n return (conf * self.criterion_keypoints(pred_keypoints_3d, gt_keypoints_3d)).mean()\n else:\n return torch.FloatTensor(1).fill_(0.).to(self.device)", "def get_3d_points(preds_3d):\n for i,p in enumerate(preds_3d):\n preds_3d[i] = preds_3d[i] - preds_3d[i].mean(0)*np.ones((16,1));\n return preds_3d;", "def _evaluate(self, x, y, z):\n raise NotImplementedError()", "def _evaluate(self, w, x, y, z):\n raise NotImplementedError()", "def _evaluate(self, x, y, z):\n if _isscalar(x):\n x_pos = max(min(self.xSearchFunc(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(self.ySearchFunc(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(self.zSearchFunc(self.z_list, z), self.z_n - 1), 1)\n else:\n x_pos = self.xSearchFunc(self.x_list, x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = self.ySearchFunc(self.y_list, y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n z_pos = self.zSearchFunc(self.z_list, z)\n z_pos[z_pos < 1] = 1\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n alpha = (x - self.x_list[x_pos - 1]) / (\n self.x_list[x_pos] - self.x_list[x_pos - 1]\n )\n beta = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n gamma = (z - self.z_list[z_pos - 1]) / (\n self.z_list[z_pos] - self.z_list[z_pos - 1]\n )\n f = (\n (1 - alpha)\n * (1 - beta)\n * (1 - gamma)\n * self.f_values[x_pos - 1, y_pos - 1, z_pos - 1]\n + (1 - alpha)\n * (1 - beta)\n * gamma\n * self.f_values[x_pos - 1, y_pos - 1, z_pos]\n + (1 - alpha)\n * beta\n * (1 - gamma)\n * self.f_values[x_pos - 1, y_pos, z_pos - 1]\n + (1 - alpha) * beta * gamma * self.f_values[x_pos - 1, y_pos, z_pos]\n + alpha\n * (1 - beta)\n * (1 - gamma)\n * self.f_values[x_pos, y_pos - 1, z_pos - 1]\n + alpha * (1 - beta) * gamma * self.f_values[x_pos, y_pos - 1, z_pos]\n + alpha * beta * (1 - gamma) * self.f_values[x_pos, y_pos, z_pos - 1]\n + alpha 
* beta * gamma * self.f_values[x_pos, y_pos, z_pos]\n )\n return f", "def test_el3s():\n N = 999\n xs = (np.random.rand(N)) * 5\n kcs = (np.random.rand(N) - 0.5) * 10\n ps = (np.random.rand(N) - 0.5) * 10\n\n res0 = [el30(x, kc, p) for x, kc, p in zip(xs, kcs, ps)]\n res1 = el3v(xs, kcs, ps)\n res2 = el3(xs, kcs, ps)\n\n assert np.allclose(res0, res1)\n assert np.allclose(res1, res2)", "def test(self, x, y, z):\n return self.a*x + self.b*y + self.c*z + self.d", "def imshow_multiview_keypoints_3d(pose_result, skeleton=None, pose_kpt_color=None, pose_link_color=None, space_size=[8000, 8000, 2000], space_center=[0, -500, 800], kpt_score_thr=0.0):\n fig = plt.figure()\n ax = plt.axes(projection='3d')\n ax.set_xlim3d(space_center[0] - space_size[0] * 0.5, space_center[0] + space_size[0] * 0.5)\n ax.set_ylim3d(space_center[1] - space_size[1] * 0.5, space_center[1] + space_size[1] * 0.5)\n ax.set_zlim3d(space_center[2] - space_size[2] * 0.5, space_center[2] + space_size[2] * 0.5)\n pose_kpt_color = np.array(pose_kpt_color)\n pose_kpt_color = pose_kpt_color[..., ::-1] / 255.0\n for kpts in pose_result:\n xs, ys, zs, scores = kpts.T\n valid = scores > kpt_score_thr\n ax.scatter(xs[valid], ys[valid], zs[valid], marker='o', color=pose_kpt_color[valid])\n for link, link_color in zip(skeleton, pose_link_color):\n link_indices = [_i for _i in link]\n xs_3d = kpts[link_indices, 0]\n ys_3d = kpts[link_indices, 1]\n zs_3d = kpts[link_indices, 2]\n kpt_score = kpts[link_indices, 3]\n if kpt_score.min() > kpt_score_thr:\n _color = np.array(link_color[::-1]) / 255.0\n ax.plot(xs_3d, ys_3d, zs_3d, color=_color)\n fig.tight_layout()\n fig.canvas.draw()\n img_w, img_h = fig.canvas.get_width_height()\n img_vis = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).reshape(img_h, img_w, -1)\n img_vis = mmcv.rgb2bgr(img_vis)\n plt.close(fig)\n return img_vis", "def Compute3d(self, *args):\n return _BRepAlgo.BRepAlgo_NormalProjection_Compute3d(self, *args)", "def k3(self) -> float:\n return self.distortion_coefficients[2]", "def threeptscurv(x, y):\n length = x.size\n R = np.zeros(length)\n cur = np.zeros(length)\n for i in range(1, length-1):\n a = np.sqrt((x[i+1]-x[i])**2 + (y[i+1]-y[i])**2)\n b = np.sqrt((x[i+1]-x[i-1])**2 + (y[i+1]-y[i-1])**2)\n c = np.sqrt((x[i]-x[i-1])**2 + (y[i]-y[i-1])**2)\n p = (a+b+c)/2\n R[i] = a*b*c/4/np.sqrt(p*(p-a)*(p-b)*(p-c))\n cur[i] = 1/R[i]\n if R[i] > 1/ZERO or np.isnan(R[i]):\n cur[i] = 0\n return cur", "def K3(p, E):\n B, C, D = p\n K_ = B * E / ((C + E**2)**2 + D*E**2)\n K_ = K_*(K_>0)\n return K_", "def test_el3_vs_original():\n # store computations from original implementation\n # from florian_ell3_paper import el3 as el30\n # N = 10000\n # x11 = np.random.rand(N)*5\n # kc11 = (np.random.rand(N)-.5)*10\n # p11 = (np.random.rand(N)-.5)*10\n # result0 = np.array([el30(x, kc, p) for x,kc,p in zip(x11,kc11,p11)])\n # np.save('data_test_el3', np.array([result0,x11,kc11,p11]))\n\n # load data from orginal implementation\n data = np.load(\"tests/testdata/testdata_el3.npy\")\n res0, x11, kc11, p11 = data\n\n # compare to vectorized\n resv = el3v(x11, kc11, p11)\n assert np.allclose(res0, resv)\n\n # compare to modified original\n res1 = np.array([el30(x, kc, p) for x, kc, p in zip(x11, kc11, p11)])\n assert np.allclose(res0, res1)", "def calc(input1, input2, outpref):\n logging.info('Doing 3dcalc for %s' % outpref)\n cmd = split(\"3dcalc -a {} -b {} -expr 'a-b' -prefix {}\".format(input1,\n input2, outpref))\n call(cmd)", "def test_3D_m6_1k():\n scal, velo = setup_3D()\n\n advec 
= Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)", "def test_3D_m6_2k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)", "def _evaluate(self, w, x, y, z):\n if _isscalar(w):\n w_pos = max(min(self.wSearchFunc(self.w_list, w), self.w_n - 1), 1)\n x_pos = max(min(self.xSearchFunc(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(self.ySearchFunc(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(self.zSearchFunc(self.z_list, z), self.z_n - 1), 1)\n else:\n w_pos = self.wSearchFunc(self.w_list, w)\n w_pos[w_pos < 1] = 1\n w_pos[w_pos > self.w_n - 1] = self.w_n - 1\n x_pos = self.xSearchFunc(self.x_list, x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = self.ySearchFunc(self.y_list, y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n z_pos = self.zSearchFunc(self.z_list, z)\n z_pos[z_pos < 1] = 1\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n i = w_pos # for convenience\n j = x_pos\n k = y_pos\n l = z_pos\n alpha = (w - self.w_list[i - 1]) / (self.w_list[i] - self.w_list[i - 1])\n beta = (x - self.x_list[j - 1]) / (self.x_list[j] - self.x_list[j - 1])\n gamma = (y - self.y_list[k - 1]) / (self.y_list[k] - self.y_list[k - 1])\n delta = (z - self.z_list[l - 1]) / (self.z_list[l] - self.z_list[l - 1])\n f = (1 - alpha) * (\n (1 - beta)\n * (\n (1 - gamma) * (1 - delta) * self.f_values[i - 1, j - 1, k - 1, l - 1]\n + (1 - gamma) * delta * self.f_values[i - 1, j - 1, k - 1, l]\n + gamma * (1 - delta) * self.f_values[i - 1, j - 1, k, l - 1]\n + gamma * delta * self.f_values[i - 1, j - 1, k, l]\n )\n + beta\n * (\n (1 - gamma) * (1 - delta) * self.f_values[i - 1, j, k - 1, l - 1]\n + (1 - gamma) * delta * self.f_values[i - 1, j, k - 1, l]\n + gamma * (1 - delta) * self.f_values[i - 1, j, k, l - 1]\n + gamma * delta * self.f_values[i - 1, j, k, l]\n )\n ) + alpha * (\n (1 - beta)\n * (\n (1 - gamma) * (1 - delta) * self.f_values[i, j - 1, k - 1, l - 1]\n + (1 - gamma) * delta * self.f_values[i, j - 1, k - 1, l]\n + gamma * (1 - delta) * self.f_values[i, j - 1, k, l - 1]\n + gamma * delta * self.f_values[i, j - 1, k, l]\n )\n + beta\n * (\n (1 - gamma) * (1 - delta) * self.f_values[i, j, k - 1, l - 1]\n + (1 - gamma) * delta * self.f_values[i, j, k - 1, l]\n + gamma * (1 - delta) * self.f_values[i, j, k, l - 1]\n + gamma * delta * self.f_values[i, j, k, l]\n )\n )\n return f", "def keypoints_from_heatmaps3d(heatmaps, center, scale):\n N, K, D, H, W = heatmaps.shape\n preds, maxvals = _get_max_preds_3d(heatmaps)\n for i in range(N):\n preds[i, :, :2] = transform_preds(preds[i, :, :2], center[i], scale[i], [W, H])\n return preds, maxvals", "def _evaluate(self, w, x, y, z):\n if _isscalar(w):\n x_pos = max(min(np.searchsorted(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = 
max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (x - self.x_list[x_pos - 1]) / (\n self.x_list[x_pos] - self.x_list[x_pos - 1]\n )\n beta = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n gamma = (z - self.z_list[z_pos - 1]) / (\n self.z_list[z_pos] - self.z_list[z_pos - 1]\n )\n f = (\n (1 - alpha)\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[x_pos - 1][y_pos - 1][z_pos - 1](w)\n + (1 - alpha)\n * (1 - beta)\n * gamma\n * self.wInterpolators[x_pos - 1][y_pos - 1][z_pos](w)\n + (1 - alpha)\n * beta\n * (1 - gamma)\n * self.wInterpolators[x_pos - 1][y_pos][z_pos - 1](w)\n + (1 - alpha)\n * beta\n * gamma\n * self.wInterpolators[x_pos - 1][y_pos][z_pos](w)\n + alpha\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[x_pos][y_pos - 1][z_pos - 1](w)\n + alpha\n * (1 - beta)\n * gamma\n * self.wInterpolators[x_pos][y_pos - 1][z_pos](w)\n + alpha\n * beta\n * (1 - gamma)\n * self.wInterpolators[x_pos][y_pos][z_pos - 1](w)\n + alpha * beta * gamma * self.wInterpolators[x_pos][y_pos][z_pos](w)\n )\n else:\n m = len(x)\n x_pos = np.searchsorted(self.x_list, x)\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n f = np.zeros(m) + np.nan\n for i in range(1, self.x_n):\n for j in range(1, self.y_n):\n for k in range(1, self.z_n):\n c = np.logical_and(\n np.logical_and(i == x_pos, j == y_pos), k == z_pos\n )\n if np.any(c):\n alpha = (x[c] - self.x_list[i - 1]) / (\n self.x_list[i] - self.x_list[i - 1]\n )\n beta = (y[c] - self.y_list[j - 1]) / (\n self.y_list[j] - self.y_list[j - 1]\n )\n gamma = (z[c] - self.z_list[k - 1]) / (\n self.z_list[k] - self.z_list[k - 1]\n )\n f[c] = (\n (1 - alpha)\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[i - 1][j - 1][k - 1](w[c])\n + (1 - alpha)\n * (1 - beta)\n * gamma\n * self.wInterpolators[i - 1][j - 1][k](w[c])\n + (1 - alpha)\n * beta\n * (1 - gamma)\n * self.wInterpolators[i - 1][j][k - 1](w[c])\n + (1 - alpha)\n * beta\n * gamma\n * self.wInterpolators[i - 1][j][k](w[c])\n + alpha\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[i][j - 1][k - 1](w[c])\n + alpha\n * (1 - beta)\n * gamma\n * self.wInterpolators[i][j - 1][k](w[c])\n + alpha\n * beta\n * (1 - gamma)\n * self.wInterpolators[i][j][k - 1](w[c])\n + alpha\n * beta\n * gamma\n * self.wInterpolators[i][j][k](w[c])\n )\n return f", "def imshow_keypoints_3d(pose_result, img=None, skeleton=None, pose_kpt_color=None, pose_link_color=None, vis_height=400, kpt_score_thr=0.3, num_instances=-1, *, axis_azimuth=70, axis_limit=1.7, axis_dist=10.0, axis_elev=15.0):\n show_img = img is not None\n if num_instances < 0:\n num_instances = len(pose_result)\n elif len(pose_result) > num_instances:\n pose_result = pose_result[:num_instances]\n elif len(pose_result) < num_instances:\n pose_result += [dict()] * (num_instances - len(pose_result))\n num_axis = num_instances + 1 if show_img else num_instances\n plt.ioff()\n fig = plt.figure(figsize=(vis_height * num_axis * 0.01, vis_height * 0.01))\n if show_img:\n img = mmcv.imread(img, channel_order='bgr')\n img = mmcv.bgr2rgb(img)\n img = mmcv.imrescale(img, scale=vis_height / img.shape[0])\n ax_img = fig.add_subplot(1, num_axis, 1)\n ax_img.get_xaxis().set_visible(False)\n ax_img.get_yaxis().set_visible(False)\n ax_img.set_axis_off()\n ax_img.set_title('Input')\n 
ax_img.imshow(img, aspect='equal')\n for idx, res in enumerate(pose_result):\n dummy = len(res) == 0\n kpts = np.zeros((1, 3)) if dummy else res['keypoints_3d']\n if kpts.shape[1] == 3:\n kpts = np.concatenate([kpts, np.ones((kpts.shape[0], 1))], axis=1)\n valid = kpts[:, 3] >= kpt_score_thr\n ax_idx = idx + 2 if show_img else idx + 1\n ax = fig.add_subplot(1, num_axis, ax_idx, projection='3d')\n ax.view_init(elev=axis_elev, azim=axis_azimuth)\n x_c = np.mean(kpts[valid, 0]) if sum(valid) > 0 else 0\n y_c = np.mean(kpts[valid, 1]) if sum(valid) > 0 else 0\n ax.set_xlim3d([x_c - axis_limit / 2, x_c + axis_limit / 2])\n ax.set_ylim3d([y_c - axis_limit / 2, y_c + axis_limit / 2])\n ax.set_zlim3d([0, axis_limit])\n ax.set_aspect('auto')\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_zticks([])\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n ax.dist = axis_dist\n if not dummy and pose_kpt_color is not None:\n pose_kpt_color = np.array(pose_kpt_color)\n assert len(pose_kpt_color) == len(kpts)\n x_3d, y_3d, z_3d = np.split(kpts[:, :3], [1, 2], axis=1)\n _color = pose_kpt_color[..., ::-1] / 255.0\n ax.scatter(x_3d[valid], y_3d[valid], z_3d[valid], marker='o', color=_color[valid])\n if not dummy and skeleton is not None and pose_link_color is not None:\n pose_link_color = np.array(pose_link_color)\n assert len(pose_link_color) == len(skeleton)\n for link, link_color in zip(skeleton, pose_link_color):\n link_indices = [_i for _i in link]\n xs_3d = kpts[link_indices, 0]\n ys_3d = kpts[link_indices, 1]\n zs_3d = kpts[link_indices, 2]\n kpt_score = kpts[link_indices, 3]\n if kpt_score.min() > kpt_score_thr:\n _color = link_color[::-1] / 255.0\n ax.plot(xs_3d, ys_3d, zs_3d, color=_color, zdir='z')\n if 'title' in res:\n ax.set_title(res['title'])\n fig.tight_layout()\n fig.canvas.draw()\n img_w, img_h = fig.canvas.get_width_height()\n img_vis = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).reshape(img_h, img_w, -1)\n img_vis = mmcv.rgb2bgr(img_vis)\n plt.close(fig)\n return img_vis", "def estimate_extrinsics(dataset):\n # extrinsics are matrices M of shape (3,4) for every datapoint --> M = [R,t] where R=rotation matrix and t = translation vector\n camera_extrinsics_univ = np.zeros(\n (dataset.datadict[\"keypoints_3d_univ\"].shape[0], 3, 4), dtype=np.float\n )\n camera_extrinsics = np.zeros(\n (dataset.datadict[\"keypoints_3d\"].shape[0], 3, 4), dtype=np.float\n )\n\n for i, vid in enumerate(\n tqdm(\n np.unique(dataset.datadict[\"v_ids\"]),\n desc=\"Estimate extrinsics per video\",\n )\n ):\n ids = dataset.datadict[\"v_ids\"] == vid\n kps3d_c = dataset.datadict[\"keypoints_3d\"][ids]\n kps3d_c_univ = dataset.datadict[\"keypoints_3d_univ\"][ids]\n kps3d_w = dataset.datadict[\"keypoints_3d_world\"][ids]\n kps3d_c = np.reshape(kps3d_c, (-1, 3))\n kps3d_c_univ = np.reshape(kps3d_c_univ, (-1, 3))\n kps3d_w = np.reshape(kps3d_w, (-1, 3))\n\n _, M, _ = cv2.estimateAffine3D(\n kps3d_w, kps3d_c, ransacThreshold=10, confidence=0.999\n )\n _, M_univ, _ = cv2.estimateAffine3D(\n kps3d_w, kps3d_c_univ, ransacThreshold=10, confidence=0.999\n )\n\n # returned values correspond to [R,t]^T\n camera_extrinsics[ids] = M\n camera_extrinsics_univ[ids] = M_univ\n\n return camera_extrinsics_univ, camera_extrinsics", "def point_from_rays(self):\n print \"generating the 3d point from given clicked points\"\n \n #gather cams and points clicked \n uvs = []\n cams = []\n for iFrame in self.frames:\n if iFrame.lastClick : \n uv = numpy.multiply(iFrame.lastClick,self.reduceFactor)\n 
uvs.append(uv)\n cam = load_perspective_camera(self.camList[iFrame.currImg])\n cams.append(cam)\n point = get_3d_from_cams(cams, uvs)\n self.point3d = point;\n self.pointLabel.set(\"3d Point: \" + str(self.point3d))\n\n # project 3d point into each image, and gather intensities \n values = []\n ims = []\n for idx, img in enumerate(self.imgList):\n cam = load_perspective_camera(self.camList[idx])\n imgPoint = project_point(cam, point[0], point[1], point[2])\n imgPoint = numpy.divide(imgPoint, self.reduceFactor)\n self.allUVs.append(imgPoint)\n \n #grab float intensity value at this point \n imgView,ni,nj = load_image(img)\n val = pixel(imgView, imgPoint)\n if val > 0.0:\n values.append(val)\n ims.append(idx)\n \n #cleanup\n remove_from_db([imgView, cam])\n \n\n #write mean/std of intensities \n self.meanLabel.set(\"Mean: \" + str(numpy.mean(values)) )\n self.stdLabel.set(\"Std Dev: \" + str(numpy.std(values)) )\n #plot the intensities by image number \n self.f.clf();\n self.a = self.f.add_subplot(311)\n self.a.set_xlabel(\"img #\")\n self.a.set_ylabel(\"intensity\")\n self.a.plot(ims, values)\n #plot the histogram of intensities by image number \n pdf, bins, patches = plt.hist(values)\n self.b = self.f.add_subplot(313)\n self.b.set_xlabel(\"bin val\")\n self.b.set_ylabel(\"freq\")\n self.b.hist(values, 15, normed=1, facecolor=\"green\" )\n self.canvas.show();", "def evaluate(w, kx, ky):\n factor = contrast * patch_diameter.magnitude**2 * np.pi / 4\n arg = np.sqrt(kx**2 + ky**2) * patch_diameter * 0.5\n\n # NOTE: a runtime warning will arise since np.where evaluates the function\n # in all points, including 0, and then proper final results are selected\n with np.errstate(invalid='ignore'):\n spatial = np.where(arg == 0, 1, 2 * first_kind_bessel(arg) / arg)\n\n half_duration = duration.rescale(1/w.units) / 2\n temporal = np.sinc(w * half_duration / np.pi) * np.exp(1j * w * (delay+half_duration).rescale(1/w.units))\n return factor * duration.magnitude * temporal * spatial", "def parse_points3d(kapture_path: str,\n number_of_points: int,\n nvm_content: List[str],\n offset: int,\n point_id_offset: int,\n image_idx_to_image_name: List[str],\n filter_list: Optional[Set[str]],\n points3d: List[List[float]],\n keypoints: kapture.Keypoints,\n observations: kapture.Observations) -> None:\n # (image_name, nvm_feature_id ) -> keypoint_id\n known_keypoints = {}\n local_keypoints = {}\n for i in range(0, number_of_points):\n fields = nvm_content[i + offset].split()\n points3d.append([float(v) for v in fields[0:6]])\n # parse observations\n number_of_measurements = int(fields[6])\n for j in range(0, number_of_measurements):\n # parse measurement\n image_index = int(fields[7 + 4 * j + 0])\n feature_index = int(fields[7 + 4 * j + 1])\n x = float(fields[7 + 4 * j + 2])\n y = float(fields[7 + 4 * j + 3])\n\n # retrieve filename. if added, then proceed to add features / observations\n file_name = image_idx_to_image_name[image_index]\n if filter_list is not None and file_name not in filter_list:\n # file_name is not in the list, do not add it\n continue\n\n # init local_keypoints if needed\n if file_name not in local_keypoints:\n local_keypoints[file_name] = []\n # do not add the same keypoint twice\n if (file_name, feature_index) not in known_keypoints:\n # in the kapture format, keypoint id is different. 
Note that it starts from 0\n known_keypoints[(file_name, feature_index)] = len(local_keypoints[file_name])\n local_keypoints[file_name].append([x, y])\n keypoint_idx = known_keypoints[(file_name, feature_index)]\n point3d_idx = i + point_id_offset\n observations.add(point3d_idx, LOCAL_FEATURE_TYPE, file_name, keypoint_idx)\n\n # finally, convert local_keypoints to np.ndarray and add them to the global keypoints variable\n for image_filename, keypoints_array in local_keypoints.items():\n keypoints_np_array = np.array(keypoints_array, dtype=np.float32)\n keypoints_filepath = kapture.io.features.get_keypoints_fullpath(LOCAL_FEATURE_TYPE,\n kapture_path,\n image_filename)\n kapture.io.features.image_keypoints_to_file(keypoints_filepath, keypoints_np_array)\n keypoints.add(image_filename)", "def three_dimensional(self, z): # Maybe I misunderstood the task. My method looks weird\n return (self.x, self.y, z)", "def __getitem__(self, *args):\n return _itkSurfaceSpatialObjectPointPython.vectoritkSurfaceSpatialObjectPoint3___getitem__(self, *args)", "def immoment3D(X, Y, Z, p, q, r):\n assert len(X) == len(Y)\n assert len(Y) == len(Z)\n return (X ** p * Y ** q * Z ** r).sum()", "def _evaluate(self, x, y, z):\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n beta = (z - self.z_list[z_pos - 1]) / (\n self.z_list[z_pos] - self.z_list[z_pos - 1]\n )\n f = (\n (1 - alpha) * (1 - beta) * self.xInterpolators[y_pos - 1][z_pos - 1](x)\n + (1 - alpha) * beta * self.xInterpolators[y_pos - 1][z_pos](x)\n + alpha * (1 - beta) * self.xInterpolators[y_pos][z_pos - 1](x)\n + alpha * beta * self.xInterpolators[y_pos][z_pos](x)\n )\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n f = np.zeros(m) + np.nan\n for i in range(1, self.y_n):\n for j in range(1, self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n alpha = (y[c] - self.y_list[i - 1]) / (\n self.y_list[i] - self.y_list[i - 1]\n )\n beta = (z[c] - self.z_list[j - 1]) / (\n self.z_list[j] - self.z_list[j - 1]\n )\n f[c] = (\n (1 - alpha)\n * (1 - beta)\n * self.xInterpolators[i - 1][j - 1](x[c])\n + (1 - alpha) * beta * self.xInterpolators[i - 1][j](x[c])\n + alpha * (1 - beta) * self.xInterpolators[i][j - 1](x[c])\n + alpha * beta * self.xInterpolators[i][j](x[c])\n )\n return f", "def test_3d():\n dic, data = ng.bruker.read(os.path.join(DATA_DIR, \"bruker_3d\"))\n assert dic['FILE_SIZE'] == 91226112\n assert data.shape == (116, 128, 768)\n assert round(data[0, 0, 40].real, 2) == 18.0\n assert round(data[0, 0, 40].imag, 2) == -66.0\n assert round(data[5, 13, 91].real, 2) == 1138.0\n assert round(data[5, 13, 91].imag, 2) == 3482.0\n write_readback(dic, data)", "def _evaluate(self, w, x, y, z):\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n beta = (z - self.z_list[z_pos - 1]) / (\n self.z_list[z_pos] - self.z_list[z_pos - 1]\n )\n f = (\n (1 - alpha)\n * (1 - beta)\n * self.wxInterpolators[y_pos - 1][z_pos - 1](w, x)\n + (1 - alpha) * beta * 
self.wxInterpolators[y_pos - 1][z_pos](w, x)\n + alpha * (1 - beta) * self.wxInterpolators[y_pos][z_pos - 1](w, x)\n + alpha * beta * self.wxInterpolators[y_pos][z_pos](w, x)\n )\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n f = np.zeros(m) + np.nan\n for i in range(1, self.y_n):\n for j in range(1, self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n alpha = (y[c] - self.y_list[i - 1]) / (\n self.y_list[i] - self.y_list[i - 1]\n )\n beta = (z[c] - self.z_list[j - 1]) / (\n self.z_list[j] - self.z_list[j - 1]\n )\n f[c] = (\n (1 - alpha)\n * (1 - beta)\n * self.wxInterpolators[i - 1][j - 1](w[c], x[c])\n + (1 - alpha)\n * beta\n * self.wxInterpolators[i - 1][j](w[c], x[c])\n + alpha\n * (1 - beta)\n * self.wxInterpolators[i][j - 1](w[c], x[c])\n + alpha * beta * self.wxInterpolators[i][j](w[c], x[c])\n )\n return f", "def test_variables(x, y, z):\n a = x * y\n b = y * a\n c = a + b\n return c / z", "def test_pointnum3():\n shape = paramak.CapsuleVacuumVessel(outer_start_point=(1000, -500), radius=5000, thickness=50)\n assert len(shape.points) == 12\n assert len(shape.processed_points) == 13", "def plane_point_side_v3(p: np.ndarray, v: np.ndarray) -> Any:\n return p[:3].dot(v) + p[3]", "def EvaluatePointDataField(self, *float, **kwargs):\n ...", "def eucdist3d(self,point1,point2):\n#\t\tif not isinstance(point1,np.ndarray):\n#\t\t\tpoint1 = np.array(point1)\n#\t\t\tpoint2 = np.array(point2)\n\t\t\n\t\treturn(((point2[0]-point1[0])**2 + (point2[1]-point1[1])**2 + (point2[2]-point1[2])**2)**0.5)", "def x3(self):\n return self._x + self._x3", "def elec_p_xyz_loop(x, y, z):\n U = 0.\n\n r = np.sqrt(x**2 + y**2 + z**2)\n x = x/r\n y = y/r\n z = z/r\n\n npts = x.size\n for i in range(npts-1):\n for j in range(i+1, npts):\n dsq = (x[i]-x[j])**2 + (y[i]-y[j])**2 + (z[i]-z[j])**2\n U += 1./np.sqrt(dsq)\n return U", "def get_joystick3d(self, index=0):\r\n return (self.handler.absx[index], self.handler.absy[index], self.handler.absz[index])", "def task_three():\n # Formula to calculate:\n # q2 = (z2 / z1) * (R + T * nt / d) * q1\n # where R - rotation\n # T - translation\n # nt - normal vertex of common plane of the 3d points\n # d - shift of the common plane\n # and (R + T * nt / d) required homography transform\n # defined up to constant\n # But in our case T == 0\n tetta = 30 * np.pi / 180\n H = np.array([[1, 0, 0],\n [0, np.cos(tetta), -np.sin(tetta)],\n [0, np.sin(tetta), np.cos(tetta)],\n ])\n print(\"Homography transformation:\\n\", H)", "def problem3():\n t = np.array([-27.1, -2.9, -3.2])\n principal_point = np.array([8, -10])\n focal_length = 8\n\n # model transformations\n T = gettranslation(t)\n Ry = getyrotation(135)\n Rx = getxrotation(-30)\n Rz = getzrotation(90)\n print(T)\n print(Ry)\n print(Rx)\n print(Rz)\n\n K = getcentralprojection(principal_point, focal_length)\n\n P,M = getfullprojection(T, Rx, Ry, Rz, K)\n print(P)\n print(M)\n\n points = loadpoints()\n displaypoints2d(points)\n\n z = loadz()\n Xt = invertprojection(K, points, z)\n\n Xh = inverttransformation(M, Xt)\n\n worldpoints = hom2cart(Xh)\n displaypoints3d(worldpoints)\n\n points2 = projectpoints(P, worldpoints)\n displaypoints2d(points2)\n\n plt.show()", "def fun(params, n_cameras, n_points, camera_indices, point_indices, points_2d, theta):\n \n camera_params = params[:n_cameras * 9].reshape((n_cameras, 
9))\n points_3d = params[n_cameras * 9:].reshape((n_points, 3))\n points_proj = project(points_3d[point_indices], camera_params[camera_indices], theta)\n print(\"Residual is: \", (points_proj - points_2d).ravel())\n return (points_proj - points_2d).ravel()", "def fiabilite3(v1 : float, v2 : float, v3 : float\n , epsilon : float) -> float:\n egal_v1_v2 : bool = egal_eps(v1, v2, epsilon)\n\n egal_v2_v3 : bool = egal_eps(v2, v3, epsilon)\n\n egal_v1_v3 : bool = egal_eps(v1, v3, epsilon)\n\n if egal_v1_v2 and egal_v2_v3 and egal_v1_v3:\n return 1\n elif ((egal_v1_v2 and egal_v2_v3)\n or (egal_v1_v2 and egal_v1_v3) \n or (egal_v1_v3 and egal_v2_v3)):\n return 2/3\n else:\n return 0", "def test_projection_v3_x(self):\n from pedemath.vec3 import projection_v3\n\n vec_a = Vec3(3, 4, 5)\n vec_b = Vec3(1, 0, 0)\n\n result = projection_v3(vec_a, vec_b)\n\n self.assertEqual(3, result)", "def three_point_method(d, point_a, point_b, point_c, point_a0, point_b0, point_c0, point_d0):\n dx, dy, dz = d\n\n mag_rda = magnitude(point_a0, point_d0) # length of vector RDA\n mag_rdb = magnitude(point_b0, point_d0) # length of vector RDB\n mag_rdc = magnitude(point_c0, point_d0) # length of vector RDC\n\n # set up equation for scipy.fsolve by adding all 3 together and moving all over to right side\n eq1 = (dx - point_a[0])**2 + (dy - point_a[1])**2 + (dz - point_a[2])**2 - mag_rda**2\n eq2 = (dx - point_b[0])**2 + (dy - point_b[1])**2 + (dz - point_b[2])**2 - mag_rdb**2\n eq3 = (dx - point_c[0])**2 + (dy - point_c[1])**2 + (dz - point_c[2])**2 - mag_rdc**2\n\n return eq1, eq2, eq3", "def is3D(data):\n return data.find(\"x3\") != -1 and data.find(\"y3\") != -1 and data.find(\"z3\") != -1", "def Has3d(self, *args):\n return _Adaptor3d.Adaptor3d_TopolTool_Has3d(self, *args)", "def render_vertices_3d(self, **kwds):\n return point3d(self.coordinates_of(self.points), **kwds)", "def fun(params,n_cameras,n_points,camera_indices,point_indices,points_3d , points_2d):\n camera_params = params[:n_cameras * 6].reshape((n_cameras, 6))\n # points_3d = points_3d.T\n # points_3d = params[n_cameras * 7:].reshape((n_points, 3))\n # print(point_indices)\n points_proj = project(points_3d[point_indices], camera_params[camera_indices])\n return (points_proj - points_2d).ravel()", "def evaluate(x,a,b,c,k1,k2,f1,f2):\n return a * np.sin(k1 * x - f1) + b * np.cos(k2 * x - f2) + c", "def evaluate(w, kx, ky):\n _check_valid_spatial_freq(kx, ky, kx_g, ky_g)\n _check_valid_temporal_freq(w, w_g)\n\n dw = abs(w.flatten()[1] - w.flatten()[0]) if isinstance(w, np.ndarray) and w.ndim > 0 else 1*pq.Hz\n dkx = abs(kx.flatten()[1] - kx.flatten()[0]) if isinstance(kx, np.ndarray) and kx.ndim > 0 else 1/pq.deg\n dky = abs(ky.flatten()[1] - ky.flatten()[0]) if isinstance(ky, np.ndarray) and ky.ndim > 0 else 1/pq.deg\n\n g_1 = kronecker_delta(kx, kx_g) * kronecker_delta(ky, ky_g) * kronecker_delta(w, w_g)\n g_2 = kronecker_delta(kx, -kx_g) * kronecker_delta(ky, -ky_g) * kronecker_delta(w, -w_g)\n\n return 4 * np.pi**3 * contrast * (g_1 + g_2) / dw.magnitude / dkx.magnitude / dky.magnitude", "def test_3D_m4_1k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)", "def computeCenters3d(self, 
data):\n\n\n for i in range(self.nPoints):\n print(\"Label of point \", i, \" is \", self.labels[i])\n for j in range(3):\n self.centers[self.labels[i]][j] += data[i][j]\n\n for c in range(self.n):\n for j in range(3):\n self.centers[c][j] /= self.tots[c]", "def threej(self, coeff):\n j1, m1, j2, m2, j3, m3 = coeff\n myargs = tuple(int(2*x) for x in (j1, j2, j3, m1, m2, m3))\n L1, L2, L3, M1, M2, M3 = myargs\n if M1 + M2 + M3 != 0:\n return 0\n elif abs(L1 - L2) > L3:\n return 0\n elif L1 + L2 < L3:\n return 0\n else:\n return py3nj.wigner3j(*myargs)", "def test_projection_v3_z(self):\n\n from pedemath.vec3 import projection_v3\n\n vec_a = Vec3(3, 4, 5)\n vec_b = Vec3(0, 0, 1)\n\n result = projection_v3(vec_a, vec_b)\n\n self.assertEqual(5, result)", "def extract3d(xaxis, yaxis, zaxis, dat3d, crd_sys, xvec,yvec, zvec, pad=0.):\n func = RegularGridInterpolator((xaxis, yaxis, zaxis), dat3d, \n method='linear', bounds_error=False, fill_value=pad)\n\n # convert x,y,z coordinates to spherical coordinates\n if crd_sys == 'car':\n profx = xvec\n profy = yvec\n profz = zvec\n elif crd_sys == 'sph':\n # radius\n profx = np.sqrt(xvec**2 + yvec**2 + zvec**2)\n\n # theta\n tvec = np.arctan2(zvec, np.sqrt(xvec**2 + yvec**2))\n reg = tvec < 0.\n tvec[reg] = tvec[reg] + 2.*np.pi\n profy = tvec\n\n # azimuth\n pvec = np.arctan2(yvec, xvec)\n reg = pvec < 0\n pvec[reg] = pvec[reg] + 2*np.pi\n profz = pvec\n\n nvec = len(xvec)\n prof = np.zeros([nvec], dtype=np.float64)\n for ii in range(nvec):\n prof[ii] = func([profx[ii], profy[ii], profz[ii]])\n\n return prof", "def test_3D_m4_2k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)", "def test_grasp_3D_location(self):\n self.taskRunner.callOnThread(self.grasp_3D_location)", "def _getitem3d(self, index):\n\n lovects = self._getlovects()\n hivects = self._gethivects()\n fields = self._getfields()\n\n ix = index[0]\n iy = index[1]\n iz = index[2]\n\n if len(fields[0].shape) > self.dim:\n ncomps = fields[0].shape[-1]\n else:\n ncomps = 1\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[-1]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - self.nghosts\n ny = hivects[1,:].max() - self.nghosts\n nz = hivects[2,:].max() - self.nghosts\n\n if npes > 1:\n nx = comm_world.allreduce(nx, op=mpi.MAX)\n ny = comm_world.allreduce(ny, op=mpi.MAX)\n nz = comm_world.allreduce(nz, op=mpi.MAX)\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -self.nghosts, -self.nghosts)\n ixstop = min(ix.stop or nx + 1 + self.nghosts, nx + self.overlaps[0] + self.nghosts)\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iy, slice):\n iystart = max(iy.start or -self.nghosts, -self.nghosts)\n iystop = min(iy.stop or ny + 1 + self.nghosts, ny + self.overlaps[1] + self.nghosts)\n else:\n iystart = iy\n iystop = iy + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -self.nghosts, -self.nghosts)\n izstop = min(iz.stop or nz + 1 + self.nghosts, nz + self.overlaps[2] + self.nghosts)\n else:\n izstart = iz\n izstop = iz + 1\n\n # --- Setup the size of the array to be returned and create it.\n # --- Space is added for multiple components if needed.\n 
sss = (max(0, ixstop - ixstart),\n max(0, iystop - iystart),\n max(0, izstop - izstart))\n if ncomps > 1 and ic is None:\n sss = tuple(list(sss) + [ncomps])\n resultglobal = np.zeros(sss, dtype=_libwarpx._numpy_real_dtype)\n\n datalist = []\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iy1 = max(iystart, lovects[1,i])\n iy2 = min(iystop, lovects[1,i] + fields[i].shape[1])\n iz1 = max(izstart, lovects[2,i])\n iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])\n\n if ix1 < ix2 and iy1 < iy2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iy1 - lovects[1,i], iy2 - lovects[1,i]),\n slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iy1 - iystart, iy2 - iystart),\n slice(iz1 - izstart, iz2 - izstart))\n\n datalist.append((vslice, fields[i][sss]))\n\n if npes == 1:\n all_datalist = [datalist]\n else:\n all_datalist = comm_world.allgather(datalist)\n\n for datalist in all_datalist:\n for vslice, ff in datalist:\n resultglobal[vslice] = ff\n\n # --- Now remove any of the reduced dimensions.\n sss = [slice(None), slice(None), slice(None)]\n if not isinstance(ix, slice):\n sss[0] = 0\n if not isinstance(iy, slice):\n sss[1] = 0\n if not isinstance(iz, slice):\n sss[2] = 0\n\n return resultglobal[tuple(sss)]", "def _interpolate_scalar_3d(volume, dkk, dii, djj):\n ns = volume.shape[0]\n nr = volume.shape[1]\n nc = volume.shape[2]\n\n if not (-1 < dkk < ns and -1 < dii < nr and -1 < djj < nc):\n out = 0\n return 0\n # find the top left index and the interpolation coefficients\n kk = np.floor(dkk).astype('int')\n ii = np.floor(dii).astype('int')\n jj = np.floor(djj).astype('int')\n # no one is affected\n cgamma = (dkk - kk).astype('float32')\n calpha = (dii - ii).astype('float32')\n cbeta = (djj - jj).astype('float32')\n alpha = (1.0 - calpha).astype('float32')\n beta = (1.0 - cbeta).astype('float32')\n gamma = (1.0 - cgamma).astype('float32')\n\n inside = 0\n # ---top-left\n if (ii >= 0) and (jj >= 0) and (kk >= 0):\n out = alpha * beta * gamma * volume[kk, ii, jj]\n inside += 1\n else:\n out = 0\n # ---top-right\n jj += 1\n if (ii >= 0) and (jj < nc) and (kk >= 0):\n out += alpha * cbeta * gamma * volume[kk, ii, jj]\n inside += 1\n # ---bottom-right\n ii += 1\n if (ii < nr) and (jj < nc) and (kk >= 0):\n out += calpha * cbeta * gamma * volume[kk, ii, jj]\n inside += 1\n # ---bottom-left\n jj -= 1\n if (ii < nr) and (jj >= 0) and (kk >= 0):\n out += calpha * beta * gamma * volume[kk, ii, jj]\n inside += 1\n kk += 1\n if(kk < ns):\n ii -= 1\n if (ii >= 0) and (jj >= 0):\n out += alpha * beta * cgamma * volume[kk, ii, jj]\n inside += 1\n jj += 1\n if (ii >= 0) and (jj < nc):\n out += alpha * cbeta * cgamma * volume[kk, ii, jj]\n inside += 1\n # ---bottom-right\n ii += 1\n if (ii < nr) and (jj < nc):\n out += calpha * cbeta * cgamma * volume[kk, ii, jj]\n inside += 1\n # ---bottom-left\n jj -= 1\n if (ii < nr) and (jj >= 0):\n out += calpha * beta * cgamma * volume[kk, ii, jj]\n inside += 1\n\n # assert that inside == 8\n #return 1 if inside == 8 else 0\n return out", "def rk3(x,t,h,f):\n \n k1=h*f(x,t)\n k2=h*f(x+k1/2,t+h/2)\n k3=h*f(x-k1+2*k2,t+h)\n\n return x + (k1 + 4*k2 + k3)/6", "def is3_d(self):\n return self.container['is3_d']", "def test_3D_m8_1k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, 
discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)", "def get_reproj_errors(img_idx, points3d_with_views, R, t, K, keypoints, distCoeffs=np.array([])):\n points_3d, points_2d, pt3d_idxs = prep_for_reproj(img_idx, points3d_with_views, keypoints)\n rvec, _ = cv2.Rodrigues(R)\n projPoints, _ = cv2.projectPoints(points_3d, rvec, t, K, distCoeffs=distCoeffs)\n projPoints = np.squeeze(projPoints)\n avg_error, errors = calculate_reproj_errors(projPoints, points_2d)\n\n return points_3d, points_2d, avg_error, errors", "def evaluate(self,coeffs,evalpts):\n a1,a2,a3,A0,E0,G0,n = coeffs\n x = asarray(evalpts) #XXX: requires a numpy.array\n return (a1 + a2*x + a3*x*x + A0 * ( G0/(2*pi) )/( (x-E0)*(x-E0)+(G0/2)*(G0/2) ))/n", "def calculate_potential_3D(true_csd, ele_xx, ele_yy, ele_zz, \n csd_x, csd_y, csd_z):\n xlin = csd_x[:,0,0]\n ylin = csd_y[0,:,0]\n zlin = csd_z[0,0,:]\n xlims = [xlin[0], xlin[-1]]\n ylims = [ylin[0], ylin[-1]]\n zlims = [zlin[0], zlin[-1]]\n sigma = 1.0\n pots = np.zeros(len(ele_xx))\n tic = time.time()\n for ii in range(len(ele_xx)):\n pots[ii] = integrate_3D(ele_xx[ii], ele_yy[ii], ele_zz[ii],\n xlims, ylims, zlims, true_csd, \n xlin, ylin, zlin, \n csd_x, csd_y, csd_z)\n print 'Electrode:', ii\n pots /= 4*np.pi*sigma\n toc = time.time() - tic\n print toc, 'Total time taken - series, sims'\n return pots", "def test_fun_result(self):\n x = CArray([3, 5])\n correct_result = x[0] ** 2 + x[1] ** 2\n self._test_fun_result(self.fun, x, correct_result.item())", "def _evaluate(self, x, y, z):\n if _isscalar(x):\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (z - self.z_list[z_pos - 1]) / (\n self.z_list[z_pos] - self.z_list[z_pos - 1]\n )\n f = (1 - alpha) * self.xyInterpolators[z_pos - 1](\n x, y\n ) + alpha * self.xyInterpolators[z_pos](x, y)\n else:\n m = len(x)\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n f = np.zeros(m) + np.nan\n if x.size > 0:\n for i in range(1, self.z_n):\n c = z_pos == i\n if np.any(c):\n alpha = (z[c] - self.z_list[i - 1]) / (\n self.z_list[i] - self.z_list[i - 1]\n )\n f[c] = (1 - alpha) * self.xyInterpolators[i - 1](\n x[c], y[c]\n ) + alpha * self.xyInterpolators[i](x[c], y[c])\n return f", "def calc_vad_3d(az, elev, vel):\n elev = np.deg2rad(elev)\n az = np.deg2rad(az)\n\n if vel.size > 1: # If there could be sufficient data points...\n A = sum(vel * np.sin(az))\n B = sum(np.sin(az) ** 2 * np.cos(elev))\n C = sum(np.cos(az) * np.sin(az) * np.cos(elev))\n G = sum(np.sin(az) * np.sin(elev))\n\n D = sum(vel * np.cos(az))\n E = sum(np.sin(az) * np.cos(az) * np.cos(elev))\n F = sum(np.cos(az) ** 2 * np.cos(elev))\n H = sum(np.cos(az) * np.sin(elev))\n\n W = sum(vel)\n X = sum(np.sin(az) * np.cos(elev))\n Y = sum(np.cos(az) * np.cos(elev))\n Z = sum(az * np.sin(elev))\n\n # solve A = uB + vC + wG , D = uE + vF + wH and W = uX + vY+ wZ\n y = np.array([[B, E, X], [C, F, Y], [G, H, Z]])\n z = np.array([A, D, W])\n # print y\n # print z\n try:\n sol = np.linalg.solve(y, z)\n # print sol\n u = sol[0]\n v = sol[1]\n w = sol[2]\n return u, v, w\n except np.linalg.linalg.LinAlgError:\n return FILL_VALUE, FILL_VALUE, FILL_VALUE\n else:\n return FILL_VALUE, 
FILL_VALUE, FILL_VALUE", "def compute(self, *args, **kwargs):\n vertices = args[0]\n xpts = vertices[2] # z plays the 'x' part\n ypts = vertices[0] # x plays the 'y' part\n #zpts = vertices[1]\n #********************************************\n # switcharoo: using z in place of x\n # using x in place of y\n # i.e.\n #\n # y <- x\n # x <- z\n #\n qxdot = np.dot(xpts,self.localBasis[1])\n qxddot = np.dot(xpts,self.localBasis[2])\n qydot = np.dot(ypts,self.localBasis[1])\n qyddot = np.dot(ypts,self.localBasis[2])\n store = (qxdot*qyddot - qydot*qxddot)\n temp = np.sqrt(qxdot**2 + qydot**2)\n if isinstance(temp, ia):\n if temp.inf<=0:\n temp.inf = 0.\n denom = temp*((temp)**2)#**.5## #problem foud with sqrt\n #\n curvature = store/denom#((np.sqrt(qxdot*qxdot + qydot*qydot))**3.)\n return curvature", "def _krls_evaluate(self, dAldKRLS):\n \n # Get the needed data from the dictionary with data\n mDict = dAldKRLS['mDict']\n vAlpha = dAldKRLS['vAlpha']\n \n (iRowsDict, _) = mDict.shape # Get the number of rows from the dictionary\n if iRowsDict > 0:\n vX = np.dot(vAlpha.T, mDict)\n else:\n vX = np.zeros((iRowsDict,1))\n \n return vX", "def run_3D_predictions(self, min_size=5000):\n cases = self.test_loader.dataset.im_ids\n assert len(cases) == len(self.test_loader)\n for (test_batch, case) in tqdm(zip(self.test_loader, cases), total=len(cases)):\n test_x = torch.squeeze(test_batch[0], dim=0)\n if self.pseudo_3D:\n pred, _, act, _ = self.model.predict_3D_pseudo3D_2Dconv(test_x,\n **self.pred_3D_params)\n else:\n pred, _, act, _ = self.model.predict_3D(test_x,\n **self.pred_3D_params)\n assert len(pred.shape) == 3\n assert len(act.shape) == 4\n pred = remove_3D_connected_components(pred, min_size=min_size)\n pred = self.post_process_stage1(pred)\n self.save_pred(pred, act, case)\n case_raw = Path(case).name\n bbox_coord = self.create_bbox_stage1(pred, case_raw)\n self.bbox_coords[case_raw] = bbox_coord\n self.save_bbox_coords()", "def scaldiv3d(self, k: float=1.) 
-> dict:\n\n return {\"vect3D\": self.v3ddict.scaldiv3d(k=k).getDict()}", "def prep_for_reproj(img_idx, points3d_with_views, keypoints):\n points_3d = []\n points_2d = []\n pt3d_idxs = []\n i = 0\n for pt3d in points3d_with_views:\n if img_idx in pt3d.source_2dpt_idxs.keys():\n pt3d_idxs.append(i)\n points_3d.append(pt3d.point3d)\n kpt_idx = pt3d.source_2dpt_idxs[img_idx]\n points_2d.append(keypoints[img_idx][kpt_idx].pt)\n i += 1\n\n return np.array(points_3d), np.array(points_2d), pt3d_idxs", "def test_std_3d(self):\r\n inp3d = array( # 2,2,3\r\n [[[0, 2, 2],\r\n [3, 4, 5]],\r\n\r\n [[1, 9, 0],\r\n [9, 10, 1]]])\r\n exp3d = ( # for axis None, 0, 1, 2: calc from scipy.stats.std\r\n 3.63901418552,\r\n array([[0.70710678, 4.94974747, 1.41421356],\r\n [4.24264069, 4.24264069, 2.82842712]]),\r\n array([[2.12132034, 1.41421356, 2.12132034],\r\n [5.65685425, 0.70710678, 0.70710678]]),\r\n array([[1.15470054, 1.],\r\n [4.93288286, 4.93288286]]))\r\n res = tuple(std(inp3d, ax) for ax in [None, 0, 1, 2])\r\n for obs, exp in zip(res, exp3d):\r\n testing.assert_almost_equal(obs, exp)", "def plot_results_3d(p_x, p_y, p_z, h_exp = 0.5):\n plt.figure(figsize = (10, 10))\n ax3d = plt.axes(projection = '3d') \n\n color=iter(cm.rainbow(np.linspace(0,1,p_x.shape[0]))) # (1)\n labels = ['Particle ' + str(pl+1) for pl in np.arange(0, p_x.shape[0], step = 1)]\n \n for p in np.arange(0, p_x.shape[0], step = 1): \n c = next(color) # (1)\n for t in np.arange(0, p_x.shape[1], step = 1): \n ax3d.plot3D(p_x[p, t], p_y[p, t], p_z[p, t], 'x', c = c, label = labels[p]) \n legend_without_duplicate_labels(ax3d)\n ax3d.set_xlabel('X (pixels)') \n ax3d.set_ylabel('Y (pixels') \n ax3d.set_zlabel('Z (pixels)') \n ax3d.set_xlim([origin-150,origin+150])\n ax3d.set_ylim([origin-150,origin+150])\n ax3d.set_zlim([origin-150,origin+150])\n ax3d.set_title('3D particle trajectories - H = ' + str(h_exp))", "def plane_equation(p1, p2, p3):\n a1 = p2[0] - p1[0]\n b1 = p2[1] - p1[1]\n c1 = p2[2] - p1[2]\n a2 = p3[0] - p1[0]\n b2 = p3[1] - p1[1]\n c2 = p3[2] - p1[2]\n a = b1 * c2 - b2 * c1\n b = a2 * c1 - a1 * c2\n c = a1 * b2 - b1 * a2\n # Points are collinear\n if (abs(a) < 1e-6) and (abs(b) < 1e-6) and (abs(c) < 1e-6):\n return None\n # All clear\n d = (- a * p1[0] - b * p1[1] - c * p1[2])\n return a, b, c, d", "def test_3(self):\n # Run Newton's method. Last starting point is wildly high. How will Newton perform??\n starting_points = (0.1, 4.0, -0.2, -0.1)\n assert len(starting_points) == 4\n\n logging.info(\"\\nRUNNING EXERCISE 1.3C\")\n newton_roots = [undertest.newton(self.func, self.derivative, x0, 50)\n for x0 in starting_points]\n\n # Run secant-based methods. Last interval has a high right endpoint. 
How will the algos do?\n secant_intervals = [(0.9, 10.0), (-0.2, 3.0), (0.1, 6.0), (1.9, 20.0), (20.0, 1.9)]\n assert len(secant_intervals) == 5\n logging.info(\"\\nRUNNING EXERCISE 1.3D\")\n secant_results = [undertest.secant(self.func, prev, current, self.maxit)\n for (prev, current) in secant_intervals]\n logging.info(\"\\nRUNNING EXERCISE 1.3E\")\n regula_falsi_results = [undertest.regula_falsi(self.func, prev, current, 100)\n for (prev, current) in secant_intervals]\n logging.info(\"\\nRUNNING EXERCISE 1.3F\")\n wheeler_results = [undertest.wheeler(self.func, prev, current, 20)\n for (prev, current) in secant_intervals]", "def test_3D_m8_2k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)", "def pm3d_formula(x,formula):\n \n if(formula<0):\t\t\n\tx=1.0-x\n\tformula=-formula\n\n if(formula==0): return 0\n elif(formula==1): return 0.5\n elif(formula==2): return 1\n elif(formula==3): return x\n elif(formula==4): return(x * x)\n elif(formula==5): return(x * x * x)\n elif(formula==6): return(x * x * x * x)\n elif(formula==7): return(Numeric.sqrt(x))\n elif(formula==8): return(x**0.25)\n elif(formula==9): return(Numeric.sin(90.0 * x * DEG2RAD))\n elif(formula==10): return(Numeric.cos(90 * x * DEG2RAD))\n elif(formula==11): return(Numeric.fabs(x - 0.5))\n elif(formula==12): return((2 * x - 1) * (2.0 * x - 1))\n elif(formula==13): return(Numeric.sin(180 * x * DEG2RAD))\n elif(formula==14): return(Numeric.fabs(cos(180 * x * DEG2RAD)))\n elif(formula==15): return(Numeric.sin(360 * x * DEG2RAD))\n elif(formula==16): return(Numeric.cos(360 * x * DEG2RAD))\n elif(formula==17): return(Numeric.fabs(Numeric.sin(360 * x * DEG2RAD)))\n elif(formula==18): return(Numeric.fabs(Numeric.cos(360 * x * DEG2RAD)))\n elif(formula==19): return(Numeric.fabs(Numeric.sin(720 * x * DEG2RAD)))\n elif(formula==20): return(Numeric.fabs(Numeric.cos(720 * x * DEG2RAD)))\n elif(formula==21): return(3 * x) # ???????\n elif(formula==22): return(3 * x - 1)\n elif(formula==23): return(3 * x - 2)\n elif(formula==24): return(Numeric.fabs(3 * x - 1))\n elif(formula==25): return(Numeric.fabs(3 * x - 2))\n elif(formula==26): return((1.5 * x - 0.5))\n elif(formula==27): return((1.5 * x - 1))\n elif(formula==28): return(Numeric.fabs(1.5 * x - 0.5))\n elif(formula==29): return(Numeric.fabs(1.5 * x - 1))\n elif(formula==30):\n if (x <= 0.25): return 0.0\n if (x >= 0.57): return 1.0\n\treturn(x / 0.32 - 0.78125)\n elif(formula==31):\n if (x <= 0.42): return 0.0\n if (x >= 0.92): return 1.0\n\treturn(2 * x - 0.84)\n elif(formula==32):\n if (x <= 0.42): return(4*x)\n if (x <= 0.92): return(-2 * x + 1.84)\n return(x / 0.08 - 11.5)\n elif(formula==33): return(Numeric.fabs(2 * x - 0.5))\n elif(formula==34): return(2 * x)\n elif(formula==35): return(2 * x - 0.5)\n elif(formula==36): return(2 * x - 1)\n return(0)", "def compute_exam_p_and_r(y_true_3d, y_pred_3d, n_sents, n_words, silent=False):\n p_list = list()\n r_list = list()\n d_batch = y_true_3d.shape[0]\n\n for sample_idx in range(d_batch):\n n_sent = n_sents[sample_idx, 0]\n for sent_idx in range(n_sent):\n n_word = n_words[sample_idx, sent_idx]\n y_true = y_true_3d[sample_idx, sent_idx, :n_word]\n y_pred = 
y_pred_3d[sample_idx, sent_idx, :n_word]\n if not silent and not y_pred.any() == 1:\n logger.info('No pred is made for: {0}.{1}. y_pred: {2}'.format(sample_idx, sent_idx, y_pred))\n\n p_list.append(precision_score(y_true, y_pred))\n r_list.append(recall_score(y_true, y_pred))\n\n return p_list, r_list", "def evalQuad(a,b,c,x):\n return a * x**2 + b*x + c", "def test_projection_v3_y(self):\n from pedemath.vec3 import projection_v3\n\n vec_a = Vec3(3, 4, 5)\n vec_b = Vec3(0, 1, 0)\n\n result = projection_v3(vec_a, vec_b)\n\n self.assertEqual(4, result)", "def triangulate_points_and_reproject(R_l, t_l, R_r, t_r, K, points3d_with_views, img_idx1, img_idx2, kpts_i, kpts_j, kpts_i_idxs, kpts_j_idxs, reproject=True):\n\n print(f\"Triangulating: {len(kpts_i)} points.\")\n P_l = np.dot(K, np.hstack((R_l, t_l)))\n P_r = np.dot(K, np.hstack((R_r, t_r)))\n\n kpts_i = np.squeeze(kpts_i)\n kpts_i = kpts_i.transpose()\n kpts_i = kpts_i.reshape(2,-1)\n kpts_j = np.squeeze(kpts_j)\n kpts_j = kpts_j.transpose()\n kpts_j = kpts_j.reshape(2,-1)\n\n point_4d_hom = cv2.triangulatePoints(P_l, P_r, kpts_i, kpts_j)\n points_3D = cv2.convertPointsFromHomogeneous(point_4d_hom.transpose())\n for i in range(kpts_i.shape[1]):\n source_2dpt_idxs = {img_idx1:kpts_i_idxs[i], img_idx2:kpts_j_idxs[i]}\n pt = Point3D_with_views(points_3D[i], source_2dpt_idxs)\n points3d_with_views.append(pt)\n\n if reproject:\n kpts_i = kpts_i.transpose()\n kpts_j = kpts_j.transpose()\n rvec_l, _ = cv2.Rodrigues(R_l)\n rvec_r, _ = cv2.Rodrigues(R_r)\n projPoints_l, _ = cv2.projectPoints(points_3D, rvec_l, t_l, K, distCoeffs=np.array([]))\n projPoints_r, _ = cv2.projectPoints(points_3D, rvec_r, t_r, K, distCoeffs=np.array([]))\n delta_l , delta_r = [], []\n for i in range(len(projPoints_l)):\n delta_l.append(abs(projPoints_l[i][0][0] - kpts_i[i][0]))\n delta_l.append(abs(projPoints_l[i][0][1] - kpts_i[i][1]))\n delta_r.append(abs(projPoints_r[i][0][0] - kpts_j[i][0]))\n delta_r.append(abs(projPoints_r[i][0][1] - kpts_j[i][1]))\n avg_error_l = sum(delta_l)/len(delta_l)\n avg_error_r = sum(delta_r)/len(delta_r)\n print(f\"Average reprojection error for just-triangulated points on image {img_idx1} is:\", avg_error_l, \"pixels.\")\n print(f\"Average reprojection error for just-triangulated points on image {img_idx2} is:\", avg_error_r, \"pixels.\")\n errors = list(zip(delta_l, delta_r))\n return points3d_with_views, errors, avg_error_l, avg_error_r\n\n return points3d_with_views", "def test_3d_freq():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/ft/test%03d.ft3\")\n sdic,sdata = ng.pipe.read(\"common_data/3d_pipe/ft/test001.ft3\")\n\n assert data.shape == (128, 128, 4096)\n assert data.dtype == 'float32'\n assert round(data[0,1,2],2) == 25980.13\n assert round(data[10,22,5],2) == 1561.09\n check_ppm_limits(dic,data,0,[78.10, 34.24])\n check_ppm_limits(dic,data,1,[147.42, 93.01])\n check_ppm_limits(dic,data,2,[254.92, -142.83])\n\n # and the first slice\n assert sdata.shape == (128, 4096)\n assert sdata.dtype == 'float32'\n assert round(sdata[1,2],2) == 25980.13\n assert round(sdata[22,5],2) == -8336.05\n check_ppm_limits(sdic,sdata,0,[147.42, 93.01])\n check_ppm_limits(sdic,sdata,1,[254.92, -142.83])\n\n # slice/data matching\n assert_array_equal(data[0],sdata)\n\n write_readback_3D(dic,data)", "def test_get_coords(self):\n known_values = {\n 1: (0, 0),\n 2: (1, 0),\n 3: (1, 1),\n 4: (0, 1),\n 5: (-1, 1),\n 6: (-1, 0),\n 7: (-1, -1),\n 8: (0, -1),\n 9: (1, -1),\n 10: (2, -1),\n 11: (2, 0),\n 12: (2, 1),\n 13: (2, 2),\n 14: (1, 2),\n 15: (0, 
2),\n 16: (-1, 2),\n 17: (-2, 2),\n 18: (-2, 1),\n 19: (-2, 0),\n 20: (-2, -1),\n 21: (-2, -2),\n 22: (-1, -2),\n 23: (0, -2),\n 24: (1, -2),\n 25: (2, -2),\n 26: (3, -2),\n }\n\n for number, expected in known_values.items():\n actual = coords_3a(number)\n message = (\"Testing input '{}', expected '{}' but got '{}'\"\n .format(number, expected, actual))\n self.assertEqual(actual, expected, msg=message)", "def intersect_triangle(v1, v2, v3, pos):\r\n #calc normal from two edge vectors v2-v1 and v3-v1\r\n nVec = cross(subtract(v2, v1), subtract(v3, v1))\r\n #equation of plane: Ax + By + Cz = kVal where A,B,C are components of normal. x,y,z for point v1 to find kVal\r\n kVal = dot(nVec,v1)\r\n #return y val i.e. y = (kVal - Ax - Cz)/B\r\n return (kVal - nVec[0]*pos[0] - nVec[2]*pos[2])/nVec[1]", "def dist3d(self, endroit3D: Endroit3D) -> float:\n\n return self.p3ddict.dist3d(endroit3D.p3ddict)", "def _evaluator(idx_dct):\n idx_dct = augment_index_dict_with_hydrogen_keys(\n gra, idx_dct, break_ties=False, neg=True)\n\n def _parity(key):\n key1, key2 = key\n nkey1s = nkeys_dct[key1] - {key2}\n nkey2s = nkeys_dct[key2] - {key1}\n\n nmax1 = max(nkey1s, key=idx_dct.__getitem__)\n nmax2 = max(nkey2s, key=idx_dct.__getitem__)\n\n xyz1 = xyz_dct[key1]\n xyz2 = xyz_dct[key2]\n nxyz1 = xyz_dct[nmax1]\n nxyz2 = xyz_dct[nmax2]\n\n bnd1_vec = numpy.subtract(nxyz1, xyz1)\n bnd2_vec = numpy.subtract(nxyz2, xyz2)\n\n dot_val = numpy.vdot(bnd1_vec, bnd2_vec)\n assert dot_val != 0. # for now, assume not collinear\n par = dot_val < 0.\n return par\n\n return _parity", "def eq_to_3d(ra, dec):\r\n x = np.cos(ra) * np.cos(dec)\r\n y = np.sin(ra) * np.cos(dec)\r\n z = np.sin(dec)\r\n return x, y, z", "def D3(self, *args):\n return _Adaptor3d.Adaptor3d_Surface_D3(self, *args)", "def rewofzs3(x,y):\n\n z=x+y*(1j)\n a=1.0/(2.0*z*z)\n q=(1j)/(z*jnp.sqrt(jnp.pi))*(1.0 + a*(1.0 + a*(3.0 + a*(15.0+a*105.0))))\n return jnp.real(q)", "def iterate(rk):\n y = scipy.sparse.linalg.spsolve(P1, rk)\n RHS = scipy.sparse.csr_matrix.dot(P4, y) + rk\n zk = scipy.sparse.linalg.spsolve(P3, RHS)\n return zk", "def _evaluate_xyz(self,x,y,z=0.):\n return -np.pi * self._rhoc_M /(self.n+1.)*self.a**3*self._b*self._c * \\\n _potInt(x, y, z, self._a2, self._b2*self._a2, self._c2*self._a2, self.n)", "def get_joystickB3d(self, index=0):\r\n return (self.handler.absx2[index], self.handler.absy2[index], self.handler.absz2[index])", "def k3(self, f, t, u, k2, usereverse=False):\n\n h = self._delta_t / 2.0\n\n v = u + h * k2\n tt = t + h\n\n if usereverse:\n output, storagez, storagew = f(tt, v)\n\n self.storagez.append(storagez)\n self.storagew.append(storagew)\n\n return output\n else:\n return f(tt, v)", "def test_step3(self):\n fun = get_problem('step3', self.dimension)\n self.assertEqual(fun(self.array), 0.0)", "def test_2_2_3D_cube_splits(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0),\n (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5), (0.0, 0.5, 0.5),\n (0.0, 0.0, 0.5), (0.0, 0.5, 0.0), (0.5, 0.0, 0.5),\n (0.5, 0.0, 0.0),\n (0.5, 0.5, 0.0), (0.25, 0.25, 0.25), (1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5),\n (1.0, 0.5, 1.0), (0.5, 1.0, 0.5), (0.5, 1.0, 1.0),\n (0.5, 0.5, 1.0),\n (0.75, 0.75, 0.75), (1.0, 0.0, 0.5), (1.0, 0.5, 0.0),\n (0.75, 0.25, 0.25), (0.5, 1.0, 0.0), (0.75, 0.75, 0.25),\n (0.5, 0.0, 1.0), (0.75, 0.25, 0.75), (0.0, 1.0, 0.5),\n (0.25, 0.75, 0.25), (0.0, 0.5, 1.0), (0.25, 0.75, 0.75),\n (0.25, 0.25, 0.75), (0.5, 0.25, 0.25), (0.5, 0.5, 0.25),\n (0.5, 0.25, 0.5), (0.25, 0.5, 0.25), (0.25, 0.5, 0.5),\n (0.25, 
0.25, 0.5), (0.375, 0.375, 0.375), (0.0, 0.25, 0.25),\n (0.0, 0.0, 0.25), (0.0, 0.25, 0.0), (0.25, 0.0, 0.25),\n (0.25, 0.0, 0.0), (0.25, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.5, 0.25), (0.0, 0.25, 0.5), (0.125, 0.375, 0.375),\n (0.25, 0.0, 0.5), (0.125, 0.125, 0.375), (0.25, 0.5, 0.0),\n (0.125, 0.375, 0.125), (0.5, 0.0, 0.25), (0.375, 0.125, 0.375),\n (0.5, 0.25, 0.0), (0.375, 0.125, 0.125), (0.375, 0.375, 0.125),\n (0.5, 0.75, 0.75), (0.5, 0.5, 0.75), (0.5, 0.75, 0.5),\n (0.75, 0.5, 0.75), (0.75, 0.5, 0.5), (0.75, 0.75, 0.5),\n (0.625, 0.625, 0.625), (1.0, 0.75, 0.75), (1.0, 1.0, 0.75),\n (1.0, 0.75, 1.0), (0.75, 1.0, 0.75), (0.75, 1.0, 1.0),\n (0.75, 0.75, 1.0), (0.875, 0.875, 0.875), (1.0, 0.5, 0.75),\n (1.0, 0.75, 0.5), (0.875, 0.625, 0.625), (0.75, 1.0, 0.5),\n (0.875, 0.875, 0.625), (0.75, 0.5, 1.0), (0.875, 0.625, 0.875),\n (0.5, 1.0, 0.75), (0.625, 0.875, 0.625), (0.5, 0.75, 1.0),\n (0.625, 0.875, 0.875), (0.625, 0.625, 0.875),\n (0.75, 0.5, 0.25),\n (0.75, 0.25, 0.5), (0.625, 0.375, 0.375), (1.0, 0.25, 0.25),\n (1.0, 0.0, 0.25), (1.0, 0.25, 0.0), (0.75, 0.0, 0.25),\n (0.75, 0.0, 0.0), (0.75, 0.25, 0.0), (0.875, 0.125, 0.125),\n (1.0, 0.5, 0.25), (1.0, 0.25, 0.5), (0.875, 0.375, 0.375),\n (0.75, 0.0, 0.5), (0.875, 0.125, 0.375), (0.75, 0.5, 0.0),\n (0.875, 0.375, 0.125), (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.625, 0.375, 0.125), (0.5, 0.75, 0.25),\n (0.625, 0.625, 0.375),\n (1.0, 0.75, 0.25), (1.0, 1.0, 0.25), (1.0, 0.75, 0.0),\n (0.75, 1.0, 0.25), (0.75, 1.0, 0.0), (0.75, 0.75, 0.0),\n (0.875, 0.875, 0.125), (0.875, 0.625, 0.375),\n (0.875, 0.875, 0.375),\n (0.875, 0.625, 0.125), (0.5, 1.0, 0.25), (0.625, 0.875, 0.375),\n (0.5, 0.75, 0.0), (0.625, 0.875, 0.125), (0.625, 0.625, 0.125),\n (0.5, 0.25, 0.75), (0.625, 0.375, 0.625), (1.0, 0.25, 0.75),\n (1.0, 0.0, 0.75), (1.0, 0.25, 1.0), (0.75, 0.0, 0.75),\n (0.75, 0.0, 1.0), (0.75, 0.25, 1.0), (0.875, 0.125, 0.875),\n (0.875, 0.375, 0.625), (0.875, 0.125, 0.625),\n (0.875, 0.375, 0.875),\n (0.5, 0.0, 0.75), (0.625, 0.125, 0.625), (0.5, 0.25, 1.0),\n (0.625, 0.125, 0.875), (0.625, 0.375, 0.875),\n (0.25, 0.75, 0.5),\n (0.375, 0.625, 0.375), (0.0, 0.75, 0.25), (0.0, 1.0, 0.25),\n (0.0, 0.75, 0.0), (0.25, 1.0, 0.25), (0.25, 1.0, 0.0),\n (0.25, 0.75, 0.0), (0.125, 0.875, 0.125), (0.0, 0.75, 0.5),\n (0.125, 0.625, 0.375), (0.25, 1.0, 0.5), (0.125, 0.875, 0.375),\n (0.125, 0.625, 0.125), (0.375, 0.875, 0.375),\n (0.375, 0.875, 0.125),\n (0.375, 0.625, 0.125), (0.25, 0.5, 0.75),\n (0.375, 0.625, 0.625),\n (0.0, 0.75, 0.75), (0.0, 1.0, 0.75), (0.0, 0.75, 1.0),\n (0.25, 1.0, 0.75), (0.25, 1.0, 1.0), (0.25, 0.75, 1.0),\n (0.125, 0.875, 0.875), (0.0, 0.5, 0.75), (0.125, 0.625, 0.625),\n (0.125, 0.875, 0.625), (0.25, 0.5, 1.0), (0.125, 0.625, 0.875),\n (0.375, 0.875, 0.625), (0.375, 0.875, 0.875),\n (0.375, 0.625, 0.875),\n (0.375, 0.375, 0.625), (0.0, 0.25, 0.75), (0.0, 0.0, 0.75),\n (0.0, 0.25, 1.0), (0.25, 0.0, 0.75), (0.25, 0.0, 1.0),\n (0.25, 0.25, 1.0), (0.125, 0.125, 0.875),\n (0.125, 0.375, 0.625),\n (0.125, 0.125, 0.625), (0.125, 0.375, 0.875),\n (0.375, 0.125, 0.625),\n (0.375, 0.125, 0.875), (0.375, 0.375, 0.875)]\n\n nn_checks = {(0.5, 0.25, 0.25): [(0.375, 0.375, 0.125), (0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25),\n (0.625, 0.375, 0.375),\n (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.5, 0.5, 0.25), (0.25, 0.25, 0.25),\n (0.375, 0.375, 0.375),\n (0.5, 0.25, 0.5), (0.5, 0.5, 0.5),\n (0.5, 0.0, 0.25),\n (0.375, 0.125, 0.375), (0.5, 0.0, 0.5),\n (0.5, 0.25, 0.0),\n (0.375, 0.125, 0.125), (0.5, 0.0, 0.0),\n 
(0.625, 0.375, 0.125)],\n (0.625, 0.625, 0.875): [(0.75, 0.5, 1.0),\n (0.75, 0.75, 1.0),\n (0.5, 0.75, 1.0), (0.5, 0.5, 1.0),\n (0.5, 0.5, 0.75),\n (0.5, 0.75, 0.75),\n (0.75, 0.5, 0.75),\n (0.75, 0.75, 0.75)],\n (0, 0, 0): [(0.0, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.0, 0.25), (0.25, 0.0, 0.0),\n (0.0, 0.25, 0.25), (0.25, 0.25, 0.0),\n (0.25, 0.0, 0.25)]}\n\n init_triangulation(3, 2, check, nn_checks)", "def _evaluate_xyz(self, x, y, z):\n return (\n 2.0\n * numpy.pi\n * self._b\n * self._c\n * _potInt(\n x, y, z, self._psi, self._b2, self._c2, glx=self._glx, glw=self._glw\n )\n )" ]
[ "0.7018151", "0.6458969", "0.6291701", "0.618679", "0.6173702", "0.6046015", "0.59891486", "0.59870636", "0.59073186", "0.5893742", "0.58693314", "0.5850453", "0.57705677", "0.57656455", "0.57517314", "0.5728884", "0.5630404", "0.561461", "0.5612338", "0.5607166", "0.5588802", "0.5578121", "0.5559084", "0.554585", "0.5545186", "0.54901147", "0.54541326", "0.54537183", "0.54465365", "0.5446072", "0.54380244", "0.5436363", "0.54324126", "0.540791", "0.5398021", "0.5392782", "0.5387564", "0.5374603", "0.5348885", "0.5348438", "0.534064", "0.5337576", "0.5336437", "0.5326786", "0.532406", "0.5323686", "0.5322454", "0.5318574", "0.5305899", "0.5275244", "0.5272297", "0.5253599", "0.5249382", "0.5242821", "0.5237806", "0.5224554", "0.52243507", "0.5213809", "0.5206304", "0.5186725", "0.51861626", "0.5174734", "0.517288", "0.51728004", "0.51720023", "0.51686305", "0.5160564", "0.5156392", "0.515533", "0.5151501", "0.51476157", "0.5147008", "0.51398593", "0.513533", "0.5130166", "0.51179147", "0.51164544", "0.51137894", "0.5108382", "0.51054215", "0.51024616", "0.51012534", "0.51003444", "0.50903827", "0.50900614", "0.50849766", "0.5079095", "0.5078181", "0.5065723", "0.50587237", "0.50578135", "0.5052575", "0.50521284", "0.505185", "0.5049713", "0.5047899", "0.5037962", "0.5037898", "0.503136", "0.5030514", "0.5026953" ]
0.0
-1
Print lots of basic information about the given presentation.
def debug_dump(prs:Presentation):
    print("Presentation has", len(prs.slides), "slides")
    # Print summary of all slides, plus text
    n = 0
    for slide in prs.slides:
        n += 1
        print("========== slide {} ========== [{}]".format(n, slide.slide_layout.name))
        for shape in slide.shapes:
            if not shape.has_text_frame:
                continue
            print(shape.name)
            for paragraph in shape.text_frame.paragraphs:
                for run in paragraph.runs:
                    print(" " + run.text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Print(self):\n\n\t\tif self.verbose:\n\n\t\t print (\"\\033[1m[HEADER]\\033[0m\")\n\t\t print (\"code:\\t\\t%s\" % self.kod)\n\t \tprint (\"version:\\t%s\" % self.ver)\n\t\t print (\"date and time:\\t%s\" % self.probid)\n\t\t print (\"dump number:\\t%s\" % self.knod)\n\t \tprint (\"number of histories:\\t%s\" % self.nps)\n\t\t print (\"number of pseudorandom numbers used:\\t%s\" % self.rnr)\n\t\t print (\"title: %s\" % self.title)\n\n\t\t if self.ntal>1:\n\t\t\t\tprint self.ntal, 'tallies:', self.ntals\n\t \telse:\n\t\t\t\tprint self.ntal, 'tally:', self.ntals\n\n\n\t\t if self.npert != 0:\n\t\t\t\tprint(\"number of perturbations: %s\" % self.npert)", "def print_overview_slide():\r\n print '<div id=\"overview\" class=\"step\" ' \\\r\n ' data-x=\"3000\" data-y=\"1500\" data-scale=\"10\">'\r\n print '</div>'", "def _show(self, indent = 0):\n print(\" \"*indent, \"Name:\", self.name)\n print(\" \"*indent, \"Description:\", self.description)", "def analyse_presentation(pres_name:str, verbose=False) -> Dict[str, Any]:\n prs = Presentation(pres_name)\n if verbose:\n debug_dump(prs)\n (layouts_interactive, layouts) = count_layouts(prs)\n interaction_stars = min(layouts_interactive, 5)\n topic_stars = ([1,1,3,5,5,4,3,2,1]+[1]*100)[layouts[\"Section Header\"]]\n\n pres_properties = get_presentation_properties(prs)\n\n word_count = get_word_counts(prs.slides)\n words_per_slide = sum(word_count) / len(word_count)\n # ideal words/slide is 30-40 (5 stars)\n text_stars = calculate_text_stars(word_count)\n # print(\"word counts:\", word_count)\n\n # Create a list of warnings about very text-heavy slides\n heavy_warnings = []\n for slide, words in enumerate(word_count):\n if words > MAX_WORDS_PER_SLIDE:\n heavy_warnings.append(f\"WARNING: slide {slide} has {words} words!\")\n\n\n slides = get_slide_analytics(prs.slides)\n print(slides)\n result = {\n\n \"presentation_rating_stars_interaction\": interaction_stars,\n \"presentation_rating_stars_section\": topic_stars,\n \"presentation_rating_stars_accessibility\": 3, # not implemented yet!\n \"presentation_rating_stars_text\": text_stars,\n \"presentation_count_slide\": len(prs.slides),\n \"presentation_count_layout\": layouts, # dictionary that maps layout name to count\n \"presentation_total_words\": words_per_slide, # a float\n \"presentation_warning_text_heavy\": heavy_warnings, # a list of warning strings\n \"presentation_data_slides\": slides, # a list of slides and analytics\n \"filename\": pres_name, # TODO: strip any Path and just return file name?\n \"name\": \"ICT999\",\n \"description\": \"Introduction to ICT\"\n }\n\n return result", "def print_details(self):\n self.view.print_details()", "def print_info(self):\n print(\"Experiment key: \" + self.key)\n print(\"Experiment name: \" + self.name)\n print(\"Experiment path: \" + self.output_path)\n print(\"Auto-sync activated: \" + str(self.auto_sync))\n print(\"\")\n print(\"Experiment metadata: \")\n print(self.exp_metadata.to_str())", "def printinfo(assign, question):\n print(\"Last Name: Bell\")\n print (\"First Name: Daniel\")\n print(\"Student ID: 282911\")\n print(\"Course: CPSC 231\")\n print(\"Tutorial Section: T02\")\n print(\"Assignment: %d\" %assign)\n print(\"Question: %s\" %question)\n print(\"\")", "def info():\n print(\"Made using the OOP RPG game creator (c) Claire.\\n\")", "def print_header_information():\n\t\tprint \"Elijah Molloy\"\n\t\tprint \"70-510 - Spring 1 - 2018\"\n\t\tprint \"PROGRAMMING ASSIGNMENT #4\\n\"", "def printDetails(self):\n print str(self.number) + \": 
\" + self.title\n print \"URL: \" + self.URL\n print \"domain: \" + self.domain\n print \"score: \" + str(self.score) + \" points\"\n print \"submitted by: \" + self.submitter\n print \"# of comments: \" + str(self.commentCount)\n print \"'discuss' URL: \" + self.commentsURL\n print \"HN ID: \" + str(self.id)\n print \" \"", "def show_man_page(self):\n print(Gstr_synopsis)", "def display(self):\r\n print(self.title, 'written by', self.author)", "def print_intro(self):\n \n print('Did you know that birds hold the record for longest animal migrations?')", "def test_display_presentation(self):\n response = self._speaker_profile(True)\n self.assertContains(response, FIRST_PRESENTATION_TITLE)\n self.assertContains(response, SECOND_PRESENTATION_TITLE)", "def debug_print(self):\n print self.title\n print self.storyline\n print self.poster_image_url\n print self.trailer_youtube_url\n print \"------\"", "def show_man_page(self):\n \n print(Gstr_synopsis)", "def print_configuration_info():\n print(\"Selected dataset:\", DATASET) \n print(\"Dataset base directory:\", BASE_INPUT_DIR) \n print(\"Daytime option:\", DAYTIME) \n print(\"Nones option:\", NONES) \n print(\"Selected action/activity representation:\", OP)\n print(\"Number of epochs: \", EPOCHS)\n print(\"Number of folds for cross-validation: \", FOLDS)\n print(\"Input directory for data files:\", INPUT_DIR) \n print(\"Embedding matrix file:\", EMBEDDING_WEIGHTS)\n print(\"Action sequences (X) file:\", X_FILE) \n print(\"Word embedding file for activities:\", ACTIVITY_EMBEDDINGS) \n print(\"Activity to int mappings:\", ACTIVITY_TO_INT)\n print(\"Int to activity mappings:\", INT_TO_ACTIVITY) \n print(\"Experiment ID:\", EXPERIMENT_ID)\n print(\"Treat imbalance data:\", TREAT_IMBALANCE)\n print(\"Save intermediate plots:\", SAVE)\n print(\"Batch size:\", BATCH_SIZE)\n print(\"Dropout:\", DROPOUT)\n print(\"Loss:\", LOSS)", "def show_info(self): \n color= Fore.WHITE\n print(f\"\"\" {color} \nNombre: {self.name} \nRuta: {self.route }\nFecha de salida: {self.departure_date}\"\"\")\n print(\"<\"*8, \">\"*8)\n print(\"El precio por habitacion es:\")\n for key, value in self.prize.items():\n color_value= (Fore.GREEN + str(value))\n color_key= Fore.WHITE + \"Habitacion\" + \" \" + key\n print(f\"\"\" {color_key} : {color_value}$ \"\"\")\n \n print(Fore.WHITE + \"<\"*8, \">\"*8)\n for floor, info in self.floors_info.items():\n piso=(Fore.WHITE + floor)\n print(f\" {piso}:{info} \")\n \n \n print(\"<\"*8, \">\"*8)\n print(\"Capacidad por tipo de habitacion: \")\n for key, value in self.room_capacity.items():\n print(f\"Habitacion {key}: {value} personas \",\"\\t\")\n return \"\"", "def show_main_screen():\n option = algo_selection(algos)\n if option == 1:\n print_factorial()\n show_main_screen()\n if option == 2:\n print_gcd()\n show_main_screen()\n if option == 3:\n print_pow()\n show_main_screen()\n if option == 4:\n print_towers()\n show_main_screen()\n if option == 5:\n print_permutations()\n show_main_screen()\n if option == 6:\n raise SystemExit(0)", "def show(self):\n # Used for testing because there is obviously no way back\n # from VISU_Gen.SetCurrentStudy\n if not self.display:\n return\n\n # Desactivation : Load the med file in the PARAVIS component\n #import smeca_utils.visu_utils as VU\n #log.info(\"Loading Paravis module...\")\n #msg = VU.load_med_file(self.read_fname())\n #log.info(msg)", "def display(self):\n print \"\\n\\n***********************\\n\"\n print \"Info about group %s, name=%s, path=%s\" % (self.sdef['id'], \n self.name, 
self.path)\n print \"sdef=\"\n pp.pprint(self.sdef)\n print \"expanded_def=\"\n pp.pprint (self.expanded_def)\n print \"includes=\"\n pp.pprint (self.includes)\n print \"parent_attributes=\"\n pp.pprint (self.parent_attributes)\n print \"attributes=\"\n pp.pprint (self.attributes)\n print \"mstats=\"\n pp.pprint (self.mstats)", "def printhelp():", "def print_intro(self):\n \n print(\"Did you know that most insect migrations are intergenerational, meaning that offspring continue their parent's journey?\\n\")", "def print_welcome():\n print(\"Welcome to Langton's ant simulator! Choose option: \")\n print(\"1 -> Create white blank picture\")\n print(\"2 -> Load file\")\n print(\"3 -> Generate picture with given probability\")", "def printInfo():\n print('\\t' * 6 + 'Combinational Circuit Paths')\n\n print('-' * 75)\n\n print('Input: Verilog file with Gate Level Modelling')\n print('Output: All paths from input to output of the circuit described by the Verilog file')\n print('(Optional: Graph of the circuit can also be exported)')\n\n print('-' * 75, end='\\n\\n')", "def program_info():\n\n print(\n color.GREEN\n + color.UNDERLINE\n + color.BOLD\n + \"Program Info Center:\\n\"\n + color.END\n )\n print(\n color.UNDERLINE\n + color.BOLD\n + \"About The Program:\"\n + color.END\n + \" This program works with the Blockchain-19 protocols defined within it's respective project. Blockchain-19 is an adaptation of the cryptocurrency blockchain or the Blockchain game used for education purposes, instead relating the content on the Blockchain to COVID-19. Given patient information the program can calculate the hashes within the Blockchain, creating a solved ledger. The program offers users the option of creating a new ledger or importing a previously exported ledger.\\n\"\n )\n\n print(\n color.UNDERLINE\n + color.BOLD\n + \"Necessary Patient Info:\"\n + color.END\n + \"\\n* Hospital \\n* Patient ID \\n* Current Status\\n\"\n )\n\n print(\n color.UNDERLINE\n + color.BOLD\n + \"Current Patient Status Key:\"\n + color.END\n + \"\\n* A = Admitted \\n* B = Stable \\n* C = Moderate \\n* D = Severe \\n* E = Discharged \\n* F = ICU\\n\\n\"\n )", "def print_methods():\n print('''1. Sobol Variance Based:\n first and total order''')\n print('''2. Regional Sensitivity Analysis:\n also called Monte Carlo Filtering''')\n print('''3. Morris Screening Method:\n with pre-optimized defined trajects and group option''')\n print('''4. Sampled-OAT:\n Latin HYpercube or Sobol sampling with OAT sensitivity''')\n print('''5. Standardized Regression Coefficients:\n Latin HYpercube or Sobol sampling with linear regression''')\n print('''6. 
DYNamic Identifiability Analysis:\n Latin HYpercube or Sobol sampling with time-sliced based\n evaluation''')", "def viewer(prob, pt, file=sys.stdout):\n\n summary_data = (\n prob[pt + \".fc.Fl_O:stat:MN\"],\n prob[pt + \".fc.alt\"],\n prob[pt + \".inlet.Fl_O:stat:W\"],\n prob[pt + \".perf.Fn\"],\n prob[pt + \".perf.Fg\"],\n prob[pt + \".inlet.F_ram\"],\n prob[pt + \".perf.OPR\"],\n prob[pt + \".perf.TSFC\"],\n prob[pt + \".splitter.BPR\"],\n )\n\n print(file=file, flush=True)\n print(file=file, flush=True)\n print(file=file, flush=True)\n print(\"----------------------------------------------------------------------------\", file=file, flush=True)\n print(\" POINT:\", pt, file=file, flush=True)\n print(\"----------------------------------------------------------------------------\", file=file, flush=True)\n print(\" PERFORMANCE CHARACTERISTICS\", file=file, flush=True)\n print(\" Mach Alt W Fn Fg Fram OPR TSFC BPR \", file=file, flush=True)\n print(\" %7.5f %7.1f %7.3f %7.1f %7.1f %7.1f %7.3f %7.5f %7.3f\" % summary_data, file=file, flush=True)\n\n fs_names = [\n \"fc.Fl_O\",\n \"inlet.Fl_O\",\n \"fan.Fl_O\",\n \"splitter.Fl_O1\",\n \"splitter.Fl_O2\",\n \"duct4.Fl_O\",\n \"lpc.Fl_O\",\n \"duct6.Fl_O\",\n \"hpc.Fl_O\",\n \"bld3.Fl_O\",\n \"burner.Fl_O\",\n \"hpt.Fl_O\",\n \"duct11.Fl_O\",\n \"lpt.Fl_O\",\n \"duct13.Fl_O\",\n \"core_nozz.Fl_O\",\n \"byp_bld.Fl_O\",\n \"duct15.Fl_O\",\n \"byp_nozz.Fl_O\",\n ]\n fs_full_names = [f\"{pt}.{fs}\" for fs in fs_names]\n pyc.print_flow_station(prob, fs_full_names, file=file)\n\n comp_names = [\"fan\", \"lpc\", \"hpc\"]\n comp_full_names = [f\"{pt}.{c}\" for c in comp_names]\n pyc.print_compressor(prob, comp_full_names, file=file)\n\n pyc.print_burner(prob, [f\"{pt}.burner\"], file=file)\n\n turb_names = [\"hpt\", \"lpt\"]\n turb_full_names = [f\"{pt}.{t}\" for t in turb_names]\n pyc.print_turbine(prob, turb_full_names, file=file)\n\n noz_names = [\"core_nozz\", \"byp_nozz\"]\n noz_full_names = [f\"{pt}.{n}\" for n in noz_names]\n pyc.print_nozzle(prob, noz_full_names, file=file)\n\n shaft_names = [\"hp_shaft\", \"lp_shaft\"]\n shaft_full_names = [f\"{pt}.{s}\" for s in shaft_names]\n pyc.print_shaft(prob, shaft_full_names, file=file)\n\n bleed_names = [\"hpc\", \"bld3\", \"byp_bld\"]\n bleed_full_names = [f\"{pt}.{b}\" for b in bleed_names]\n pyc.print_bleed(prob, bleed_full_names, file=file)", "def displayInfo(self):\n # clear stdout for a smoother display\n # os.system('cls' if os.name=='nt' else 'clear')\n\n #print(\"=========== Status ============\")\n # print(\n # \"speed: \" + str(self.speed) +\n # \"\\nangle: \" + str(self.steering_angle) +\n # \"\\nsign: \" + str(self.detected_sign) +\n # \"\\nlane lines: \" + str(self.lane_lines) +\n # \"\\nintersection line flag: \" + str(self.intersection_line) +\n # \"\\ncurrent state label: \" + str(self.currentStateLabel) +\n # \"\\ncurrent states: \" + str(self.currentState)\n #)", "def print_info(self):\n\n print \"\\nALGORITHM INFO\"\n print \"modelnumber:\", self.modelnumber\n print \"restart:\", self.restart\n print \"particles:\", self.particles\n print \"beta:\", self.beta\n print \"dt:\", self.dt\n if self.mode != 1:\n if len(self.final_epsilon) == 0:\n print \"manual epsilon:\"\n for i in range(self.epsilon.shape[0]):\n print \"\\t\",\n for j in range(self.epsilon.shape[1]):\n print \"\", self.epsilon[i, j],\n print \"\"\n else:\n print \"auto epsilon:\"\n print \"\\t\", self.final_epsilon\n print \"\\talpha:\", self.alpha\n\n print \"kernel:\", self.kernel\n print \"model kernel:\", 
self.modelkernel\n print \"model prior:\", self.modelprior\n\n print \"DATA:\"\n print \"\\ttimes:\", self.times\n if self.mode == 0:\n print \"\\tvars:\"\n for i in range(len(self.data[0, :])):\n print \"\\t\",\n for j in range(self.ntimes):\n print \"\", self.data[j, i],\n print \"\"\n\n print \"MODELS:\", self.nmodels\n for i in range(self.nmodels):\n print \"\\t\", \"npar:\", self.nparameters[i]\n print \"\\t\", \"nspecies:\", self.nspecies[i]\n print \"\\t\", \"name:\", self.name[i]\n print \"\\t\", \"source:\", self.source[i]\n print \"\\t\", \"type:\", self.type[i]\n print \"\\t\", \"fit:\", self.fit[i]\n print \"\\t\", \"init:\", self.x0prior[i]\n print \"\\t\", \"prior:\", self.prior[i]\n print \"\\t\", \"logp:\", self.logp[i]\n print \"\\n\"", "def intro():\n print(\" ___ _ _ _ ____ \")\n print(\"|_ _|_ __ __| (_) __ _ _ __ __ _ | | ___ _ __ ___ ___ |___ \\\\ \")\n print(\" | || '_ \\\\ / _` | |/ _` | '_ \\\\ / _` | _ | |/ _ \\\\| '_ \\\\ / _ \\\\/ __| __) |\")\n print(\" | || | | | (_| | | (_| | | | | (_| | | |_| | (_) | | | | __/\\\\__ \\\\ / __/ \")\n print(\"|___|_| |_|\\\\__,_|_|\\\\__,_|_| |_|\\\\__,_| \\\\___/ \\\\___/|_| |_|\\\\___||___/ |_____|\")\n print('and his Great Python Adventure'.center(80))\n print()", "def printing():\r\n document.add_heading('Printing Service details', 1)\r\n\r\n printing_metrics = ['customproperties',\r\n 'workingSetSizeHiPct',\r\n 'logVerbosityAuditActivity',\r\n 'logVerbosityService',\r\n 'hostname',\r\n 'tags']\r\n\r\n printnodes = get_qlik_sense.get_printing()\r\n num_of_nodes = len(printnodes)\r\n num_of_print_metrics = len(printing_metrics)\r\n table = document.add_table(rows=num_of_print_metrics+1, cols=num_of_nodes+1)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'Metric'\r\n for item in range(0, num_of_nodes):\r\n row.cells[item+1].text = printnodes[item][6]\r\n for item in range(num_of_print_metrics):\r\n row = table.rows[item+1]\r\n row.cells[0].text = str(printing_metrics[item])\r\n for printnode in range(num_of_nodes):\r\n row.cells[printnode+1].text = str(printnodes[printnode][item])\r\n\r\n document.add_page_break()", "def _display_examples(self):\n\n print(self._usage)\n print(self._examples)", "def display(self):\r\n\t\ts = self.options['space']\r\n\t\tv = self.level\r\n\t\tp = self.options['sep']\r\n\t\tt = self.options['tab']\r\n\t\tb = self.options['bullet']\r\n\t\tprint(v*t+b+s+self.abbrev+s+p+s+self.text)", "def print_details(self):\n print(\"[{}]\".format(self.name))\n print(\"ID: \" + str(self.id))\n print(\"name: %s\" % self.name)\n print(\"URL: %s\" % self.url)\n print(\"CPUs: \" + str(self.cpus) + \" cores\")\n print(\"Mem: \" + self.memory_str)\n print(\"Tasks: \" + str(self.tasks_len))\n print(\"Uptime %s\" + self.uptime)\n print(\"Uptime Descriptive %s\" + self.uptime_descriptive)\n print(\" \")", "def display_simple(self):\n print(\"\") \n print(\"Date: {}\".format(self.date))\n print(\" Task name: {}\".format(self.task_name))\n print(\" Time spent: {} minutes\".format(self.time_spent))\n print(\" Notes: {}\".format(self.notes))\n print(\" Task number: {}\".format(self.task_number))\n print(\"\")", "def print_params():\n\n help_out = convert_phil_to_text(master_phil, att_level=1)\n txt_out = convert_phil_to_text(master_phil)\n\n return help_out, txt_out", "def print_info(machine_name, connections, width, height, ip_file_filename):\n t = Terminal()\n\n to_print = OrderedDict()\n\n to_print[\"Hostname\"] = t.bright(connections[(0, 0)])\n to_print[\"Width\"] = 
width\n to_print[\"Height\"] = height\n\n if len(connections) > 1:\n to_print[\"Num boards\"] = len(connections)\n to_print[\"All hostnames\"] = ip_file_filename\n\n to_print[\"Running on\"] = machine_name\n\n print(render_definitions(to_print))\n\n try:\n input(t.dim(\"<Press enter when done>\"))\n except (KeyboardInterrupt, EOFError):\n print(\"\")", "def print(self):\r\n self.print_avec_separateur()", "def print_info(self, show=True, **plot_iso_args):\n print(self)\n return self.plot(show, **plot_iso_args)", "def print_intro(self):\n \n print('Did you know mammals tend to have the shortest migration routes because walking takes more energy than flying or swimming?')", "def printInfoDoc():\n global _modinfo\n print _modinfo\n help(\"ProcUtils\")", "def display(self):\n statement = f\"\"\"\n ------\n By {self.prescribed_by.name.upper()}\n ------\n Patient Detail!\n Name: {self.prescribed_to.name.capitalize()}\n Age: {self.prescribed_to.age}\n Gender: {self.prescribed_to.gender}\n Prescribed Medicines!\"\"\"\n print(statement)\n self.display_cure()", "def print_help(self):\n print self.get_help()", "def print_content(self):\n if self.piece!=None:\n print('%s : %s %s' % (self.name, self.piece.color, self.piece.piece_type))\n else:\n print('%s : empty' % (self.name))", "def print_header():\n print('------------------------------------')\n print(' Lesson04')\n print(' Kata Fourteen Assignment')\n print('------------------------------------\\n')", "def printInfo():\n utils = CONFIG['utils']\n mytime = utils.mytime()\n logIt(\"Todays date: \" + mytime + \"\\n\")\n logIt(\" Number is: \" + str(CONFIG['number']) + \"\\n\")\n logIt(\" Host is: \" + str(CONFIG['host']) + \"\\n\")\n logIt(\" Port is: \" + str(CONFIG['port']) + \"\\n\")\n logIt(\" Log file is: \" + str(CONFIG['logfile']) + \"\\n\")\n logIt(\" Stdout flag is: \" + str(CONFIG['stdout']) + \"\\n\")\n logIt(\" Debug flag is: \" + str(CONFIG['debug']) + \"\\n\")", "def print_help(self):\r\n\r\n print (\"\"\"Show data values for assignment.\r\n\r\nUsage:\r\n cat <request or table path>\r\n cat --id <assignment_id> #Where assignment_id provided by 'vers <table path>' command\r\n\r\nFormatting flags:\r\n\r\n -c or --comments - Show comments on/off\r\n -nc or --no-comments\r\n\r\n -ph or --horizontal - Print table horizontally\r\n -pa or --vertical - Print table vertically\r\n (If no '--horizontal' or '--vertical' flag is given, the layout of table is determined automatically:\r\n vertical layout if table has only 1 row and more than 3 columns, horizontal otherwise)\r\n\r\n -b or --borders - Switch show borders on of off\r\n -nb or --no-borders\r\n\r\n -h or --header - Show header on/off\r\n -nh or --no-header\r\n\r\n -t or --time - Show time\r\n -nt or --no-time\r\n\r\nExamples:\r\n > cat /test/test_vars/test_table #print latest data for test_table\r\n > cat /test/test_vars/test_table::subtest #print latest data in subtest variation\r\n > cat /test/test_vars/test_table:::2012-08 #print data latest for august 2012\r\n\r\nSee also 'dump' command which is 'cat' formatted to save data to files. 
'help dump'\r\n\r\n \"\"\")", "def printHelp(self,):\n print man\n return 0", "def print_object_details(obj: object) -> None:\n print_section(obj, 'Type', print_type)\n print_section(obj, 'Documentation', print_documentation)\n print_section(obj, 'Attributes', print_attributes)\n print_section(obj, 'Methods', print_methods)\n print_section_delimiter()", "def print_help():\n\tprint(\"Help text\")", "def about(display=True):\n\n ABOUT_TEXT = \"\"\"\nPre-release version %s (%s) of Topographica; an updated\nversion may be available from topographica.org.\n\nThis program is free, open-source software available under the BSD\nlicense (http://www.opensource.org/licenses/bsd-license.php).\n\"\"\"%(release,version)\n if display:\n print ABOUT_TEXT\n else:\n return ABOUT_TEXT", "def print_help():\n\n print(\"Mailroom Usage: <name>:add a donor and donation h:help l:list\"\n \"donors r:print report q:quit\")", "def print_standout(info):\n sys.stdout.write(\"Info: %s\" % info)\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()", "def print_info(self):\r\n self.system.print_to_log(\r\n f\"{self.__class__.__name__} model: Infection probability: {self.p}, Infectious period: {self.i}, Recovery period: {self.r}.\")", "def help(self):\n\t\tself.usage()\n\t\tprint \"\\tscreen - XML screen file\"\n\t\tprint \"\\troll - roll name\"\n\t\tsys.exit(0)", "def print_image_info(input_image):\n print()\n print(\"Basic Information on image: {}\".format(input_image.filename))\n print(\"Format: {}\".format(input_image.format))\n print(\"Mode: {}\".format(input_image.mode))\n print(\"Size: {}\".format(input_image.size))\n print(\"Width: {}\".format(input_image.width))\n print(\"Height: {}\".format(input_image.height))\n print(\"Palette: {}\".format(input_image.palette))\n print()", "def _repr_pretty_(self, pp, cycle):\n try:\n status_text = self.format_status_info(self.status_info())\n except Exception:\n status_text = (f'{self}: Error showing status information. 
'\n 'Check IOC connection and device health.')\n logger.debug(status_text, exc_info=True)\n raise\n pp.text(status_text)", "def print_actions_help():\n print(\\\n'''\\n\nTools for handling SELAFIN files and TELEMAC binary related in python\\n\nP ossible actions:\\n\n scan will print information about the SELAFIN, such as variables,\n their vales etc.\n spec will print information about a spectral file (also SELAFIN),\n such as frequencies, periodes, etc.\n chop will chop a SELAFIN given a new set of time range and step (but\n alter is better)\n alter will alter a SELAFIN file, choping or modifying time,\n converting its coordinates, extracting variables, etc.\n merge will merge two files together, whether they are continuous\n simulations (same variables) or putting variables together\n (same time definition)\n subdivide will subdivide a mesh by one iteration (splitting all triangles\n in four others)\n ''')", "def info(self):\n\n print(\"pupil file =\", self.pupil_file)\n print(\"phase file =\", self.phase_file)\n print(\"wavelengths and weights =\")\n for i in range(len(self.filter[0])):\n print(\" %10.5f %6.4f\" % (self.filter[0][i], self.filter[1][i]))\n print(\"pupil diameter (meters) =\", self.D)\n if self.oversample == 2:\n print(\"oversampling factor = 2 (Nyquist sampling)\")\n else:\n r = float(self.oversample) / 2.\n print(\"oversampling factor = %d (%g * Nyquist sampling)\" % \\\n (self.oversample, r))\n if self.type == SINGLE_PREC:\n print(\"computations will use single precision\")\n else:\n print(\"computations will use double precision\")\n print(\"size of output image =\", self.output_size)\n if self.cdelt is not None:\n print(\"output pixel size (arcsec) =\", self.cdelt / ARCSECtoDEGREES)\n if self.output_written:\n print(\"The computed PSF has been written to the output file.\")\n else:\n print(\"The output file has not been written yet.\")", "def output(self):\n print \"Name:\", self.name\n print \"City:\", self.city\n print \"Country:\", self.country\n print \"Number of Reviews:\", len(self.sentiments)\n print \"Old Reviews (Stars):\", self.stars_avg\n print \"Old Reviews (%):\", self.stars_avg/5\n print \"New Rating (Stars)\", self.new_rating*5\n print \"New Rating (%):\", self.new_rating", "def print_result_info(self,result,filename):\n print ('File: %s' % filename)\n print ('Desc: %s' % result.description)\n print ('Version: %s' % result.version)\n print ('Arch: %s' % result.arch)\n print ('Platform: %s' % result.platform)\n print ('CPU: %s' % result.cpuarch)\n if hasattr(result,'sequence'):\n print ('Sequence: %s' % result.sequence)\n print ('Person: %s (%s)' % (result.person_name,result.person_id))\n result.print_summary()\n print('')", "def show_completed_design(completed_design):\n print(\"\\nThe following models have been printed:\")\n for completed_designs in completed_design:\n print(completed_designs)", "def command_show(problem):\r\n print problem.get_html()", "def show_info(self):\n print(\"Problem number: \" + str(self.number))\n print(\"Problem name: \" + str(self.name))\n print(\"Problem description: \" + str(self.desc))", "def print_all(self) -> None:\n\n print(\"title: \" + str(self.title))\n print(\"simple_title: \" + str(self.simple_title))\n print(\"info: \" + str(self.info))\n print(\"exists: \" + str(self.exists))\n print(\"categories: \" + str(self.categories))\n print(\"content: \" + str(self.content))", "def show(self, options=None):\n\n # # IMPLEMENTATION NOTE: Stub for implementing options:\n # if options and 
self.InspectOptions.ALL_OUTPUT_LABELS in options:\n # pass\n\n print (\"\\n---------------------------------------------------------\")\n print (\"\\n{}\\n\".format(self.name))\n\n print (\"\\tLearning enabled: {}\".format(self._learning_enabled))\n\n # print (\"\\n\\tMechanisms:\")\n # for mech_name in self.mechanismNames:\n # print (\"\\t\\t{}\".format(mech_name))\n\n print (\"\\n\\tMechanisms:\")\n for mech_tuple in self._mech_tuples:\n print (\"\\t\\t{} (phase: {})\".format(mech_tuple.mechanism.name, mech_tuple.phase))\n\n\n print (\"\\n\\tOrigin mechanism: \".format(self.name))\n for mech_tuple in self.originMechanisms.mech_tuples_sorted:\n print(\"\\t\\t{} (phase: {})\".format(mech_tuple.mechanism.name, mech_tuple.phase))\n\n print (\"\\n\\tTerminal mechanism: \".format(self.name))\n for mech_tuple in self.terminalMechanisms.mech_tuples_sorted:\n print(\"\\t\\t{} (phase: {})\".format(mech_tuple.mechanism.name, mech_tuple.phase))\n for output_state_name in mech_tuple.mechanism.outputStates:\n print(\"\\t\\t\\t{0}\".format(output_state_name))\n\n print (\"\\n---------------------------------------------------------\")", "def display_single_actor(actor: dict):\n actor_name = \" \".join([n.title() for n in actor[\"name\"].split(\" \")])\n print(colored(figlet_format(actor_name, font=\"chunky\", width=220), \"magenta\"))\n # First and Last activity dates\n actor_activity_dates(actor)\n if \"known_as\" in actor:\n aka = \", \".join(list(actor['known_as'].split(',')))\n aka = f\"{chunk_long_description(aka)}\"\n aka = f\"{colored('Otherwise known as', attrs=['bold', 'underline'])}\\n{aka}\"\n print(f\"{aka}\\n\")\n if actor[\"description\"]:\n print(colored(\"Adversary description\", attrs=[\"bold\", \"underline\"]))\n print(f\"{chunk_long_description(actor['description'])}\\n\")\n # Actor capability, origin and type\n actor_type_and_capability(actor)\n # Motivations\n simple_list_display(\"motivations\", actor, \"Motivations\")\n # Objectives\n simple_list_display(\"objectives\", actor, \"Objectives\")\n # Capabilities\n simple_list_display(\"capabilities\", actor, \"Capabilities\")\n # Target Regions\n simple_list_display(\"target_regions\", actor, \"Targeted regions\")\n # Target Countries\n large_list_display(\"target_countries\", actor, \"Targeted countries\")\n # Target industries\n large_list_display(\"target_industries\", actor, \"Targeted industries\")\n # Kill chain\n if \"kill_chain\" in actor:\n chain = actor[\"kill_chain\"]\n print(colored(\"Tactics, Techniques and Procedures\", attrs=[\"bold\", \"underline\"]))\n for key, val in chain.items():\n if \"rich_text_\" not in key:\n if val[:3] != \"\\r\\n\\t\" and val[:3] != \"CVE\":\n val = \"\\r\\n\\t\" + val\n key = \" \".join([k.title() for k in key.split(\"_\")]).replace(\"And\", \"and\")\n print(f\"{bold(key)}: {chunk_long_description(val, 100)}\\n\")\n # eCrime Kill chain\n if \"ecrime_kill_chain\" in actor:\n ekc = actor[\"ecrime_kill_chain\"]\n print(colored(\"ECrime Tactics, Techniques and Procedures\", attrs=[\"underline\"]))\n for key, val in ekc.items():\n if \"rich_text_\" not in key and val:\n key = \" \".join([k.title() for k in key.split(\"_\")]).replace(\"And\", \"and\")\n print(f\"{bold(key)}: {val}\")", "def print_header(self):\n print(\"Running {} simulations.\".format(self.num_simulations))\n print(\"{0:2}% bias for men\".format(self.promotion_bias))\n print(\"{0:2} promotion cycles\".format(self.iterations_per_simulation))\n print(\"{0:2}% attrition rate\".format(self.attrition))\n print", "def _print(self):\n 
print('center :', self.center, ' widht : ', self.width, ' height : ', self.height, ' heat : ', self.heat,\n ' speed ', self.speed)", "def print_info(task_state, video_state):\n os.system('clear')\n\n # instructions\n blue_bg('\\n Instructions ')\n orange_fg('\\u21e6 / \\u21e8:\\t', '1 frame back/forward')\n orange_fg('\\u21e9 / \\u21e7:\\t', '10 frame back/forward')\n orange_fg('< / >:\\t', '100 frame back/forward')\n orange_fg('[ / ]:\\t', 'Previous/next task/video')\n orange_fg('Esc:\\t', 'Exit')\n orange_fg('0-9:\\t', 'Action ID')\n orange_fg('t / i:\\t', '[User Input] Jump to Task/Image ID')\n orange_fg('Space:\\t', 'Toggle text color')\n orange_fg('Tab:\\t', 'Toggle lookahead mode')\n red_fg('Note:\\t', '(a) Select image as active window (b) Turn off Caps Lock (c) Do not press shift key')\n\n # state information\n blue_bg('\\n State ')\n orange_fg('Video ID: ', '{}\\t'.format(task_state.tasks[task_state.task_idx]), newline=False)\n orange_fg('Frame ID: ', '{}'.format(video_state.get_image_name()))\n orange_fg('Image ID: ', '{}/{}'.format(video_state.image_idx + 1, video_state.num_frames))\n orange_fg('Action ID: ', video_state.get_image_label())\n\n # action dictionary and key mapping\n blue_bg('\\n Actions List ')\n for a, action in enumerate(task_state.actions):\n orange_fg('Action {}: '.format(a + 1), action)\n\n # annotations\n blue_bg('\\n Actions Record ')\n for frame_idx, (f, a) in enumerate(video_state.labels.items()):\n orange_fg('Label {}: '.format(frame_idx + 1), '{} --> {}'.format(f, a))", "def display_cli(conversations, alt_speaker, human_speaker):\n for speaker, speech in conversations:\n if speaker == END_OF_CONVO:\n print(\"-\" * 20 + \"END OF CONVERSATION\" + \"-\" * 20)\n elif speaker == alt_speaker:\n print(\"%-15s: %s\" % (speaker[:15], speech))\n else:\n prBlueBG(\"%-15s: %s\" % (speaker[:15], speech))", "def display_section(name):\n assert all((GENERAL, TRAINING, DETECTION, EVALUATION))\n section_frame = pd.DataFrame(eval(name)).T.fillna('-')\n section_frame['flags'] = section_frame.index.values\n section_frame['flags'] = section_frame['flags'].apply(lambda c: f'--{c}')\n section_frame = section_frame.reset_index(drop=True).set_index('flags')\n print(f'\\n{name.title()}\\n')\n print(\n section_frame[\n [\n column_name\n for column_name in ('help', 'required', 'default')\n if column_name in section_frame.columns\n ]\n ].to_markdown()\n )", "def showHelp(self):\n\t\tfor i in range(0,20):\n\t\t\tprint \"\"\n\t\tprint \" _ _ \"\n\t\tprint \"| | | | \"\n\t\tprint \"| |__ _ __ ___ | |__ \"\n\t\tprint \"| '_ \\ | '_ \\ / __|| '_ \\ \"\n\t\tprint \"| | | || | | |\\__ \\| | | |\"\n\t\tprint \"|_| |_||_| |_||___/|_| |_|\"\n\t\tprint \"A program by Scott Jackson\"\n\t\tprint \"\"\n\t\tprint \"To enter a command, type the key and press Return.\"\n\t\tprint \"NB: parentheses indicate which of two options is the default.\"\n\t\tprint \"\"\n\t\tprint \"Basic Commands:\"\n\t\tprint \"j / k -- show lower-ranked / higher-ranked stories.\"\n\t\tprint \"r -- get the latest stories from Hacker News.\"\n\t\tprint \"q -- quit.\"\n\t\tprint \"# -- open story number # in your web browser.\"\n\t\tprint \"c# -- open comments for story number # in your web browser.\"\n\t\tprint \"#+ -- open up story number # AND its comments in your web browser.\"\n\t\tprint \"top / new -- switch between showing the top and newest stories on HN. (top)\"\n\t\tprint \"c / e -- collapse stories you've already read / don't collapse them. 
(e)\"\n\t\tprint \"u -- update hnsh to the latest version.\"\n\t\tprint \"==========================\"\n\t\tprint \"For more commands, see the man.txt file.\"\n\t\tinput = raw_input(\"Press Return to go back to the Hacker News stories.\")", "def print_usage():\n print 'USAGE: %s [options]' % os.path.abspath(__file__)\n print 'EXAMPLE1: %s # FOR DEFAULTS' % os.path.abspath(__file__)\n print 'EXAMPLE2: %s 121f03=tweek hirap=towelie details=False # TWO SMALL SETS' % os.path.abspath(__file__)\n print 'EXAMPLE3: %s 121f03=tweek details=True # ONE DETAILED SET' % os.path.abspath(__file__)\n print 'EXAMPLE4: %s details=True # SHOWS MAX INFO' % os.path.abspath(__file__)", "def printInfo(self):\n print(\"Generating %s with the following info:\" % self.args.dest)\n print(\"From: %s\" % self.srcdir)\n print(\"To: %s\" % self.desdir)\n print(\"Template: %s\" % self.args.tmpl)\n print(\"Author: %s\" % self.args.author)\n print(\"Version: %s\" % self.args.ver)\n print(\"Date: %s\" % self.args.date)\n print(\"\\n\")", "def print_usage():\n leader = \" \"\n print(\"\\n Usage: scanning [-v|-c|-k=N] SOURCE PAPER SCALE COLOR [basename]\\n\")\n print(\" SOURCE Paper source:\")\n print_option_set(scan_core.SOURCES, leader)\n print(\" PAPER Paper size:\")\n print_option_set(scan_core.PAPERS, leader)\n print(\" SCALE Scaling factor:\")\n print_option_set(scan_core.SCALES, leader)\n print(\" COLOR Colour mode:\")\n print_option_set(scan_core.COLORS, leader)\n print(\" basename Desired base filename, optionally including path\")\n print(\" -v View each scan when conversion is complete\")\n print(\" -c Confirm each scan before saving in final location\")\n print(\" -d Print the scanning a conversion commands used for debugging\")\n print(\" -k=N Do not convert page N of scan\\n\")\n print(\"SCANNING Script (c)2010 Jody Sankey\")\n version = sys.version_info\n print(\"Currently running in Python v{}.{}.{}\\n\".format(*version))\n sys.exit()", "def display(self):\n print(\"{}, {}\".format(self.label, self.params))", "def _intro():\n # TODO: should we print to stderr ?\n print(logo)\n print(__version__)", "def print_help():\n parser = parsersetup()\n parser.print_help()", "def show_myhero(self):\n description = (self.name + ' Level is: ' + str(self.level) + ' Age is: ' + str(\n self.age) + ' Rank is: ' + self.rank + ' health is: ' + str(self.health) + ' magic is: ' + str(self.__magic)).title()\n print(description)", "def pprint(self, parameter_s=''):\n ptformatter = self.shell.display_formatter.formatters['text/plain']\n ptformatter.pprint = bool(1 - ptformatter.pprint)\n print('Pretty printing has been turned',\n ['OFF','ON'][ptformatter.pprint])", "def show():\n info(str(Project))", "def info(self):\n self.update_info()\n print('Number of electrodes: ' + str(self.n_elecs))\n print('Recording time in seconds: ' + str(self.dur))\n print('Sample Rate in Hz: '+ str(self.sample_rate))\n print('Number of sessions: ' + str(self.n_sessions))\n print('Date created: ' + str(self.date_created))\n print('Meta data: ' + str(self.meta))", "def display_get_game():\n title = input(\"Please give me a title searched game: \")\n info_about_game = reports.get_game(filename, title)\n print(\"Properties of the game: {}\\n\".format(info_about_game))", "def pr(x):\n Card.print_pretty_cards(x)", "def Banner():\n main_banner = pyfiglet.figlet_format(\" UTM NAT\", font = \"slant\")\n sub_banner1 = pyfiglet.figlet_format(\"tool\", font = \"isometric1\")\n sub_banner2 = \" -Generate a CSV file of Sophos UTM NAT statements-\"\n sub_banner3 = \" 
via REST API using the power of Python\"\n\n print()\n print('=' * 62)\n print(main_banner)\n print(sub_banner1)\n print()\n print(sub_banner2)\n print(sub_banner3)\n print()\n print('=' * 62)\n print()", "def show(what):\n global program, simulator\n try:\n if \"breakpoints\".find(what) == 0 and simulator is not None:\n for(id, h, s) in simulator.get_breakpoints():\n print id, \" : hits={} {}\".format(h, s)\n elif \"assumptions\".find(what) == 0 and simulator is not None:\n for(g, l, expr) in simulator.get_assumptions():\n if l == 0:\n print \"0x{:x} : {}\".format(g, expr)\n else:\n print \"(0x{:x},{}) : {}\".format(g, l, expr)\n elif \"pc\".find(what) == 0:\n print \"0x{:x}\".format(pc())\n elif \"mppc\".find(what) == 0:\n print \"0x{:x}\".format(mppc())\n elif \"hooks\".find(what) == 0:\n for hf in sorted(hooks.keys()):\n print \"hooks for function\", hf.__name__\n index = 0\n for h in hooks[hf]:\n if h.__name__ is not None:\n if h.__name__.find(\"__\") == 0: # internal hook\n continue\n desc = h.__name__\n else:\n desc = str(h)\n print \"{:2d} : {}\".format(index, desc)\n index += 1\n if index == 0:\n print \"there is no hook\"\n except:\n simulation_error()", "def print_banner(self):\n print \":##::::'##::'#######::'########:::::::::::::::'###::::'########::'####:\\n\\\n:###::'###:'##.... ##: ##.....::::'##::::::::'## ##::: ##.... ##:. ##::\\n\\\n:####'####: ##:::: ##: ##::::::::: ##:::::::'##:. ##:: ##:::: ##:: ##::\\n\\\n:## ### ##: ##:::: ##: ######:::'######::::'##:::. ##: ########::: ##::\\n\\\n:##. #: ##: ##:::: ##: ##...::::.. ##.::::: #########: ##.....:::: ##::\\n\\\n:##:.:: ##: ##:::: ##: ##::::::::: ##:::::: ##.... ##: ##::::::::: ##::\\n\\\n:##:::: ##:. #######:: ##:::::::::..::::::: ##:::: ##: ##::::::::'####:\\n\\\n:..:::::..:::.......:::..:::::::::::::::::::..:::::..::..:::::::::....:\"", "def print(self):\n print('Name:', self.name)\n print('Camera:', self.camera)\n print('Memory:', self.memory)\n print('Ram:', self.ram)\n print('Price:', self.price)\n print('Image:', self.image)", "def pprint(self):\n # just here for defining the interface; work is done in subclasses\n pass", "def __repr__(self):\n (sections, section_titles) = self._get_summary_struct()\n return _toolkit_repr_print(self, sections, section_titles, width=30)", "def show_info(self):\n x = self.x0 + self.m * self.dx + 20\n y = self.y0\n self.draw_text2('t={:.2f}'.format(self.t - self.t0), x, y)\n self.draw_text2('frame={}'.format(self.frame))\n self.draw_text2('fps={:.1f}'.format(self.frame/(self.t - self.t0)))\n self.draw_text2('pos={}'.format(self.pos))\n self.draw_text2('dir={}'.format(self.dir))\n self.draw_text2('type={}'.format(self.event_type))\n self.draw_text2('name={}'.format(self.event_name))\n self.draw_text2('key={}'.format(self.event_key))\n self.draw_text2('mod={}'.format(self.event_mod))\n self.draw_text2('unicode={}'.format(self.event_unicode))", "def print_info(fn, **kwargs):\n print('\\n==Common info==')\n print('File:', fn)\n print('Artist:', kwargs['a'])\n print('Title:', kwargs['t'])\n print('Album:', kwargs['b'])\n print('Year:', kwargs['y'])\n print('Genre:', kwargs['g'])\n print('Track №:', kwargs['r'])\n\n if fn.upper().endswith('MP3'):\n from mutagen.mp3 import MP3\n i = MP3(fn).info\n print('\\n==MP3 info==')\n print('Length:', int(i.length//60), 'm', round(i.length % 60), 's')\n print('Channels:', i.channels)\n print(i.bitrate_mode)\n print('Bitrate:', i.bitrate//1000)\n print('Sample rate:', i.sample_rate)\n print('Track gain:', i.track_gain)\n print('Track peak:', 
i.track_peak)\n print('Album gain:', i.album_gain)\n print('Encoder info:', i.encoder_info)\n print('Encoder settings:', i.encoder_settings)\n print('Version:', i.version)\n print('Layer:', i.layer)\n print('Mode:', i.mode)", "def display(config, transfo, learner, *args):\n\n stderr.write(\"Config is %s\\n\" % str(config))\n stderr.write(\"Transfo is %s\\n\" % str(ktpipes.KtPipe.from_json(config[transfo])))\n stderr.write(\"Learner is %s\\n\" % str(learner))", "def help_util():\r\n for cmd, f in COMMANDS.items():\r\n print(\"POM \" + cmd + \":\")\r\n print(f.__doc__.lstrip(\"\\n\"))", "def printPokemon():\n print(\" _ \")\n print(\" _ __ ___ | | _____ _ __ ___ ___ _ __ \")\n print(\" | '_ \\ / _ \\| |/ / _ \\ '_ ` _ \\ / _ \\| '_ \\ \")\n print(\" | |_) | (_) | < __/ | | | | | (_) | | | |\")\n print(\" | .__/ \\___/|_|\\_\\___|_| |_| |_|\\___/|_| |_|\")\n print(\" |_| \")", "def show_game_mission():\n print_bold(\"Misija:\")\n print(\"\\tOdaberi kućicu u kojoj se Talion može odmoriti ...\")\n print_bold(\"SAVJET:\")\n print(\"PAZI kako biraš jer neprijatelji su blizu!\")\n print_dotted_line()", "def drawDescription(self):\n print(\"\\nPress the following keys to run the features of the GoPiGo3.\")\n print(\"To move the motors, make sure you have a fresh set of batteries powering the GoPiGo3.\\n\")", "def display(self):\n print(\n f'\\t\\t {self.name.upper()} {self.potency[0]}{self.potency[1]}\\t\\t'\n f' {self.dose_qty[0]} {self.dose_qty[1]} {self.dose[0]} {self.dose[1].upper()}')" ]
[ "0.6639509", "0.6288107", "0.6238363", "0.62220323", "0.6204376", "0.6180137", "0.6151986", "0.61489", "0.6128701", "0.6115834", "0.6007176", "0.59872526", "0.59838665", "0.596641", "0.59656215", "0.59383917", "0.59287095", "0.5924039", "0.590953", "0.5906681", "0.5902103", "0.588743", "0.58825", "0.5878527", "0.58744395", "0.58607125", "0.5849076", "0.5835893", "0.582305", "0.5818587", "0.5782072", "0.5769248", "0.57612723", "0.5757334", "0.5755148", "0.57509905", "0.5749876", "0.57369274", "0.57308584", "0.57269704", "0.5720482", "0.5704222", "0.5699022", "0.5698274", "0.56971586", "0.569547", "0.56867576", "0.568381", "0.5680802", "0.56770355", "0.56716937", "0.56696254", "0.56643033", "0.56593496", "0.56588453", "0.5656755", "0.5653434", "0.5645816", "0.56375194", "0.563415", "0.56226736", "0.5619263", "0.5619246", "0.56186616", "0.56036115", "0.5603531", "0.5599429", "0.55988926", "0.55985576", "0.55921316", "0.55906767", "0.55871075", "0.5584856", "0.5581983", "0.558047", "0.55748695", "0.5571358", "0.55706", "0.5563595", "0.5558233", "0.5557672", "0.5556697", "0.5534227", "0.5529449", "0.5522169", "0.55178005", "0.55169004", "0.5513251", "0.5512937", "0.55128866", "0.5505143", "0.55036336", "0.54989517", "0.54949653", "0.54920834", "0.549158", "0.5469149", "0.5468105", "0.54636216", "0.54517186" ]
0.7133029
0
Count the amount of text in each slide.
def get_word_counts(slides) -> List[int]:
    word_count = []
    for slide in slides:
        # print(f"========== slide {len(text_count)+1} ========== [{slide.slide_layout.name}]")
        words = 0
        # find all text
        for shape in slide.shapes:
            if not shape.has_text_frame:
                continue
            # print(shape.name)
            for paragraph in shape.text_frame.paragraphs:
                for run in paragraph.runs:
                    # print(" " + run.text)
                    words += len(run.text.split())
        word_count.append(words)
    return word_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_slide_analytics_new(slides) -> List[int]:\n word_count = []\n for slide in slides:\n print(slide)\n words = 0\n for shape in slide.shapes:\n if not shape.has_text_frame:\n continue\n print(shape.name)\n for paragraph in shape.text_frame.paragraphs:\n for run in paragraph.runs:\n print(\" \" + run.text)\n words += len(run.text.split())\n word_count.append(words)\n return word_count", "def count_passages(self, step, count):\r\n count = int(count)\r\n assert_equals(len(world.css_find('.annotatable-span')), count)\r\n assert_equals(len(world.css_find('.annotatable-span.highlight')), count)\r\n assert_equals(len(world.css_find('.annotatable-span.highlight-yellow')), count)", "def total_slides(path):\n # print(\"CALLING.. total_slides\")\n prs = Presentation(path)\n tot_slides = len(prs.slides._sldIdLst)\n return tot_slides", "def count(text):\n return len(text)", "def paragraph_count(self, doc):\n\n paragraphs = doc.split(\"\\n\\n\")\n # remove the empty string\n return len([paragraph for paragraph in paragraphs if paragraph])", "def words(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n number_of_words = 0\n for i in root.iter(root_tag+'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n number_of_words = number_of_words + len(list(i.text.split()))\n return number_of_words", "def count_paragraphs(all_articles):\n total_paragraphs = 0\n for title in all_articles:\n total_paragraphs += all_articles[title]['content'].count('\\n')\n print(f\"There are {total_paragraphs} paragraphs written.\")", "def word_count(self):\n print(self.words())\n return len(self.words())\n #count = 0\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # count += len(wordslst)\n #return count\n #joined_string = ''.join(self.lines)\n #for word in joined_string:\n # if word != ' ' and word != '\\n' and word != '\\t':\n # count += 1\n #print('READ ME ––––––––––', self.lines)\n #print(joined_string)\n #print(line)\n #print(wordslst)\n #print(count)", "def paragraphs(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}')+1)]\n number_of_paragraphs = len(list(root.iter(root_tag + 'p')))\n return number_of_paragraphs", "def total_words(self):\n return len(strip_tags('%s %s' % (self.lead, self.content)).split())", "def count():", "def count_words(all_articles):\n total_words = 0\n for title in all_articles:\n total_words += all_articles[title]['word-count']\n print(f\"There are {total_words} words written.\")", "def text_count(self, text):\n res = 0\n for intv in self:\n if intv._text == text:\n res += 1\n return res", "def get_about_count_results(soup):\n title = soup.find('div', {'id': 'gs_ab_md'})\n if title:\n title = title.find('div', {'class': 'gs_ab_mdw'})\n if title:\n count_papers = title.text\n if count_papers:\n count_papers = count_papers.split(' ')[1].replace(',', '')\n else:\n count_papers = len(soup.find_all('h3', class_=\"gs_rt\"))\n try:\n int(count_papers)\n except:\n count_papers = title.text.split(' ')[0].replace(',', '')\n else:\n count_papers = len(soup.find_all('h3', class_=\"gs_rt\"))\n return int(count_papers)", "def displayed_words(self):\n return (len(strip_tags(self.preview).split()) -\n (len(self.more_string.split()) * int(not bool(self.lead))))", "def sentence_count(self):\n count = 0\n for line in self.lines:\n if '.' 
in line:\n count += 1\n if count == 0:\n count = 1\n return count\n #return line.count('.')\n #else:\n #return 1", "def counts(self, regex = \"\\w+\"): \n tokenizer = RegexpTokenizer(r'{}'.format(regex))\n count = []\n for i in tqdm(self.text):\n count.append(len(tokenizer.tokenize(i)))\n return count", "def count(self, word):\n pass", "def get_number_of_paragraph(self):\n file_to_read = f'{self.path}/{self.filename}'\n file = open(file_to_read, 'r', encoding='utf-8')\n string_to_match = '<p>'\n count = 0\n for line in file:\n if string_to_match in line:\n count += 1\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'number_of_paragraph', count)\n print(datetime.now(), '-', 'number_of_paragraph for', self.filename, 'calculated =', count)\n return None", "def __count_text(text, limit=None):\n\n count = 0\n is_text = True\n for i, c in enumerate(text):\n if is_text and c == '\\33':\n is_text = False\n\n if is_text:\n count += 1\n if limit is not None and count == limit:\n return i + 1\n\n if not is_text and c == 'm':\n is_text = True\n\n if limit is not None:\n return len(text)\n else:\n return count", "def get_number_of_elements(self):\n if self.page.paginator.count < int(self.page.number) * self.page_size:\n show = self.get_shows()\n\n return \"{} - {}\".format(show, self.page.paginator.count)\n else:\n show = self.get_shows()\n return \"{} - {}\".format(show, self.get_page_range())", "def count(app, status):\n item = app.tv.selection()[0]\n\n def count_children(item):\n children = app.tv.get_children(item)\n return len(children) + sum(count_children(child) for child in children)\n\n status.config(text=f'{count_children(item)} descendants')", "def get_counts(self):\n value = self.text_ctrl.GetValue()\n chars = len(value)\n words = len(re.findall('\\w+', value))\n pub.sendMessage('update_counts', chars=chars, words=words)", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def get_lenght(text):\n return range(len(Articles.split(text)))", "def __emphasis(self, title, text):\n title_counts = collections.Counter(title)\n text_counts = collections.Counter(text)\n text_count = 0\n title_count = 0\n exclamatory = ('?', '!')\n for k in exclamatory:\n if title_counts[k] is not None:\n title_count += title_counts[k]\n if text_counts[k] is not None:\n text_count += text_counts[k]\n return text_count, title_count", "def total_exs(dataset):\n total = 0\n for article in dataset['data']:\n for para in article['paragraphs']:\n total += len(para['qas'])\n return total", "def testSectionCount(self):\n\n self.sectionCount(3640)", "def word_count(self):\n return len(self.text)", "def count_sonata(self):\n return self.run_query(\"count( /mediawiki/page[starts-with (title, 'Sonata') ] )\")", "def count_words(filename):", "def count_articles(all_articles):\n print(f\"There are {len(all_articles)} articles.\")", "def render_words_count(request):\n count = 0\n try:\n count = sum([len(d.body.split(None)) for d in Devotional.objects.all()])\n except:\n pass\n\n return render_to_response('devotional/view_word_count.html',\n {'count': count},\n context_instance=RequestContext(request))", "def hives_count(self) -> int:\n return self.hives.count()", "def total_syllables(target_text):\n\n splited_text = target_text.split()\n count = 0\n for word in splited_text:\n count = count + word_syllables(word)\n return count", "def testArticleCount(self):\n\n self.articleCount(17)", "def total_words(target_text):\n\n splited_text = target_text.split()\n nbwords 
= len(splited_text)\n return nbwords", "def test_counts(self):\n lines, words, chars = analyze_text(self.filename)\n self.assertEqual(lines, 4)\n self.assertEqual(words, 8)\n self.assertEqual(chars, 36)", "def counter(self) -> int:", "def counter(self) -> int:", "def getCount(self):\n return _osgAnimation.Target_getCount(self)", "def count_search_results(self):\n raw_text = self.driver.find_element(*self.HEADING_COUNTER).text\n num = re.findall(r'\\d+', raw_text) \n return int(num[0])", "def sections(self) -> int:\n return len(self.string.split(\".\"))", "def count(self):\n # TODO not implemented yet\n return 0", "def count(self):\n\n raise NotImplementedError", "def amount_nouns_and_numerals_spacy(self) -> int:\n #choose language\n if self.lang == 'en':\n lang_for_spacy = 'en_core_web_sm'\n elif self.lang == 'de':\n lang_for_spacy = 'de_core_news_sm'\n elif self.lang == 'fr':\n lang_for_spacy = 'fr_core_news_md'\n nlp = spacy.load(lang_for_spacy)\n doc = nlp(self.sent)\n for word in doc:\n #if the part of speech is a noun, a proper noun or a numeral \n #(only for en) \n if self.lang == 'en':\n if word.pos_ == 'NOUN' or word.pos_ == 'PROPN' or word.pos_ == 'NUM':\n self.amount_nouns_and_num += 1\n elif self.lang == 'de' or self.lang == 'fr':\n if word.pos_ == 'NOUN' or word.pos_ == 'PROPN':\n self.amount_nouns_and_num += 1\n return self.amount_nouns_and_num", "def main_func(sources):\n art_count = 0\n word_count = 0\n for source in sources:\n titles = get_articles(source)\n art_count += len(titles)\n word_count += count_word('trump', titles)\n\n return (word_count, art_count)", "def wordCount(document):\n return float(len(document.split(None)))", "def test_run():\r\n print(count_words(\"cat bat mat cat bat cat\", 3))\r\n print(count_words(\"betty bought a bit of butter but the butter was bitter\", 3))", "def count(self):\n return len(self.find())", "def slider(self):\n\n if self.count >= len(self.txt):\n self.count = -1\n self.text = ''\n self.heading.config(text=self.text)\n\n else:\n self.text = self.text + self.txt[self.count]\n self.heading.config(text=self.text)\n self.count += 1\n\n self.heading.after(100, self.slider)", "def text_cond_count(self, condition):\n res = 0\n for intv in self:\n if condition(intv._text):\n res += 1\n return res", "def count(self):\n return self.ming_cursor.count()", "def count_containers(lines: Lines) -> int:\n rules = parse_rules(lines)\n allowed_containers = containers(\"shiny gold\", rules)\n assert allowed_containers is not None\n return len(allowed_containers) - 1", "def num_words():\n # Load the GT.\n df = pd.read_csv(config.META_FQN, sep=\"\\t\")\n stats = {\n \"T\": {\"words\": [], \"duration\": []},\n \"P\": {\"words\": [], \"duration\": []},\n \"sess\": {\"words\": [], \"duration\": []},\n }\n\n for _, row in df.iterrows():\n if row[\"asr_test\"]:\n stats[\"P\"][\"words\"].append(float(row[\"gt_patient_num_words\"]))\n stats[\"T\"][\"words\"].append(float(row[\"gt_therapist_num_words\"]))\n stats[\"P\"][\"duration\"].append(float(row[\"gt_patient_time_spoken\"]))\n stats[\"T\"][\"duration\"].append(\n float(row[\"gt_therapist_time_spoken\"])\n )\n stats[\"sess\"][\"duration\"].append(float(row[\"sess_dur\"]))\n n_words = (\n row[\"gt_therapist_num_words\"] + row[\"gt_patient_num_words\"]\n )\n stats[\"sess\"][\"words\"].append(n_words)\n\n for speaker in stats:\n for metric in stats[speaker]:\n print(f\"------ {speaker} | {metric} ------\")\n print_stats(stats[speaker][metric])", "def count_words(filename):\n try:\n with open(filename) as 
f_obj:\n contents = f_obj.read()\n except FileNotFoundError:\n msg = \"sorry, \" + filename + \" does not exist\"\n print(msg)\n else:\n words = contents.split()\n num_words = len(words)\n print(\"The words'number is \" + str(num_words))", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def word_count(excerpt):\n # Validate that we are actually give something to work with\n assert excerpt, \"excerpt cannot be blank\"\n return Counter(excerpt.split())", "def letters(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n number_of_letters = 0\n for i in root.iter(root_tag+'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n number_of_letters = number_of_letters + len([letter for letter in i.text if letter.isalnum()])\n return number_of_letters", "def count_data_points(conversation, parse_text, i, **kwargs):\n data = conversation.temp_dataset.contents['X']\n num_elements = len(data)\n\n parse_op = gen_parse_op_text(conversation)\n\n if len(parse_op) > 0:\n description_text = f\" where <b>{parse_op}</b>\"\n else:\n description_text = \"\"\n\n message = f\"There are <b>{num_elements} items</b> in the data{description_text}.\"\n\n message += \"<br><br>\"\n message += \"Let me know if you want to see their ids.\"\n ids = list(data.index)\n rest_of_text = str(ids)\n conversation.store_followup_desc(rest_of_text)\n return message, 1", "def run_and_get_word_count(self) -> int:\n r = requests.get(self.url)\n if r.status_code != status.HTTP_200_OK:\n raise ScraperException\n soup = BeautifulSoup(r.content, \"html.parser\")\n matches = soup(text=re.compile(f\"{self.word}\"))\n count = 0\n for match in matches:\n words = re.findall(fr\"\\b{self.word}\\b\", match)\n count = count + len(words)\n return count", "def num_articles(self):\n\t\treturn len(index)", "def count_exemplar_words(self):\n valid_exemplars = [_ for _ in self.exemplars if _.validate()]\n\n total_words = 0\n for eg in valid_exemplars:\n eg.n_words = eg.count_words()\n total_words += eg.n_words\n return valid_exemplars, total_words", "def get_num_of_pages(self):", "def num_divs(self):\n return len(self.q(css='div.test').results)", "def count_words_and_dublicates(novel):", "def embedcount(line):\r\n\r\n x_temp = line.count(BOX_CHAR['lu'])\r\n return self.defaults.get('size')-(4*x_temp)", "def word_count(text, word):\n \n #answer\n word_list = text.split(\" \")\n return (word_list.count(word))\n \n #return (text.count(word)) - deoesn't work", "def on_text(self, event):\n self.get_counts()\n self.save()", "def children_num(self,p):\n counter = 0\n for child in self.children(p):\n counter += 1\n return counter", "def __capitals(self, title, text):\n text_words = nltk.word_tokenize(text)\n text_count = 0\n title_count = 0\n for word in text_words:\n if word.isupper():\n text_count += 1\n for word in title.split():\n if word.isupper():\n title_count += 1\n return title_count, text_count", "def get_number_of_words(self):\n filename = f'{self.path}/{self.filename}'\n # word_counter = {}\n # w_cnt = 0\n # x = 0\n file = open(filename, 'r', encoding='utf-8')\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n # for word in word_list:\n # w_cnt += 1\n # if word not in word_counter:\n # word_counter[word] = 1\n # else:\n 
# word_counter[word] = word_counter[word] + 1\n\n # for word in word_list:\n # x += 1\n # print(word, word.isalpha(), x)\n\n w_cnt = sum([a[0].isalpha() for a in word_list])\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'number_of_words', w_cnt)\n print(datetime.now(), '-', 'number_of_words for', self.filename, 'calculated =', w_cnt)\n return None", "def npulses(self):\n return self.header.pulse_count", "def __sent_len(self, title, text):\n total = 0\n text_sent = nltk.sent_tokenize(text)\n for sent in text_sent:\n total += len(nltk.word_tokenize(sent))\n return (len(nltk.word_tokenize(title)), total / len(text_sent))", "def count() -> int:\n pass", "def countWords(file_name, start, end):\r\n\r\n with open(file_name, \"r\") as file:\r\n counter_words = 0\r\n\r\n for line in islice(file, start, end):\r\n res = len(line.split())\r\n counter_words += res\r\n\r\n return counter_words", "def get_words(self, article: BeautifulSoup):\n return len(re.findall(r'\\w+', self.get_article_text(article)))", "def count(self, item):\n return _(self._.count(item))", "def test_count_publications(self):\n pass", "def document_count(self):\n raise NotImplementedError", "def sent_count(self):\n count = []\n for i in tqdm(self.text):\n count.append(len(sent_tokenize(i)))\n return count", "def test_run():\n print count_words(\"cat bat mat cat bat cat\", 3)\n print count_words(\"betty bought a bit of butter but the butter was bitter\", 3)", "def test_run():\n print count_words(\"cat bat mat cat bat cat\", 3)\n print count_words(\"betty bought a bit of butter but the butter was bitter\", 3)", "def total_phrases(target_text):\n\n nbphrase = 0\n separators = '.!?;'\n for char in target_text:\n if char in separators:\n nbphrase = nbphrase + 1\n return nbphrase", "def count_word(doc):\n count = count = 0\n for w in document.split(\" \"):\n count = count + 1\n return count", "def b_count_test(self):\n \t \n\tsel = self.selenium\n test = \"Test B - Count Articles, Titles, Headings, Etc.\"\n print test\n \n headers = sel.get_css_count(\"css=\" + CSS[1])\n images = sel.get_css_count(\"css=\" + CSS[2])\n authors = sel.get_css_count(\"css=\" + CSS[3])\n\tdots = sel.get_css_count(\"css=\" + CSS[7]) + sel.get_css_count(\"css=\" + CSS[6])\t\n \n if ((images < 8) or (dots < 8) or (authors < 8) or (headers < 8)):\n print \"Missing articles!\"\n L.log(BROWSERS[x], test, \"FAIL, MISSING CONTENT\", \"Images: \" + str(images) + \" Dots: \" + str(dots) + \" Authors: \" + str(authors) + \" Headers: \" + str(headers)) \n \n\telse:\n\t L.log(BROWSERS[x], test, \"PASS, OK\", \"None\")\n\t \n\t######################################################################## ", "def get_related_list_count(self, heading):\n locator = lex_locators[\"record\"][\"related\"][\"count\"].format(heading)\n count = self.selenium.get_webelement(locator).text\n count = count.replace(\"(\", \"\").replace(\")\", \"\")\n return int(count)", "def get_num_of_images(self):", "def count(self):\n return len(self.names)", "def sample_count(self):", "def amount_nouns_and_numerals_stanford_nlp(self) -> int:\n stanza.download(self.lang, processors = 'tokenize,mwt,pos')\n nlp = stanza.Pipeline(self.lang, processors = 'tokenize,mwt,pos')\n doc = nlp(self.sent)\n for sentence in doc.sentences:\n for word in sentence.words:\n #if the part of speech is a noun, a proper noun or a numeral \n #(only for en) \n if self.lang == 'en':\n if word.upos == 'NOUN' or word.upos == 'PROPN' or word.upos == 'NUM':\n self.amount_nouns_and_num += 1\n elif self.lang == 'de' or 
self.lang == 'fr':\n if word.upos == 'NOUN' or word.upos == 'PROPN':\n self.amount_nouns_and_num += 1\n return self.amount_nouns_and_num", "def count_words(self, clean_func=clean_up):\n return (\n len(clean_func(self.transcript_file.text()).split())\n if self.validate()\n else 0\n )", "def _text_length(self, text):\n\n if isinstance(text, dict): # {key: value} case\n return len(next(iter(text.values())))\n elif not hasattr(text, '__len__'): # Object has no len() method\n return 1\n elif len(text) == 0 or isinstance(text[0], int): # Empty string or list of ints\n return len(text)\n else:\n return sum([len(t) for t in text]) # Sum of length of individual strings", "def countWords(f, ext):\n\n word_count = 0\n if ext == \".xml\":\n # Parse with lxml\n tree = lxml.etree.parse(f)\n root = tree.getroot()\n # Get the text of all tags.\n if root is not None:\n text = root.xpath(\"//text()\")\n else:\n return word_count\n # Join the text together.\n text = \" \".join(text)\n # Split the text into words.\n words = text.split(\" \")\n # Remove empty strings.\n words = [w for w in words if w != \"\"]\n # Remove words that are just numbers.\n words = [w for w in words if not w.isnumeric()]\n # Remove one-letter words\n words = [w for w in words if len(w) > 1]\n # Count the words\n word_count = len(words)\n # Subtract off the number of child tags from the root.\n word_count = word_count - len(root.getchildren()) - 1\n\n elif ext == \".html\":\n # Parse the file with BeautifulSoup.\n soup = BS(f, \"html.parser\")\n # Get the text of all tags.\n text = soup.get_text()\n # Split the text into words.\n words = text.split(\" \")\n # Remove empty strings.\n words = [w for w in words if w != \"\"]\n # Remove words that are just numbers.\n words = [w for w in words if not w.isnumeric()]\n # Remove one-letter words\n words = [w for w in words if len(w) > 1]\n # Count the words\n word_count = len(words)\n\n elif ext == \".md\" or ext == \".srt\":\n for line in f:\n # Skip blank lines.\n if len(line) == 0 or line == \"\\n\" or line == \"\\r\":\n continue\n # Check for SRT time lines and skip them.\n if re.search(\"\\d\\d --> \\d\\d:\", line):\n continue\n # Skip lines that are just a single number.\n if re.search(\"^\\d+$\", line):\n continue\n # Check for lines that are just times and skip them.\n if re.search(\"^\\d\\d:\\d\\d$\", line):\n continue\n if re.search(\"^\\d\\d:\\d\\d:\\d\\d$\", line):\n continue\n\n raw_words = line.split(\" \")\n reduced_words = []\n for w in raw_words:\n # Don't include 1-character \"words\"\n if len(w) > 1:\n reduced_words.append(w)\n\n # Store filename and count\n word_count += len(reduced_words)\n\n # Sometimes the word count is negative. 
I don't know why.\n if word_count < 0:\n word_count = 0\n return word_count", "def count_occurrences(article_json, selected_word):\n selected_word = selected_word.lower()\n total_titles = 0 # some rows miss the title field, so not using len()\n selected_word_counter = 0\n for row in article_json:\n if 'title' in row:\n title = row['title']\n total_titles += 1\n for word_in_title in title.lower().split():\n if word_in_title == selected_word:\n selected_word_counter += 1\n return total_titles, selected_word_counter", "def _counter(title_list):\n t = Tokenizer()\n words_count = defaultdict(int)\n words = []\n for title in title_list:\n tokens = t.tokenize(title)\n for token in tokens:\n pos = token.part_of_speech.split(',')[0]\n if pos == '名詞':\n words_count[token.base_form] += 1\n words.append(token.base_form)\n return words_count, words", "def count_words(filename):\n try:\n with open(filename) as f_obj:\n contents = f_obj.read()\n except FileNotFoundError:\n # msg = \"Sorry, the file \" + filename + \" does not exist.\"\n # print(msg)\n pass\n else: \n words = contents.split()\n num_words = len(words)\n print(\"The file \" + filename + \" has about \" + str(num_words) + \" words.\")" ]
[ "0.7645021", "0.699523", "0.6631901", "0.65998024", "0.65277624", "0.65032226", "0.63644123", "0.63634205", "0.6349974", "0.6309825", "0.6171061", "0.6156245", "0.6148901", "0.60785764", "0.60774815", "0.6072669", "0.603562", "0.59676707", "0.59530956", "0.59443533", "0.5926551", "0.59191763", "0.5901001", "0.5897695", "0.5897695", "0.5897695", "0.5897695", "0.58900493", "0.58835316", "0.5869407", "0.5859996", "0.5853128", "0.58055353", "0.5774251", "0.5757163", "0.5752262", "0.57411385", "0.57390624", "0.5738895", "0.57297707", "0.5729508", "0.5727655", "0.5727655", "0.5707323", "0.56938654", "0.56796443", "0.56605667", "0.564855", "0.5640703", "0.56353414", "0.56281257", "0.561886", "0.55845064", "0.5578795", "0.5578319", "0.5553415", "0.5546276", "0.55393434", "0.5537116", "0.55253625", "0.5525328", "0.5519031", "0.5496333", "0.54958844", "0.54911214", "0.5480267", "0.5466869", "0.5466241", "0.54654187", "0.5459029", "0.5456582", "0.54559624", "0.54547644", "0.54506266", "0.5448946", "0.54477507", "0.5446901", "0.5446835", "0.544084", "0.54405236", "0.54342574", "0.5434116", "0.5426969", "0.541459", "0.54023635", "0.54023635", "0.5401984", "0.5394674", "0.5394373", "0.5392787", "0.5386115", "0.5373402", "0.53683203", "0.5364351", "0.53566676", "0.5353533", "0.53531283", "0.53514326", "0.53471255", "0.5347099" ]
0.75697744
1
Calculates a one-to-five star ranking for presentations that are not too text-heavy.
def calculate_text_stars(word_counts) -> int:
    if word_counts == []:
        return 3
    words_per_slide = sum(word_counts) / len(word_counts)
    stars = 5 - abs(words_per_slide - 35) / 8
    # print(stars)
    return max(0, min(5, int(stars + 0.5)))
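A minimal usage sketch of the calculate_text_stars heuristic above: the function body is repeated verbatim so the snippet runs on its own, and the sample word-count lists are invented for illustration (they are not part of the dataset). The heuristic peaks at five stars when slides average about 35 words, falls off as the average drifts from that target, and defaults to three stars for an empty deck.

# Illustrative sketch only; the word-count lists below are assumed sample data.
def calculate_text_stars(word_counts) -> int:
    if word_counts == []:
        return 3
    words_per_slide = sum(word_counts) / len(word_counts)
    stars = 5 - abs(words_per_slide - 35) / 8
    return max(0, min(5, int(stars + 0.5)))

print(calculate_text_stars([]))            # 3 -> empty deck falls back to the default score
print(calculate_text_stars([30, 40, 35]))  # 5 -> averages 35 words per slide, the target density
print(calculate_text_stars([90, 110]))     # 0 -> 100 words per slide is far too text-heavy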
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sentiment(text):\n words = pattern_split.split(text.lower())\n sentiments = map(lambda word: afinn.get(word, 0), words)\n if sentiments:\n # How should you weight the individual word sentiments? \n # You could do N, sqrt(N) or 1 for example. Here I use sqrt(N)\n sentiment = float(sum(sentiments))/math.sqrt(len(sentiments))\n \n else:\n sentiment = 0\n return sentiment", "def compute_readability(text):\n total_words = 0\n total_sentences = 0\n total_syllables = 0\n score = 0\n\n words = text.split()\n total_words = len(text.split()) \n total_sentences = count_sentences(text)\n total_syllables = count_syllables(words)\n \n score = 206.835 - 1.015 * ( total_words / total_sentences) - 84.6 * (total_syllables / total_words)\n if score > 90.00:\n answer = 'Texto de nível do 5º ano do Ensino Fundamental, facilmente compreendido por um aluno de 11 anos.'\n elif score <= 90.00 and score > 80.00:\n answer = 'Texto de nível do 6º ano do Ensino Fundamental, inglês coloquial para consumidores.'\n elif score <= 80.00 and score > 70.00:\n answer = 'Texto de nível do 7º ano do Ensino Fundamental, razoavelmente fácil de ler.'\n elif score <= 70.00 and score > 60.00:\n answer = 'Texto de nível do 9º ano do Ensino Fundamental, Inglês simples compreendido por adolescentes de 13 - 15 anos.'\n elif score <= 60.00 and score > 50.00:\n answer = 'Texto de 1º a 3º ano do Ensino Médio, razoavelmente difícil de ler.'\n elif score <= 50.00 and score > 30.00:\n answer = 'Texto de nível Universitário, difícil de ler.'\n else:\n answer = 'Texto de nível de Graduação, muito difícil de ler e mais bem-compreendido por universitários graduados.'\n \n print('Pontuação Total:', score, answer)", "def score(self, sentence):\n # TODO your code here\n\n # initialize count with trained data\n unigram_count = self.count.copy()\n N = self.total\n\n # make a new key for UNK, add-one later\n for token in sentence:\n if token not in unigram_count:\n unigram_count[token] = 0\n\n # calcutate lopP(<s>) + logP(w1) + logP(w2) + ...\n score = 0.0 # P(<s>) = 1\n V = len(unigram_count) # the number of vocab including UNK\n for word in sentence:\n prob = float((unigram_count[word] + 1) / (N + V)) # c(w) + 1 / N + V\n score += math.log(prob)\n\n return score", "def recommend(self, u):\n\n sims = {} #similarities\n recommendation = \"\"\n topScore = None\n start = time.time()\n for movie_id, rating in enumerate(u):\n if rating != 0:\n sims[movie_id] = {}\n for r_id, movie in enumerate(self.ratings):\n sims[movie_id][r_id] = self.distance(movie,self.ratings[movie_id])\n # print time.time() - start, \"distance time\"\n\n start = time.time()\n for i, movieRating in enumerate(self.ratings):\n iPrediction = 0\n for movieName in self.ratedMovieList:\n j = self.titlesOnly.index(movieName)\n iPrediction += sims[j][i]*1.0 * self.userRatingVector[j]\n if topScore is None or iPrediction > topScore:\n movie = self.titlesOnly[i]\n if movie not in self.ratedMovieList and movie not in self.recommendedMovies:\n # print(\"prediction score for %s is %.5f\" % (movie, iPrediction))\n topScore = iPrediction\n recommendation = movie\n # print time.time() - start, \"recommendation time\"\n self.recommendedMovies.append(recommendation)\n\n articlePattern = re.match('(.*), (the|a|an|el|la)', recommendation)\n if articlePattern is not None:\n recommendation = articlePattern.group(2) + \" \" + articlePattern.group(1)\n\n return recommendation", "def create_text_rank(self):\n # filtered_tokens = self.filter_pos() #if use, replace 2 self.lemma_tokens below\n vocab = 
self.create_vocab(self.lemma_tokens)\n token_windows = self.create_token_windows(self.lemma_tokens)\n graph = self.create_matrix(vocab, token_windows)\n text_rank = np.array([1] * len(vocab))\n previous_tr = 0\n d = 0.85\n min_difference = 1e-5\n for epoch in range(10):\n text_rank = (1 - d) + d * np.dot(graph, text_rank)\n if abs(previous_tr - sum(text_rank)) < min_difference:\n break\n else:\n previous_tr = sum(text_rank)\n node_weight = {}\n for word in vocab:\n node_weight[word] = text_rank[vocab[word]]\n return node_weight", "def predictRating(self, writtenReview):\n totalScores = [0] * 6\n sentence = writtenReview.split()\n \n for word in sentence:\n if word in self.dictionary:\n wordScores = self.dictionary[word].getTFIDF()\n for i in range(1, len(totalScores)):\n totalScores[i] += wordScores[i]\n\n maxIndex = totalScores.index(max(totalScores))\n if maxIndex == 0:\n return 5\n return maxIndex", "def phrase_scoring_ranking(phrases,model,dataset,bitext):\n e_phrases = []\n f_phrases = []\n count = 0\n f_phrase_count = {}\n e_phrase_count = {} #not needed\n #e_f_pair_count = {} #e words as rows and f words as columns\n f_e_pair_count = {} #e words as rows and f words as columns\n for phrase_set in phrases:\n for phrase in phrase_set:\n e_phrases.append(phrase[3])\n f_phrases.append(phrase[2])\n if phrase[2] in f_phrase_count:\n f_phrase_count[phrase[2]] += 1\n else:\n f_phrase_count[phrase[2]] = 1\n if phrase[2] in f_e_pair_count:\n if phrase[3] in f_e_pair_count[phrase[2]]:\n f_e_pair_count[phrase[2]][phrase[3]] += 1\n else:\n f_e_pair_count[phrase[2]][phrase[3]] = 1\n else:\n f_e_pair_count[phrase[2]]={}\n f_e_pair_count[phrase[2]][phrase[3]] = 1\n\n e_phrases = list(set(e_phrases))\n f_phrases = list(set(f_phrases))\n ep_count = len(e_phrases)\n fp_count = len(f_phrases)\n #pmatrix = np.empty(ep_count*fp_count) # ######Not needed if dictionary is used\n #pmatrix = pmatrix.reshape(ep_count,fp_count)\n #pmatrix.fill(0)\n ef_prob_dict = {}\n for e in e_phrases:\n for f in f_phrases:\n ef_count =count_fe_pair(e,f,f_e_pair_count)# f_e_pair_count[e][f]\n f_count = f_phrase_count[f]\n e_idx = e_phrases.index(e) ###Check the count logic again\n f_idx = f_phrases.index(f)\n pair_prob = ef_count/f_count\n #pmatrix[e_idx][f_idx] = pair_prob\n if f in f_e_pair_count:\n if e in f_e_pair_count[f]:\n if f in ef_prob_dict:\n ef_prob_dict[f][e]=pair_prob\n else:\n ef_prob_dict[f] = {}\n ef_prob_dict[f][e] = pair_prob\n\n #if pmatrix[e_idx][f_idx] != 0:\n # print(e,f,ef_count,f_count,pair_prob)\n return ef_prob_dict", "def _calculate_ranking(self, files_found_by_word: Dict[str, int],\n words: List[str]) -> List[Tuple[str, float]]:\n size_words = len(words)\n words_percentage_hit = [(k, v / size_words) for (k, v) in files_found_by_word.items()]\n return words_percentage_hit", "def analyze_emoji_sentimens(text):\n sum = 0.0\n count = 0\n for character in list(text):\n value = index.get(character, None)\n if value != None:\n sum += value\n count += 1\n if count == 0:\n return 0.0\n\n return sum/count", "def recommendation_ranking(self):\n iu = self.final_recommendation_score_matrix()\n new_iu = []\n for row in iu:\n li = []\n temp = row\n if self.product != \"dist\":\n temp = -np.sort(-temp)\n for element in row:\n li.append(binary_search_opp(temp,element)+1) \n else:\n temp = np.sort(temp)\n for element in row:\n li.append(np.searchsorted(temp,element)+1)\n new_iu.append(li)\n return np.array(new_iu)", "def ngram_score(ngram, score):\n return score + len(ngram) * 0.1", "def user_interaction_score(uv, 
recommended_News, ranked=True):\n\n iv = recommended_News[\"topical_vector\"]\n\n product = simple_doct_product(uv, iv)\n\n epsilon = 10e-5\n\n if (product + epsilon) > 1.0:\n vui = 0.99\n else:\n vui = beta_distribution(product)\n\n # Awared preference\n ita = beta_distribution(0.98)\n pui = vui * ita\n\n return pui", "def __ranking_function(self, doc, query_tokens):", "def perplexity(self, text_ngrams):\n return pow(\n 2.0, self.entropy(progress(text_ngrams, desc=\"Calculating Perplexity\") if self.verbose else text_ngrams)\n )", "def polarity_scores(self, text):\n # convert emojis to their textual descriptions\n text_token_list = text.split()\n \n text_no_emoji_lst = []\n \n for token in text_token_list:\n if token in self.emojis:\n # get the textual description\n description = self.emojis[token]\n text_no_emoji_lst.append(description)\n else:\n text_no_emoji_lst.append(token)\n text = \" \".join(x for x in text_no_emoji_lst)\n \n sentitext = SentiText(text)\n \n sentiments = []\n words_and_emoticons = sentitext.words_and_emoticons\n \n for item in words_and_emoticons:\n valence = 0\n i = words_and_emoticons.index(item)\n # check for vader_lexicon words that may be used as modifiers or negations\n \n if item.lower() in BOOSTER_DICT:\n sentiments.append(valence)\n continue\n if (i < len(words_and_emoticons) - 1 and item.lower() == \"kind\" and\n words_and_emoticons[i + 1].lower() == \"of\"):\n sentiments.append(valence)\n continue\n \n sentiments = self.sentiment_valence(valence, sentitext, item, i, sentiments)\n \n\n sentiments = self._but_check(words_and_emoticons, sentiments)\n\n valence_dict = self.score_valence(sentiments, text)\n return valence_dict", "def textrank(artId):\n divisor = 4\n min_num, max_num = 3, 10\n\n art = id2news[artId]\n\n if len(art.sentences) <= 3:\n return art.body_text #no textrank if no text to rank...\n\n cosine_matrix = np.asarray([[lemma_similarity(sent_1, sent_2) for sent_1 in art.sentences] for sent_2 in art.sentences])\n graph = nx.from_numpy_array(cosine_matrix)\n scores = nx.pagerank(graph)\n\n n_sents = len(art.sentences)\n num_top = min(max(round(n_sents/divisor), min_num), max_num)\n ranked_sentences = sorted(((scores[i], i) for i in range(n_sents)), reverse=True)[:num_top]\n ranked_sentences.sort(key=lambda x: x[1]) #preserving order will help give more context\n return ' '.join([art.true_sentences[x[1]] for x in ranked_sentences])", "def ranking_loss(z_image, z_text, y, report_id,\n similarity_function='dot'):\n return imposter_img_loss(z_image, z_text, y, report_id, similarity_function) + \\\n imposter_txt_loss(z_image, z_text, y, report_id, similarity_function)", "def get_summary_from_text_rank(text, target_percentage, nlp):\n\n # general pre-processing\n sentences, sentence_vector_list = get_sentences(text, nlp)\n\n # begin generating topics from the text, beginning with 1 topic\n number_topics = 1\n percentage = len('. '.join(text_rank(sentence_vector_list, number_topics, sentences))) / len(text)\n\n # check if the ratio of the summary to the text is below the user-defined threshold\n while percentage < target_percentage:\n number_topics += 1\n percentage = len('. '.join(text_rank(sentence_vector_list, number_topics, sentences))) / len(text)\n\n return '. 
'.join(text_rank(sentence_vector_list, number_topics, sentences))", "def reciprocal_rank(ranking, references, atk=None):\n for k, prediction in enumerate(ranking[:atk], 1):\n if prediction in references:\n return 1.0 / k\n return 0.0", "def prufer_rank(self):\n r = 0\n p = 1\n for i in range(self.nodes - 3, -1, -1):\n r += p*self.prufer_repr[i]\n p *= self.nodes\n return r", "def get_ngram_prob(self, label_seq):\n curr_ngram = self.all_grams\n for i in range(0, len(label_seq)):\n label = label_seq[i]\n if i == len(label_seq) - 1:\n denom = curr_ngram.get_count() + self.SMOOTHING_VALUE * 9\n curr_ngram = curr_ngram.get_next_Ngram(label)\n # For smoothing, just add self.SMOOTHING_VALUE\n numer = curr_ngram.get_count() + self.SMOOTHING_VALUE\n return float(numer) / denom", "def get_score(self, list_item):\n stars = list_item.find('div', {'class': 'rating-large'})\n if stars:\n split_class = stars.find('i').get('title').split(' ')\n if split_class and is_numeric(split_class[0]):\n return float(split_class[0]) * 2.0\n return None", "def recommend_nmf():\n pass", "def centre_priority_evaluate(self):\r\n evaluation = 0\r\n for player in range(2):\r\n player_sign = player * 2 - 1\r\n for i in range(4):\r\n score = i + 1\r\n evaluation += player_sign * score * count_bits(self.bitboard_king[player] &\r\n self.CENTRE_PRIORITY_BITMASKS[i])\r\n evaluation += player_sign * score * count_bits(self.bitboard_pawns[player] &\r\n self.CENTRE_PRIORITY_BITMASKS[i])\r\n return evaluation", "def weight_distribution(self):\n all_scores = []\n for zettel in self.lemma_tokens:\n scores = []\n for word in zettel:\n cur_tf_idf = self.tf_idf_scores[word[0]] / 3 #range: 0-3+\n if word[1] == 'NG':\n word_list = re.split(\" \", word[0])\n cur_word_score = 0\n i = 0\n for new_word in word_list:\n cur_word_score += self.word_scores[new_word]\n i += 1\n cur_word_score = cur_word_score / i / 2 #range: 0-2+\n else:\n cur_word_score = self.word_scores[word[0]] / 2 #range: 0-2+\n cur_keyword_score = self.keyword_scores[word[0]] / 4 #0-4+\n cur_text_rank = self.text_ranks[word[0]] / 10 #range: 0-12+\n cur_pos_score = self.pos_scores[word[0]]\n cur_area_score = self.z_area_scores[word[0]]\n cur_total_score = ((cur_tf_idf * self.score_weights[0]) + (cur_word_score * self.score_weights[1]) +\n (cur_keyword_score * self.score_weights[2]) + (cur_text_rank * self.score_weights[3]) +\n (cur_pos_score * self.score_weights[4]) + (cur_area_score * self.score_weights[5])) / 6\n scores.append(cur_total_score)\n all_scores.append(scores)\n return all_scores", "def offensive_rating(data_frame, mode):\n off_rat = dict()\n average_points = calculate_average_points(data_frame, mode)\n for k, possessions in possessions_home_away(data_frame, mode).items():\n try:\n off_rat[k] = format(float(average_points[k]) * 100 / float(possessions), '.2f')\n except ZeroDivisionError:\n off_rat[k] = 0.0\n return off_rat", "def _kneser_ney_probability(self, count: int, sequence: str,\n sequence_total_count: int) -> float:\n assert self.count_map is not None and \\\n self.n_1_gram_map is not None, 'count map or n minus 1 gram map not initialized'\n\n count_previous_and_current: Optional[int] = None\n if sequence == unseen_output or sequence not in self.n_1_gram_map:\n # did not see given sequence, default count to 1\n count_previous_and_current = 1\n else:\n count_word = len(self.n_1_gram_map[sequence])\n count_previous_and_current = sequence_total_count + count_word\n d = count - self._good_turing_new_c(count)\n # first term is the term on the left of the 
equation\n first_term = max([count_previous_and_current - d, 0]\n ) / float(sequence_total_count)\n\n if sequence == unseen_output:\n # if sequence is not seen, use frequency of unknown\n # lmbda = d / count * freq(unknown)\n sequence = unknown_token\n different_final_word_types: int = 0\n if sequence in self.model:\n current_sequence_data: NGramsSequence = self.model[sequence]\n different_final_word_types = len(current_sequence_data.next_count)\n # lambda is part of the second term\n lmbda = d / float(sequence_total_count) * different_final_word_types\n\n different_preceding_final_word_types: int = 0\n if sequence in self.n_1_gram_map:\n different_preceding_final_word_types = len(\n self.n_1_gram_map[sequence])\n\n num_n_grams = len(self.model)\n if num_n_grams == 0:\n return 0.\n\n # p_cont is the second part of the second term\n p_cont = float(different_preceding_final_word_types) / num_n_grams\n\n # return probability of the current sequence\n return first_term + lmbda * p_cont", "def compute_rating(positive_count, neutral_count, negative_count):\n total = positive_count + neutral_count + negative_count\n if total < 5:\n return 'NEUTRAL'\n\n pos = positive_count/total\n neg = negative_count/total\n\n if pos > 0.3 and neg > 0.3:\n return 'CONTROVERSIAL'\n if pos > 0.7 or (pos > 0.5 and pos >= neg * 2):\n return 'POSITIVE'\n if neg > 0.7 or (neg > 0.5 and neg >= pos * 2):\n return 'NEGATIVE'\n return 'NEUTRAL'", "def reviewer_similarity_score(self, other: _Vertex) -> float:\n if self.degree() == 0 or other.degree == 0:\n return 0.0\n else:\n neighbours = self.neighbours\n other_neighbours = other.neighbours\n same_neighbours = neighbours.keys() & other_neighbours.keys()\n union = len(self.neighbours) + len(other.neighbours)\n sim_score_so_far = 0\n\n for vertex in same_neighbours:\n # 'bothered reviewing' bonus:\n sim_score_so_far += 1\n # 'love' bonus\n if self.neighbours[vertex] >= 9 and other.neighbours[vertex] >= 9:\n sim_score_so_far += 2\n # 'like' bonus\n elif self.neighbours[vertex] >= 7 and other.neighbours[vertex] >= 7:\n sim_score_so_far += 1\n\n return sim_score_so_far / union", "def compute_ari(text: str):\n characters = len(text.replace(\" \", \"\").replace(\".\", \"\").replace(\",\", \"\").replace(\";\", \"\"))\n words = text.count(\" \") + 1\n sentences = text.count(\".\")\n\n score = 4.71 * (characters / words) + .5 * (words / sentences) - 21.43\n\n return score", "def analyze(self, text):\n\n tknzr = nltk.tokenize.TweetTokenizer()\n words = tknzr.tokenize(text)\n \n score = 0\n \n for word in words:\n if word.lower() in self.positives:\n score += 1\n elif word.lower() in self.negatives:\n score -= 1\n else:\n continue\n \n return score", "def score(self, sentence):\n\n\n # TODO your code here\n score = 0.0 \n prevWord = \"\"\n prevPrevWord = \"\"\n newSentence = []\n for word in sentence:\n newSentence += word.split()\n for currentWord in sentence:\n currentWord = currentWord.strip(STRIP_CHARS)\n currentWord = currentWord.lower()\n if prevWord != \"\":\n if prevPrevWord != \"\":\n trigram = (prevPrevWord, prevWord, currentWord)\n trigramCount = self.trigramCounts[trigram]\n if trigramCount > 0:\n score += math.log(max(self.trigramCounts[trigram] - DISCOUNT, 0)*len(self.trigramCounts) + DISCOUNT*self.followingCounts[(prevPrevWord, prevWord)]*self.continuationCounts[currentWord])\n # Subtraction by 1 removes the add one count from the laplace\n # smoothing\n score -= math.log((self.bigramCounts[(prevPrevWord, prevWord)]) * len(self.trigramCounts))\n elif 
self.bigramCounts[(prevWord, currentWord)] > 0:\n score += math.log(self.bigramCounts[(prevWord, currentWord)]*BI_BACKOFF_COEFFICIENT)\n score -= math.log(self.totalBigramCounts)\n else:\n count = self.unigramCounts[currentWord]\n score += math.log(count * UNI_BACKOFF_COEFFICIENT)\n score -= math.log(self.total)\n else:\n prevPrevWord = prevWord\n prevWord = currentWord\n else:\n prevWord = currentWord\n return -score", "def relevance_ranking(data, ranked_list, gamma=0.5, stop_prob=0.7):\n total_relevance = 0\n for query in ranked_list:\n exposure = 1.0\n for doc in query[1]:\n relevance = doc[0]\n\n total_relevance += exposure * relevance * stop_prob\n\n exposure *= gamma\n exposure *= (1 - stop_prob * relevance)\n return total_relevance / len(ranked_list)", "def specific_pos_score(self, word):\n if word[0][1] == 'NNP':\n return 5\n if word[0][1] == 'NN':\n return 2\n else:\n return 1", "def getTextStatsFeat(text, stemmRequired = True,\r\n excludeStopwordsRequired = True):\r\n #length = len(text)\r\n sentenceCount = len(re.findall(\"[.?!]\", text))\r\n exclamationMarkCount = len(re.findall(\"[!]\", text))\r\n questionMarkCount = len(re.findall(\"[?]\", text))\r\n digitsCount = len(re.findall(\"[0-9]+\", text))\r\n text = text.replace(\",\", \" \").replace(\".\", \" \")\r\n cleanText = re.sub('[^a-zа-я0-9]', ' ', text.lower())\r\n wordCount = 0.0\r\n charCount = 0.0\r\n rusCharCount = 0.0\r\n engCharCount = 0.0\r\n if excludeStopwordsRequired:\r\n for w in cleanText.split():\r\n if len(w)>1 and w not in stopwords:\r\n if not (not stemmRequired or re.search(\"[0-9a-z]\", w)):\r\n w = stemmer.stem(w)\r\n wordCount += 1\r\n c, rus, eng = getWordCharCount(w)\r\n charCount += c\r\n rusCharCount += rus\r\n engCharCount += eng\r\n else:\r\n for w in cleanText.split():\r\n if len(w)>1:\r\n if not (not stemmRequired or re.search(\"[0-9a-z]\", w)):\r\n w = stemmer.stem(w)\r\n wordCount += 1\r\n c, rus, eng = getWordCharCount(w)\r\n charCount += c\r\n rusCharCount += rus\r\n engCharCount += eng\r\n # per sentence\r\n wordPerSentence = tryDivide(wordCount, sentenceCount)\r\n charPerSentence = tryDivide(charCount, sentenceCount)\r\n rusCharPerSentence = tryDivide(rusCharCount, sentenceCount)\r\n engCharPerSentence = tryDivide(engCharCount, sentenceCount)\r\n # per word\r\n charPerWord = tryDivide(charCount, wordCount)\r\n rusCharPerWord = tryDivide(rusCharCount, wordCount)\r\n engCharPerWord = tryDivide(engCharCount, wordCount)\r\n # ratio\r\n rusCharRatio = tryDivide(rusCharCount, charCount)\r\n engCharRatio = tryDivide(engCharCount, charCount)\r\n rusCharVsEngChar = tryDivide(rusCharCount, engCharCount)\r\n engCharVsRusChar = tryDivide(engCharCount, rusCharCount)\r\n \r\n stats = [\r\n sentenceCount,\r\n wordCount,\r\n charCount,\r\n rusCharCount,\r\n engCharCount,\r\n digitsCount,\r\n exclamationMarkCount,\r\n questionMarkCount,\r\n wordPerSentence,\r\n charPerSentence,\r\n rusCharPerSentence,\r\n engCharPerSentence,\r\n charPerWord,\r\n rusCharPerWord,\r\n engCharPerWord,\r\n rusCharRatio,\r\n engCharRatio,\r\n rusCharVsEngChar,\r\n engCharVsRusChar,\r\n ]\r\n statsFeat = \"\"\r\n for i,f in enumerate(stats):\r\n if f != 0:\r\n statsFeat += \"%s:%s \" % (i+1, f)\r\n statsFeat = statsFeat[:-1] \r\n return statsFeat", "def rank_results(result_list, search_title, search_artist, uploader_list):\n #scores = []\n #search_artist = search_artist.replace(\"+\", \" \").lower()\n search_title = search_title.replace(\"+\", \" \")\n #search_terms = search_title.split() + search_artist.split()\n\n ## Give score 
to each result\n #for index, title in enumerate(result_list):\n # title = title.lower()\n # score = 0\n\n # # One point for each word in result title\n # for term in search_terms:\n # if term in title:\n # score += 1\n\n # # 2 points if whole title in result, 2 points for whole artist, 4 points for both\n # if search_title in title:\n # score += 2\n # if search_artist in title:\n # score += 2\n # if search_title in title and search_artist in title:\n # score += 4\n # if search_title == title and (uploader_list[index] == search_artist+\" - topic\" or uploader_list[index] == 'various artists - topic' or uploader_list[index] == search_artist or uploader_list[index] == search_artist+'\\\\xa0'):\n # score += 100\n # if 'karaoke' in title:\n # score-=1000\n\n # scores.append(score)\n\n # return scores.index(max(scores))\n for index, title in enumerate(result_list):\n title = title\n if search_title == title:\n return index\n\n return 0", "def score_ap_from_ranks_1(ranks, nres):\n\n # accumulate trapezoids in PR-plot\n ap = 0.0\n\n # All have an x-size of:\n recall_step = 1.0 / nres\n\n for ntp, rank in enumerate(ranks):\n\n # y-size on left side of trapezoid:\n # ntp = nb of true positives so far\n # rank = nb of retrieved items so far\n if rank == 0:\n precision_0 = 1.0\n else:\n precision_0 = ntp / float(rank)\n\n # y-size on right side of trapezoid:\n # ntp and rank are increased by one\n precision_1 = (ntp + 1) / float(rank + 1)\n\n ap += (precision_1 + precision_0) * recall_step / 2.0\n\n return ap", "def analyze(self, text): #takes the text to be analyzed for sentiment\n #initialize inicial score to 0\n score = 0\n #Create tokenizer instance\n tokenizer = nltk.tokenize.TweetTokenizer()\n #create list of words in a tweets\n tokens = tokenizer.tokenize(text)\n \n #iterate over tokens(list of words)\n for word in tokens:\n #check if word is positive or negative\n if word.lower() in self.positives_words:\n score+=1\n if word.lower() in self.negatives_words:\n score-=1\n #neutral if its neither, doesnt add anything, 0\n return score", "def overall_performance_prf(articles, skip_nils=True, skip_nonnils=False):\n tp=0\n fn=0\n fp=0\n for article in articles:\n for entity in article.entity_mentions:\n if skip_nils and entity.gold_link=='--NME--':\n continue\n if skip_nonnils and entity.gold_link!='--NME--':\n continue\n if entity.gold_link==entity.sys_link:\n tp+=1\n else:\n if entity.sys_link!='--NME--':\n fp+=1\n if entity.gold_link!='--NME--':\n fn+=1\n print(tp, fp, fn)\n p=tp/(tp+fp) \n r=tp/(tp+fn) \n f1=2*p*r/(p+r)\n print(p,r,f1)\n return f1", "def score(self, sentence):\n # count each incremented word\n for word in sentence:\n if word not in self.unigramCounts:\n self.zeroCount += 1\n\n # apply laplace smoothing to unigram model\n score = 0.0\n for word in sentence:\n count = self.unigramCounts[word]\n score += math.log(count + 1)\n score -= math.log(self.totalCount + self.zeroCount)\n return score", "def calculate_recommendations(self, vote_list, itemMatch, itemIgnored):\n #print \"--------------------------------------------------\"\n #print \"calculate_recommendations\"\n #print \"--------------------------------------------------\"\n\n # http://www.quuxlabs.com/blog/2010/09/matrix-factorization-a-simple-tutorial-and-implementation-in-python/\n\n # U = np.array('users')\n # D = np.array('video_games')\n\n # R = |U| cross |D|\n\n # We want to discover K latent features\n\n # Find\n # P(a | |U| corss K matrix)\n # Q(a | |D| cross K matrix)\n # Such that their product approximates R\n # R 
approx= P cross transpose(Q) = hat(R)\n #\n\n # r[i][j] = transpose(p)[i] * q[j]\n # = sum( 1..k, p[i][k] * q[k][j] )\n\n # e[i][j]**2 = (r[i][j] - hat(r)[i][j])**2\n # = (r[i][j] - sum( 1..K, p[i][k] * q[k][j]))**2\n # squared error, estimated rating can be either higher or lower than the real thing\n\n # find the gradient\n # diff(e[i][j]**2, p[i][k]) = -2*(r[i][j] - hat(r)[i][j]) * (q[k][j]) = -2*e[i][j] * q[k][j]\n # diff(e[i][j]**2, q[k][j]) = -2*(r[i][j] - hat(r)[i][j]) * (p[i][k]) = -2*e[i][j] * p[i][k]\n\n # update rules\n # alpha = settings.alpha # learning_rate\n # alpha = 0.0002 # learning_rate\n # p[i][k]' = p[i][k] + alpha * diff(e[i][j]**2, p[i][k])\n # = p[i][k] + 2 * alpha * e[i][j] * q[k][j]\n # q[k][j]' = q[k][j] + alpha * diff(e[i][j]**2, q[k][j])\n # = q[k][j] + 2 * alpha * e[i][j] * p[i][k]\n\n # training data\n # T = (u[i], d[j], r[i][j])\n # np.array()\n\n # iterate until convergance\n # E = sum((u[i], d[j], r[i][j]) in T, e[i][j])\n # = sum((u[i], d[j], r[i][j]) in T, r[i][j]\n # - sum(1..k, p[i][k]*q[k][j]))**2\n\n # regularization\n # beta = 0.02\n # e[i][j]**2 = (r[i][j] - sum(1..K, p[i][j]*q[k][j]))**2\n # + ((beta/2) * sum(1..K, norm(P)**2 + norm(Q)**2))\n #\n # p[i][k]' = p[i][k] + alpha * (2 * e[i][j] * q[k][j] - beta * p[i][k])\n # q[k][j]' = q[k][j] + alpha * (2 * e[i][j] * p[i][k] - beta * q[k][j])\n\n data = np.array(vote_list)\n\n encoder = OneHotEncoder()\n\n users = data[:,0]\n unique_users = list(set(users))\n for i in range(len(users)):\n users[i] = unique_users.index(users[i])\n\n video_games = data[:,1]\n unique_games = list(set(video_games))\n for i in range(len(video_games)):\n video_games[i] = unique_games.index(video_games[i])\n\n ratings = data[:,2]\n M = len(set(video_games))\n N = len(set(users))\n R = np.zeros((N,M))\n for i in range(len(users)):\n user = users[i]\n game = video_games[i]\n rating = ratings[i]\n R[user][game] = rating\n\n K = 2\n\n P = np.random.rand(N,K)\n Q = np.random.rand(M,K)\n\n nP, nQ = self.matrix_factorization(R, P, Q, K)\n nR = np.dot(nP, nQ.T)\n\n itemMatch = {}\n for i in range(N):\n user = unique_users[i]\n itemMatch[user] = []\n for j in range(M):\n if R[i][j] == 0:\n video_game = unique_games[j]\n recommendation = (video_game, nR[i][j])\n itemMatch[user].append(recommendation)\n itemMatch[None] = []\n print 'pmf recommendations', itemMatch.items()\n print '\\n'\n recommendations = itemMatch.items()\n\n # returns\n # [\n # (<user1>, [\n # (\"<object_identifier1>\", <score>),\n # (\"<object_identifier2>\", <score>),\n # ]),\n # (<user2>, [\n # (\"<object_identifier1>\", <score>),\n # (\"<object_identifier2>\", <score>),\n # ]),\n # ]\n\n return recommendations", "def profile_text_stats( doc: BeautifulSoup ):\n text = doc.find('main', {'class': 'core-rail'}).text.strip()\n words = text.split()\n eng_ratio = sum(1 for word in words if word in COMMON_ENGLISH) * 10/ (len(words) + 0.001)\n return { 'length': len( text ),\n 'eng_ratio': np.round( eng_ratio, 2)}\n # %%", "def get_real_rating(self):\n if not (self.votes and self.score):\n return 0\n return float(self.score)/self.votes", "def analyze(self, text):\n\n score = 0.0;\n\n words = text.split(' ')\n # match each word in either the positives or negatives list adding or subtracting 1 from the score if present\n for word in words:\n for w in self.positives:\n if w == word.lower():\n score += 1.0\n continue\n \n for w in self.negatives:\n if w == word.lower():\n score -= 1.0\n continue\n\n return score", "def get_recommendations(prefs, person, 
similarity=sim_pearson):\n totals = {}\n similarity_sums = {}\n\n for other in prefs:\n if other == person:\n continue\n\n sim = similarity(prefs, person, other)\n\n if sim <= 0:\n continue\n\n for item in prefs[other]:\n if item not in prefs[person] or prefs[person][item] == 0:\n totals.setdefault(item, 0)\n totals[item] += prefs[other][item] * sim\n similarity_sums.setdefault(item, 0)\n similarity_sums[item] += sim\n\n # Normalized list\n rankings = [(total / similarity_sums[item], item)\n for item, total in totals.items()]\n\n # Returns normalized score, not an r that would be between -1 and 1\n rankings.sort()\n rankings.reverse()\n return rankings", "def sentiment_score(review):\n return sum([sentence_score(sentence, None, 0.0) for sentence in review])", "def analyze(self, text):\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n score = 0\n\n for token in tokens:\n if token in self.positives_list:\n score += 1\n elif token in self.negatives_list:\n score -= 1\n\n return score", "def uncorrected_p_score(self, tail):\n if tail=='neg':\n return self.t_rank\n else:\n return (1 + self.dp - self.t_rank)\n p = self.t_rank if tail=='neg' else 1-self.t_rank\n return p", "def calculate_percent_match(primers,\n seq_count,\n exclude_seq_count=1):\n # Calculate percent of sequences that are 'hit' by each primer\n for n in range(len(primers)):\n # Calculate percent perfect match\n primers[n].percent_match=float(primers[n].match_count/seq_count)\n primers[n].non_specific_percent=\\\n float(primers[n].non_specific_hits/exclude_seq_count)\n \n return primers", "def rankingOfNeighbor(self, star1, star2):\n approximateRank = min(self.NStars, 10**int(self.getDistance(star1, star2) /self.stardistances[star1][10][0]))\n if approximateRank <= 0:\n return 1\n return approximateRank", "def compute_sentences_ranking(video_captions):\n sentences_global_ranking = []\n\n if config.experiment == 'experiment1':\n bfs = True\n embeddings = []\n labels = []\n for sentence in video_captions.sentences:\n sentence_embedding = sentence.get_sentence_embedding(bfs)\n if len(sentence_embedding) > 0: # there are sentences without senses (i.e. 
'its a t') --> no embedding!\n embeddings.append(sentence_embedding)\n labels.append(sentence.sentence)\n\n embeddings_mean = np.mean(embeddings, axis=0)\n distances = [scipy.spatial.distance.cosine(embedding, embeddings_mean) for embedding in embeddings]\n for i, distance in enumerate(distances):\n sentences_global_ranking.append((video_captions.sentences[i].sentence, distance))\n\n elif config.experiment == 'experiment5':\n chencherry = SmoothingFunction()\n for i, sentence1 in enumerate(video_captions.sentences):\n scores = [bleu_score.sentence_bleu([sentence2.sentence.split(' ')], sentence1.sentence.split(' '), smoothing_function=chencherry.method4) for j, sentence2 in enumerate(video_captions.sentences)] # if i != j] # if we add 1 to all, result shouldn't change\n score = sum(scores) / len(scores)\n sentences_global_ranking.append((sentence1.sentence, score))\n\n else:\n result = np.zeros([20, 20])\n for i, sentence1 in enumerate(video_captions.sentences):\n for j, sentence2 in enumerate(video_captions.sentences):\n similarities = []\n for token1_id in sentence1.tokens_id_list:\n\n # find most similar token to sentence1.token1 in sentence2.tokens\n most_similar_token_in_sentence = (None, float('-inf'))\n for token2_id in sentence2.tokens_id_list:\n if (token1_id, token2_id) in config.tokens_set.tokens_similarities_closest:\n similarity = config.tokens_set.tokens_similarities_closest[(token1_id, token2_id)]\n if similarity > most_similar_token_in_sentence[1]:\n most_similar_token_in_sentence = (token2_id, similarity)\n\n # store token similarity (depending on the experiments we check if it is over threshold)\n if most_similar_token_in_sentence[0] is not None:\n if config.experiment in ['experiment4', 'experiment4symmetrical']:\n if most_similar_token_in_sentence[1] > config.th1:\n similarities.append((most_similar_token_in_sentence[0], 1.0)) # for each token we add 1 instead of similarity\n else:\n similarities.append((None, 0))\n elif config.experiment == 'experiment3':\n if most_similar_token_in_sentence[1] > config.th1:\n similarities.append(most_similar_token_in_sentence)\n else:\n similarities.append((None, 0))\n elif config.experiment == 'experiment2':\n similarities.append(most_similar_token_in_sentence)\n\n # compute and store similarity between sentence1 and sentence2\n if len(similarities) > 0:\n sentences_similarity = float(sum([a[1] for a in similarities])) / len(similarities)\n else:\n sentences_similarity = 0\n\n result[i, j] = sentences_similarity\n\n # we make the similarities symmetrical\n if config.experiment == 'experiment4symmetrical':\n for i in range(0, len(result)):\n for j in range(0, len(result)):\n symmetric_similarity = 0\n if result[i, j] + result[j, i] != 0:\n symmetric_similarity = (result[i, j] + result[j, i]) / 2\n result[i, j] = symmetric_similarity\n result[j, i] = symmetric_similarity\n\n # compute sentences similarity to all others (array of size 20)\n sentences_similarities = (np.sum(result, axis=1)) / result.shape[1] # sentences similarities normalized between 0 and 1\n for i, similarity in enumerate(sentences_similarities):\n sentences_global_ranking.append((video_captions.sentences[i].sentence, similarity))\n\n return sentences_global_ranking", "def popularity(self, user_list):\n item_popular = Counter(self.train['movieId'].values)\n ret = 0\n n = 0\n print('\\nCalculate popularity: ')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user)\n for rec in set([data[0] for data in recom_data]):\n ret += math.log(1 + 
item_popular.get(rec))\n n += 1\n ret /= n * 1.0\n print('\\npopularity: ', ret)\n return ret", "def listening_count_to_rating(urm):\n for i in range(urm.shape[0]):\n u = [(j, c) for j, c in zip(np.nonzero(urm[i, :])[0], urm[i, np.nonzero(urm[i, :])[0]])]\n u = sorted(u, key=lambda e: e[1])\n l = sum([c for _, c in u])\n agg = 0\n for j, c in u:\n agg += c\n\n if agg <= 0.2*l:\n urm[i, j] = 1\n elif agg > 0.2*l and agg <= 0.4*l:\n urm[i, j] = 2\n elif agg > 0.4*l and agg <= 0.6*l:\n urm[i, j] = 3\n elif agg > 0.6*l and agg <= 0.8*l:\n urm[i, j] = 4\n elif agg > 0.8*l:\n urm[i, j] = 5\n\n return urm", "def string_rank (text):\n freq_set = { \n ' ':13.00, 'e':12.70, 't':9.056, 'a':8.167, 'o':7.507, 'i':6.966, 'n':6.749, \n 's':6.327, 'h':6.094, 'r':5.987, 'd':4.253, 'l':4.025, 'u':2.758, 'b':1.492, \n 'c':2.782, 'f':2.228, 'g':2.015, 'j':0.153, 'k':0.772, 'm':2.406, 'p':1.929, \n 'q':0.095, 'v':0.978, 'w':2.360, 'x':0.150, 'y':1.974, 'z':0.074 }\n return sum([freq_set[letter] for letter in text if letter in freq_set])", "def score(self, sentence):\n score = 0.0\n V = len(self.f1) # vocabulary size\n for token in sentence:\n if token in self.f1: score += self.f1[token]\n else: score -= math.log10(self.total + V)\t\t # OOV \n return score", "def rerank(test_predicted_paraphrases, test_features, ranker, minimum_score):\n new_test_predicted_paraphrases = { (w1, w2) : [] for (w1, w2) in test_predicted_paraphrases.keys() }\n\n for ((w1, w2), curr_paraphrases), curr_paraphrase_features in tqdm.tqdm(zip(\n test_predicted_paraphrases.items(), test_features)):\n pars_and_vectors = zip(curr_paraphrases.items(), curr_paraphrase_features)\n\n # Sort the paraphrases according to the ranking\n def compare_paraphrases(p1, p2):\n return ranker.predict((p2[1] - p1[1]).reshape(1, -1))\n\n # Consider both the original score (for the specific noun-compound)\n # and the new rank (which paraphrases are more commonly ranked higher)\n sorted_paraphrases = [(paraphrase, (len(curr_paraphrases) - rank) * float(score))\n for rank, ((paraphrase, score), feature) in\n enumerate(sorted(pars_and_vectors,\n key=functools.cmp_to_key(compare_paraphrases)))]\n\n sorted_paraphrases = sorted(sorted_paraphrases, key=lambda x: x[1], reverse=True)\n\n # Keep only paraphrases with score above threshold. 
Best score = k * 1 = k,\n new_test_predicted_paraphrases[(w1, w2)] = \\\n [(paraphrase, score) for (paraphrase, score) in sorted_paraphrases if score >= minimum_score]\n\n return new_test_predicted_paraphrases", "def score(self):", "def _rank(claim_docs):\n\n claim, retrieved_docs = claim_docs\n compare_claim = _clean_text(claim)\n partial_ratios = [(doc_id, fuzz.partial_ratio(compare_claim, _clean_text(doc_id))) for doc_id in retrieved_docs]\n ordered_partial_ratios = sorted(partial_ratios, key=lambda pair: pair[1], reverse=True)\n filtered_docs = [pair[0] for pair in ordered_partial_ratios]\n if len(ordered_partial_ratios) > 5:\n filtered_docs = filtered_docs[:5]\n return filtered_docs", "def rank():\n return 0", "def disp_score():", "def calculateScore(self, queue):\n for song in queue:\n if song['explicit']:\n song['score'] = 3 * song['age'] + 2 * song['upvotes'] - 2 * song['downvotes']\n else:\n song['score'] = -1 * song['downvotes']", "def score_sequence(seq, ngramlogprobs):\n return", "def ratio(n1,n2, explain=0, optimize=False):\n weight_normal_form = 5.0 #distance between soundexes of normal form\n weight_normal_form_soundex = 8.0 #average distance between soundexes of normal form\n weight_geslachtsnaam1 = 10.0 #distance between soundexes of geslachtsnamen\n weight_geslachtsnaam2 = 10.0 #distance between geslachtsnaam\n weight_initials = 2 #distance between initials\n\n nf1 = n1.guess_normal_form()\n nf2 = n2.guess_normal_form()\n\n if not nf1 or not nf2:\n return 0.0\n elif nf1 == nf2:\n return 1.0\n ratio_normal_form = Similarity.average_distance(split(nf1), split(nf2))\n \n #create a simkplified soundex set for this name\n #remove stopwords\n# nf1 = remove_stopwords( nf1)\n# nf2 = remove_stopwords( nf2)\n \n se1 = n1.get_normal_form_soundex()\n se2 = n2.get_normal_form_soundex()\n ratio_normal_form_soundex = Similarity.average_distance( se1, se2)\n \n #gelachtsnaam wordt op twee manieren met elkaar vergeleken\n g1 = n1.geslachtsnaam() #or n1.get_volledige_naam()\n g2 = n2.geslachtsnaam() #or n2.get_volledige_naam()\n g1 = to_ascii(g1)\n g2 = to_ascii(g2)\n if not optimize:\n #de soundexes van de achternaam worden meegewoen\n #g1_soundex = n1.soundex_nl(g1, group=2, length=-1)\n g1_soundex = n1.geslachtsnaam_soundex()\n #g2_soundex = n2.soundex_nl(g2, group=2, length=-1)\n g2_soundex = n2.geslachtsnaam_soundex()\n ratio_geslachtsnaam1 = Similarity.average_distance(g1_soundex, g2_soundex)\n else:\n ratio_geslachtsnaam1 = 1 \n weight_geslachtsnaam1 = 0\n \n #n de afstand van de woorden in de achtenraam zelf\n ratio_geslachtsnaam2 = Similarity.average_distance(\n re.split('[ \\.\\,\\-]', g1.lower()),\n re.split('[ \\.\\,\\-]', g2.lower()),\n levenshtein_ratio)\n n1_initials = n1.initials()\n n1_initials_lower = n1_initials.lower()\n n2_initials = n2.initials()\n n2_initials_lower = n2_initials.lower()\n n1_contains_initials = n1.contains_initials()\n n2_contains_initials = n2.contains_initials()\n #count initials only if we have more than one\n #(or perhaps make this: if we know the first name)\n if len(n1_initials) == 1 or len(n2_initials) == 1:\n #initials count much less if there is only one\n weight_initials = weight_initials_if_one_name_consists_of_one_word_only\n# ratio_initials = .5\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n elif n1_contains_initials or n2_contains_initials:\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n weight_initials = weight_initials_if_one_name_is_in_initials\n elif len(n1_initials) > 1 and 
len(n2_initials) > 1:\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n else:\n ratio_initials = 0.7\n \n if n1_contains_initials or n2_contains_initials:\n weight_normal_form = weight_normal_form_if_one_name_is_in_initials \n weight_normal_form_soundex = weight_normal_form_soundex_if_one_name_is_in_initials\n\n counter = (ratio_normal_form * weight_normal_form +\n ratio_normal_form_soundex * weight_normal_form_soundex +\n ratio_geslachtsnaam1 * weight_geslachtsnaam1 +\n ratio_geslachtsnaam2 * weight_geslachtsnaam2 +\n ratio_initials * weight_initials)\n numerator = (weight_normal_form + weight_normal_form_soundex +\n weight_initials + weight_geslachtsnaam1 + weight_geslachtsnaam2)\n if numerator == 0:\n return 0.0\n final_ratio = counter/numerator\n\n if explain:\n s = '-' * 100 + '\\n'\n s += 'Naam1: %s [%s] [%s] %s\\n' % (n1, n1_initials, n1.guess_normal_form(), se1)\n s += 'Naam2: %s [%s] [%s] %s\\n' % (n2, n2_initials, n2.guess_normal_form(), se2)\n s += 'Similarity ratio: %s\\n' % final_ratio\n s += '--- REASONS' + '-' * 30 + '\\n'\n format_s = '%-30s | %-10s | %-10s | %-10s | %-10s | %s-10s\\n'\n s += format_s % ('\\t property', ' ratio', ' weight','relative_weight', ' r*w', 'r * relative_w')\n s += '\\t' + '-' * 100 + '\\n'\n format_s = '\\t%-30s | %-10f | %-10f | %-10f | %-10f | %-10f\\n'\n s += format_s % (' normal_form', ratio_normal_form, weight_normal_form,weight_normal_form/counter, ratio_normal_form * weight_normal_form, ratio_normal_form * weight_normal_form/counter)\n s += format_s % ('soundex van normal_form', ratio_normal_form_soundex, weight_normal_form_soundex,weight_normal_form_soundex/counter, ratio_normal_form_soundex* weight_normal_form_soundex, ratio_normal_form_soundex * weight_normal_form_soundex/counter)\n s += format_s % ('soundex van geslachtsnaam1', ratio_geslachtsnaam1, weight_geslachtsnaam1,weight_geslachtsnaam1/counter, ratio_geslachtsnaam1 * weight_geslachtsnaam1, ratio_geslachtsnaam1 * weight_geslachtsnaam1/counter)\n s += format_s % ('geslachtsnaam', ratio_geslachtsnaam2, weight_geslachtsnaam2,weight_geslachtsnaam2/counter, ratio_geslachtsnaam2 *weight_geslachtsnaam2 , ratio_geslachtsnaam2 * weight_geslachtsnaam2/counter)\n s += format_s % ('initials', ratio_initials, weight_initials, weight_initials/counter, ratio_initials *weight_initials, ratio_initials * weight_initials/counter)\n s += '\\tTOTAL (numerator) | %s (counter = %s)\\n' % (counter, numerator)\n \n return s\n return final_ratio", "def combine_pos_score(self, word):\n if word[1] == 'NNP':\n return 5\n elif word[1] == 'NN':\n return 2\n else:\n return 1", "def passion_analyzer(text):\n\n\tlower_text = text.lower()\n\n\thashtag_scaling = 0.3\n\texclamation_scaling = 0.5\n\tuppercase_scaling = 0.2\n\n\n\tpassion_index = 0\n\n\tfor x in range(len(positive_words)):\n\t\tpassion_index += (lower_text.count(positive_words[x]))**2\n\tfor x in range(len(negative_words)):\n\t\tpassion_index -= (lower_text.count(negative_words[x]))**2\n\tif '!' 
in text:\n\t\tpassion_index *= exclamation_scaling * lower_text.count('!') + 1\n\tif '#' in text:\n\t\tpassion_index *= hashtag_scaling * lower_text.count('#') + 1\n\tpassion_index *= uppercase_scaling * sum(1 for c in text if c.isupper())\n\n\n\t\t\n\treturn math.sqrt(passion_index)", "def sentiment_analyzer(text):\n\n\tlower_text = text.lower()\n\t\t\n\thashtag_scaling = 0.3\n\texclamation_scaling = 0.5\n\tuppercase_scaling = 0.2\n\n\n\tsent_index = 0\n\n\tfor x in range(len(positive_words)):\n\t\tsent_index += lower_text.count(positive_words[x])\n\tfor x in range(len(negative_words)):\n\t\tsent_index -= lower_text.count(negative_words[x])\n\tif '!' in text:\n\t\tsent_index *= exclamation_scaling * lower_text.count('!') + 1\n\tif '#' in text:\n\t\tsent_index *= hashtag_scaling * lower_text.count('#') + 1\n\tsent_index *= uppercase_scaling * sum(1 for c in text if c.isupper())\n\t\t\n\treturn sent_index", "def rank_websites(filename=\"web_stanford.txt\", epsilon=0.85):\n raise NotImplementedError(\"Task 2 Incomplete\")", "def _profile(self, text):\n prof = zeros(len(self.alph)**self.N)\n ngs = ngrams(text, self.N)\n for tup in ngs:\n loc = 0\n for i in range(len(tup)):\n loc += (len(self.alph)**i) * self.alph.index(tup[i])\n prof[loc] += 1\n return prof", "def get_rating(self):\n if not (self.votes and self.score):\n return 0\n return float(self.score)/(self.votes+self.field.weight)", "def ranking_precision_score(y_true, y_score, k=10):\n unique_y = np.unique(y_true)\n\n if len(unique_y) > 2:\n raise ValueError(\"Only supported for two relevance levels.\")\n\n n_relevant = 0\n n_pos = 0\n for relevance_score in y_true:\n if relevance_score == 1:\n n_pos += 1\n\n for index in y_score[:k]:\n if y_true[index] == 1:\n n_relevant += 1\n\n # Divide by min(n_pos, k) such that the best achievable score is always 1.0.\n return float(n_relevant) / min(n_pos, k) if min(n_pos, k) > 0 else 0", "def determine_spammer_by_percentage(self, reviewer_id):\n cut_value = 0.8\n\n fake_sql = \"select count(*) from reviews_simple where reviewerID = '%s' and fake = 1\" % reviewer_id\n legitimate_sql = \"select count(*) from reviews_simple where reviewerID = '%s' and fake = 0\" % reviewer_id\n\n self.cursor.execute(fake_sql)\n fake_num = self.cursor.fetchone()[0]\n self.cursor.execute(legitimate_sql)\n legitimate_num = self.cursor.fetchone()[0]\n\n total_num = float(fake_num + legitimate_num)\n if total_num == 0:\n return 2 # 2 represents unknown label\n else:\n\n if fake_num/total_num > cut_value:\n return 1\n else:\n return 0", "def analyze(self, text):\n\n # TODO\n # tokens = tokenizer.tokenize(tweet)\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n score = 0\n\n for word in tokens:\n # iterate over tokens#str.lower\n\n if word.lower() in self.positives:\n score = score+1\n\n elif word.lower() in self.negatives:\n score = score-1\n\n else:\n continue\n return score", "def analyze(title, artist):\n lyric_sentiment = 0\n title_sentiment = 0\n\n # Load the machine learning based model\n modelIMDB = nn_lyrics.loadModel(\"IMDB\")\n encoderIMDB = nn_lyrics.createEncoder(\"IMDB\")\n modelYelp = nn_lyrics.loadModel(\"Yelp\")\n encoderYelp = nn_lyrics.createEncoder(\"Yelp\")\n\n # Get the lyrics of the song\n print(\"Analyzing\", title, \"by\", artist, \"...\\n\")\n song = basic_lyrics.getSong(title, artist)\n if song is None:\n return\n lyrics_received = basic_lyrics.getLyrics(song)\n print(\"\")\n\n # weight_map = lyric_weights.getWeightMap(lyrics_received) Needed for line by line 
analysis\n\n # Get and print stats about the song\n feature_vec = features.getTrackFeatures(title, artist)\n features.printFeatures(feature_vec)\n tempo = int(feature_vec[5])\n mode = int(feature_vec[7])\n loudness = int(feature_vec[8])\n\n # Lexicon based analysis\n lyric_sentiment += ((basic_lyrics.analyze(lyrics_received, print=False) + 1)/2) # x+1/2 to convert to 0-1 scale\n title_sentiment += ((basic_lyrics.analyze(title, print=False) + 1)/2)\n\n # IMDB Model prediction\n imdb_lyrics = nn_lyrics.predict(lyrics_received, pad=True, model_to_predict=modelIMDB,\n encoder=encoderIMDB, prepro=True)\n lyric_sentiment += imdb_lyrics\n imdb_title = nn_lyrics.predict(title, pad=False, model_to_predict=modelIMDB,\n encoder=encoderIMDB, prepro=False) # Don't pre-process title since it is so short\n title_sentiment += imdb_title\n\n # Yelp Model Prediction\n yelp_lyrics = nn_lyrics.predict(lyrics_received, pad=True, model_to_predict=modelYelp,\n encoder=encoderYelp, prepro=True)\n lyric_sentiment += yelp_lyrics\n yelp_title = nn_lyrics.predict(title, pad=False, model_to_predict=modelYelp,\n encoder=encoderYelp, prepro=False)\n title_sentiment += yelp_title\n\n lyric_sentiment = lyric_sentiment/3\n title_sentiment = title_sentiment/3\n\n print(\"\\nLyric Sentiment: \", lyric_sentiment)\n print(\"\\nTitle Sentiment: \", title_sentiment)\n\n final_sentiment = equation.sentiment(mode, lyric_sentiment, title_sentiment, loudness, tempo)\n\n print(\"\\nFinal Sentiment: \", final_sentiment)", "def classify(tweets,table,positives,negatives,p_tweets,n_tweets):\n\n\n st = LancasterStemmer()\n\n n_words = len(table)\n in_table = 0\n not_in_table = 0\n\n\n y_pred = np.zeros(len(tweets)).astype('int32')\n\n for i in range(len(tweets)):\n likelihood_pos = 0\n likelihood_neg = 0\n \n # MAP negatives and positives\n for word in tweets[i].split():\n word = st.stem(word.decode('utf-8'))\n if word in table:\n in_table += 1\n likelihood_pos += m.log((table[word][0]+1)/float(positives + 1*n_words))\n likelihood_neg += m.log((table[word][1]+1)/float(negatives + 1*n_words))\n \n else:\n not_in_table += 1\n likelihood_pos += m.log(1/float(positives + 1*n_words))\n likelihood_neg += m.log(1/float(negatives + 1*n_words))\n\n likelihood_pos += m.log(p_tweets/float(p_tweets + n_tweets))\n likelihood_neg += m.log(n_tweets/float(p_tweets + n_tweets))\n\n\n\n # Classify as positive or negative\n if likelihood_neg < likelihood_pos: \n y_pred[i] = 1\n\n prediction = np.bincount(y_pred)\n\n print \"Known words: %d\" % in_table\n print \"Unknown words %d\\n\" % not_in_table\n\n positive_ratio = prediction[1]/float(prediction[1] + prediction[0])\n\n group = \"Positive\" if positive_ratio > 0.5 else \"Negative\" \n\n\n return positive_ratio,group", "def calculate_flesch_score(text):\n\n # Use the given formula and call other functions to get the values\n score = 206.835 - 1.015 * count_words(text) / count_sentences(text) - 84.6 * count_syllables(text) / count_words(\n text)\n\n return score", "def preprocessing(raw_text_df):\r\n \r\n stemmer = nltk.stem.porter.PorterStemmer()\r\n tokenizer = RegexpTokenizer(r'\\w+')\r\n # iterate over all lines for preprocessing\r\n for index, line in enumerate(raw_text_df):\r\n \r\n # if there is mention of stars from 1-5, change the integer into\r\n # text and combine the number and the word \"star\" to make a new word\r\n # example: \"I give this product 1 star\" is now \"I give this product onestar\"\r\n # why? 
numbers are removed as part of preprocessing\r\n if \"1 star\" in line:\r\n line = line.replace(\"1 star\", \"onestar\")\r\n if \"1 stars\" in line:\r\n line = line.replace(\"1 stars\", \"onestar\")\r\n if \"2 star\" in line:\r\n line = line.replace(\"2 star\", \"twostars\")\r\n if \"2 stars\" in line:\r\n line = line.replace(\"2 stars\", \"twostars\")\r\n if \"3 star\" in line:\r\n line = line.replace(\"3 star\", \"threestars\")\r\n if \"3 stars\" in line:\r\n line = line.replace(\"3 stars\", \"threestars\")\r\n if \"4 star\" in line:\r\n line = line.replace(\"4 star\", \"fourstars\")\r\n if \"4 stars\" in line:\r\n line = line.replace(\"4 stars\", \"fourstars\")\r\n if \"5 star\" in line:\r\n line = line.replace(\"5 star\", \"fivestars\")\r\n if \"5 stars\" in line:\r\n line = line.replace(\"5 stars\", \"fivestars\")\r\n \r\n # tokenize lines\r\n tokens = re.split('(\\d+)',line)\r\n # remove numbers\r\n no_digits = [w for w in tokens if not w.isdigit()]\r\n # join tokens\r\n joined_text = \" \".join(no_digits)\r\n # re tokenize\r\n tokens = tokenizer.tokenize(joined_text)\r\n # make tokens lowercase\r\n lower_tokens = [w.lower() for w in tokens if type(w) == str] \r\n # remove stopwords\r\n stopped_tokens = [w for w in lower_tokens if not w in stopwords.words('english')]\r\n # stem words\r\n clean_tokens = [stemmer.stem(w) for w in stopped_tokens]\r\n # join text\r\n joined_text = \" \".join(clean_tokens)\r\n # replace line with preprocessed line\r\n raw_text_df[index] = joined_text\r\n print(index)", "def textrank(doc, kp_count):\n tokens = [normalize(tok) for tok in doc]\n candidates = [normalize(*token) for token in ngrams(doc, 1)]\n\n word_graph = networkx.Graph()\n word_graph.add_nodes_from(set(candidates))\n word_graph.add_edges_from(zip(candidates, candidates[1:]))\n\n kw_ranks = networkx.pagerank_scipy(word_graph)\n\n if 0 < kp_count < 1:\n kp_count = round(kp_count * len(kw_ranks))\n kp_count = int(kp_count)\n\n top_words = {word: rank for word, rank in kw_ranks.items()}\n\n keywords = set(top_words.keys())\n phrases = {}\n\n tok_iter = iter(tokens)\n for tok in tok_iter:\n if tok in keywords:\n kp_words = [tok]\n kp_words.extend(it.takewhile(lambda t: t in keywords, tok_iter))\n n = len(kp_words)\n avg_rank = sum(top_words[w] for w in kp_words) / n\n phrases[' '.join(kp_words)] = avg_rank\n\n top_phrases = top_keys(kp_count, phrases)\n\n return top_phrases", "def stickers_for(phrase):\n\n stic_dict = {}\n for ltr in 'instagram':\n if ltr not in stic_dict:\n stic_dict[ltr] = 1\n else:\n stic_dict[ltr] += 1\n print(ltr)\n\n phr_dict = {}\n for ltr in phrase:\n if ltr not in phr_dict:\n phr_dict[ltr] = 1\n else:\n phr_dict[ltr] += 1\n print(ltr)\n\n ratios = []\n for ltr in stic_dict:\n if ltr in phr_dict:\n ratio = phr_dict[ltr] / stic_dict[ltr]\n ratios.append(ratio)\n\n if len(ratios) > 0:\n num_sticks = max(ratios)\n num_sticks = int(round(num_sticks, 0))\n return stic_dict, phr_dict, ratios, num_sticks\n\n else: \n return 0", "def analyze(self, text):\n score =0\n token = TweetTokenizer()\n tokens = token.tokenize(text)\n for token in tokens:\n if token.lower() in self.pos_list:\n score+=1\n elif token.lower() in self.neg_list:\n score-=1\n\n return score", "def assign_popularity_to_tweet(self, influencer, tweet):\n twNoLike = self.userTweetsStat[influencer][0][tweet]['like']\n twNoRt = self.userTweetsStat[influencer][0][tweet]['RT']\n twNoFlwr = self.userTweetsStat[influencer][0][tweet]['follower']\n twPopularity = (twNoLike + 2*twNoRt)/twNoFlwr\n \n return twPopularity", 
"def scoring(self):\n pass", "def evaluate_ranks(articles, rank_tuples):\n rank_tp=defaultdict(int)\n rank_fn=defaultdict(int)\n rank_fp=defaultdict(int)\n \n for article in articles:\n for mention in article.entity_mentions:\n form=mention.mention\n meaning=mention.gold_link\n sys_meaning=mention.sys_link\n t_gold=(form, meaning)\n t_sys=(form, sys_meaning)\n for rank, r_tuples in rank_tuples.items():\n if t_gold in r_tuples and t_sys in r_tuples:\n rank_tp[rank]+=1\n break\n elif t_gold in r_tuples:\n rank_fn[rank]+=1\n elif t_sys in r_tuples:\n rank_fp[rank]+=1\n print('tp', rank_tp)\n print('fp', rank_fp)\n print('fn', rank_fn)\n \n rank_prec={}\n rank_recall={}\n rank_f1={}\n \n for rank in range(1,13):\n if rank_tp[rank]+rank_fp[rank]>0:\n rank_prec[rank]=rank_tp[rank]/(rank_tp[rank]+rank_fp[rank])\n else:\n rank_prec[rank]=0.0\n if rank_tp[rank]+rank_fn[rank]>0:\n rank_recall[rank]=rank_tp[rank]/(rank_tp[rank]+rank_fn[rank])\n else:\n rank_recall[rank]=0.0\n if rank_prec[rank]+rank_recall[rank]>0:\n rank_f1[rank]=2*rank_prec[rank]*rank_recall[rank]/(rank_prec[rank]+rank_recall[rank])\n else:\n rank_f1[rank]=0.0\n print('precision', rank_prec)\n print()\n print('recall', rank_recall)\n print()\n print('f1', rank_f1)\n print()\n return rank_prec, rank_recall, rank_f1", "def average_precision(ranking, references, atk=None):\n total, num_correct = 0.0, 0.0\n for k, prediction in enumerate(ranking[:atk], 1):\n if prediction in references:\n num_correct += 1\n total += num_correct / k\n return total / num_correct if total > 0 else 0.0", "def spam_indicator(text):\n # This function returns the spam indicator rounded to two decimals\n\n word_list = text.split() # Turning string into list\n unique_words = set(word_list) # Turning list into set\n shared_words = unique_words & SPAM_WORDS # Intersection of two sets\n shared_ratio = (len(shared_words)) / (len(unique_words)) # Finding ratio\n rounded_ratio = round(shared_ratio, 2) # Rounding ratio to two places\n return rounded_ratio # Return rounded ratio", "def score(self, link_text, page_title, body_text):\n\t\tdoc = metapy.index.Document()\n\t\tdoc.content(link_text + page_title + body_text)\n\t\tdocvec = self.fwdIndex.tokenize(doc)\n\t\tlabel = self.classifier.classify(docvec)\n\t\tif label == \"NewHome\":\n\t\t\treturn 1.0\n\t\telif label == \"NotNewHome\":\n\t\t\treturn 0.0\n\t\telse:\n\t\t\treturn 0.5", "def popularity(self,train = None,test = None,k = 8,nitem = 10):\n train = train or self.traindata\n test = test or self.testdata\n item_popularity = dict()\n for user ,items in train.items():\n for item in items.keys():\n item_popularity.setdefault(item,0)\n item_popularity[item] += 1\n ret = 0\n n = 0\n for user in train.keys():\n rank = self.recommend(user, train, k = k, nitem = nitem)\n for item ,_ in rank.items():\n ret += math.log(1+item_popularity[item])\n n += 1\n return ret / (n * 1.0)", "def personalization(prediction, n):\n # prediction\n # n top n recommendation\n\n top_n = get_top_n(prediction, n)\n\n rec_dict = {}\n for uid, user_ratings in top_n.items():\n rec_dict[uid] = [iid for (iid, _) in user_ratings]\n\n rec_user_ls = [pred[0] for pred in prediction]\n rec_item_ls = [pred[1] for pred in prediction]\n\n unique_rec_user_ls = np.unique(rec_user_ls)\n unique_rec_item_ls = np.unique(rec_item_ls)\n\n # assign each item with index number\n unique_rec_item_dict = {item: ind for ind,\n item in enumerate(unique_rec_item_ls)}\n\n n_unique_rec_user = len(unique_rec_user_ls)\n n_unique_rec_item = len(unique_rec_item_ls)\n\n # 
recommended user item matrix\n rec_matrix = np.zeros(shape=(n_unique_rec_user, n_unique_rec_item))\n\n # represent recommended item for each user as binary 0/1\n for user in range(n_unique_rec_user):\n # get userid\n user_id = unique_rec_user_ls[user]\n # get rec item list\n item_ls = rec_dict[user_id]\n\n for item_id in item_ls:\n # get item index\n item = unique_rec_item_dict[item_id]\n rec_matrix[user, item] = 1\n\n # calculate cosine similarity matrix across all user recommendations\n similarity = cosine_similarity(X=rec_matrix, dense_output=False)\n # calculate average of upper triangle of cosine matrix\n upper_right = np.triu_indices(similarity.shape[0], k=1)\n # personalization is 1-average cosine similarity\n score = 1 - np.mean(similarity[upper_right])\n return score", "def rank(post):\n score = post.postlike_set.filter(post=post).count()\n order = log(max(score, 1), 10)\n\n seconds = seconds_since_epoch(post.creation_date) - SECONDS_SINCE_START\n\n post.rating = round(order + seconds / 45000, 7)", "def rate(self, neighbors, labels):\n num = 0\n den = 0\n for neighbor in neighbors:\n lable = self.labels[neighbor[1]]\n dest_to_neighbor = neighbor[0]\n num += lable / dest_to_neighbor\n den += 1 / dest_to_neighbor\n return num/den", "def rank_summaries(self, summary):\n summary_split = summary.split(\"@ highlight\")\n\n embedding_index = self.get_word_embeddings()\n sentence_vectors = []\n # get word count vector for each sentence\n for sentence in summary_split:\n words = nltk.word_tokenize(sentence)\n mean_vector_score = sum([embedding_index.get(\n word, np.zeros((100,))) for word in words])/len(words)\n sentence_vectors.append(mean_vector_score)\n\n # similarity matrix\n sim_matrix = self.get_similarity_matrix(sentence_vectors)\n # graph of matrix - retrieve a set of scores based on page rank algorithm\n pageRank_scores = self.get_graph(sim_matrix)\n # rank sentences based off scores and extract top one as the chosen sentence for training\n sent_scores = [(pageRank_scores[i], sent)\n for i, sent in enumerate(summary_split)]\n sent_scores = sorted(sent_scores, reverse=True)\n chosen_summary = sent_scores[0][1]\n return(chosen_summary)", "def predictRating(toPredict, candidateList):\n\n ratingRelevantCandidates = []\n\n #Remove candidates with no rating specified\n for candidate in candidateList:\n currentCandidate = candidate[1]\n\n if float(currentCandidate['vote_avg']) > 0:\n ratingRelevantCandidates.append((float(currentCandidate['vote_avg']), candidate))\n\n #print(\"ratings::::::::\",currentCandidate['vote_avg'])\n\n #Remove outlier candidates based on rating\n ratingMean = np.mean([x[0] for x in ratingRelevantCandidates])\n print(\"ratingMean\", ratingMean)\n ratingSD = np.std([x[0] for x in ratingRelevantCandidates])\n print(\"ratingSD\", ratingSD)\n\n finalRatings = [x for x in ratingRelevantCandidates if (float(x[0]) < ratingMean + ratingSD)]#1.5 *\n finalRatings = [x for x in finalRatings if (float(x[0]) > ratingMean - ratingSD)]#.75 *\n\n finalRatingCandidatesWithWeight = []\n\n #Weight each candidate based on vote count, direct and actor popularity and matching score from part 1\n for candidate in finalRatings:\n directorPoints = compareDirectorPoints(toPredict['director'], candidate[1][1]['director'])\n actorPoints = compareActorPoints(toPredict['cast'], candidate[1][1]['cast'])\n voteCountPoints = int(candidate[1][1]['vote_count'])\n matchPoints = candidate[1][0] / np.max([float(x[1][0]) for x in finalRatings]) * 100\n candidateWeight = PREDICTION_MATCHPOINTS_WEIGHT * 
matchPoints \\\n + PREDICTION_ACTOR_WEIGHT * actorPoints \\\n + PREDICTION_DIRECTOR_WEIGHT * directorPoints \\\n + PREDICTION_VOTECOUNT_WEIGHT * voteCountPoints\n\n finalRatingCandidatesWithWeight.append((candidateWeight, candidate[0]))\n\n #Calculate the prediction\n sumRatingCandidateWeights = np.sum([float(x[0]) for x in finalRatingCandidatesWithWeight])\n sumRatingTimesCandidateWeight = np.sum([float(x[0]) * float(x[1]) for x in finalRatingCandidatesWithWeight])\n\n ratingPrediction = float(sumRatingTimesCandidateWeight / sumRatingCandidateWeights)\n\n return ratingPrediction", "def analyze(self, text):\n #Check each word in text\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n total_score = 0\n #Sum the total score\n for token in tokens:\n token = token.lower()\n if token in self.positives:\n total_score = total_score + 1\n elif token in self.negatives:\n total_score = total_score - 1\n else:\n total_score = total_score + 0\n \n return total_score", "def analyze(self, text):\n #analize every word in the text a value -1, 1 or 0 and calculate total score\n #tokens allow us to split words in single tokens we can initialize tokens like this:\n\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text.lower())\n\n score = 0\n\n if tokens[0] in self.negatives:\n score =- 1\n elif tokens[0] in self.positives:\n score =+ 1\n else:\n score = 0\n\n #print('', text)\n\n return score", "def extract_review_rating(soup):\r\n notes = (\"One\", \"Two\", \"Three\", \"Four\", \"Five\" )\r\n review_rating = \"None\"\r\n section = soup.find(\"div\", attrs={\"class\": \"col-sm-6 product_main\"})\r\n for n in notes:\r\n note = \"star-rating \" + n\r\n if section.find(\"p\", attrs={\"class\": note}):\r\n review_rating = n \r\n return review_rating", "def precompute_scoring():\n global volume_void_inclusion\n global attract_point_distances\n global perlin_values\n \n volume_void_inclusion = []\n for i,void in enumerate(volumes_void):\n inclusion = gh.PointInBrep(void,points_input,False)\n volume_void_inclusion.append(inclusion)\n \n attract_point_distances = []\n for i,point in enumerate(points_attractor):\n distances = gh.Division(gh.Distance(point,points_input),max_dist)\n attract_point_distances.append(distances)", "def score_ngram(self, score_fn, w1, w2, w3):\n n_all = self.N\n n_iii = self.ngram_fd[(w1, w2, w3)]\n if not n_iii:\n return\n n_iix = self.bigram_fd[(w1, w2)]\n n_ixi = self.wildcard_fd[(w1, w3)]\n n_xii = self.bigram_fd[(w2, w3)]\n n_ixx = self.word_fd[w1]\n n_xix = self.word_fd[w2]\n n_xxi = self.word_fd[w3]\n return score_fn(n_iii, (n_iix, n_ixi, n_xii), (n_ixx, n_xix, n_xxi), n_all)", "def calculate(self):\n\n rating = 0\n\n props = ['aroma', 'appearance', 'taste', 'palate', 'bottle_style']\n for item in props:\n rating += getattr(self, item, 0)\n\n self.overall = (rating / self.total) / .2", "def __rank_topics(self, found_topics, explanation):\n max_value = 0\n scores = []\n for _,topic in found_topics.items():\n topic[\"score\"] = topic[\"times\"] * len(topic['grams'].keys())\n scores.append(topic[\"score\"])\n if topic[\"score\"] > max_value:\n max_value = topic[\"score\"]\n\n for _,topic in found_topics.items():\n if \"syntactic\" in topic:\n topic[\"score\"] = max_value\n\n\n\n\n # Selection of unique topics\n unique_topics = {}\n for t_p,topic in found_topics.items():\n prim_label = self.cso.get_primary_label_wu(t_p)\n if prim_label in unique_topics:\n if unique_topics[prim_label] < topic[\"score\"]:\n unique_topics[prim_label] 
= topic[\"score\"]\n else:\n unique_topics[prim_label] = topic[\"score\"]\n\n # ranking topics by their score. High-scored topics go on top\n sort_t = sorted(unique_topics.items(), key=lambda v: v[1], reverse=True)\n #sort_t = sorted(found_topics.items(), key=lambda k: k[1]['score'], reverse=True)\n\n\n # perform\n vals = []\n for t_p in sort_t:\n vals.append(t_p[1]) #in 0, there is the topic, in 1 there is the info\n\n\n #### suppressing some warnings that can be raised by the kneed library\n warnings.filterwarnings(\"ignore\")\n try:\n x_vals = range(1,len(vals)+1)\n t_kn = KneeLocator(x_vals, vals, direction='decreasing')\n if t_kn.knee is None:\n #print(\"I performed a different identification of knee\")\n t_kn = KneeLocator(x_vals, vals, curve='convex', direction='decreasing')\n except ValueError:\n pass\n\n ##################### Pruning\n\n try:\n knee = int(t_kn.knee)\n except TypeError:\n knee = 0\n except UnboundLocalError:\n knee = 0\n\n if knee > 5:\n try:\n knee += 0\n except TypeError:\n print(\"ERROR: \",t_kn.knee,\" \",knee, \" \", len(sort_t))\n\n else:\n try:\n if sort_t[0][1] == sort_t[4][1]:\n top = sort_t[0][1]\n test_topics = [item[1] for item in sort_t if item[1]==top]\n knee = len(test_topics)\n\n else:\n knee = 5\n except IndexError:\n knee = len(sort_t)\n\n final_topics = []\n final_topics = [self.cso.get_topic_wu(sort_t[i][0]) for i in range(0,knee)]\n self.reset_explanation()\n self.explanation = {self.cso.topics_wu[sort_t[i][0]]: explanation[sort_t[i][0]] for i in range(0,knee)}\n\n return final_topics", "def vanilaScore(self,attended,state,W):", "def get_score(p):\n temp = path[round(p[0], 1), round(p[1], 1)] / a_star\n return (clip(1 - temp, a_min=0, a_max=1) + clip(1 - temp, a_min=0, a_max=1) ** 2) / 2" ]
[ "0.5887334", "0.57918924", "0.57401806", "0.5696041", "0.56602454", "0.56410843", "0.5623116", "0.5621459", "0.55914843", "0.5587887", "0.55824214", "0.55516285", "0.553804", "0.55314225", "0.5495516", "0.54903185", "0.5481853", "0.5480828", "0.54778075", "0.54706895", "0.54648983", "0.5460723", "0.54587245", "0.5458427", "0.5456214", "0.54553264", "0.5433163", "0.5432685", "0.54266787", "0.5416248", "0.5409317", "0.5397407", "0.53945315", "0.539405", "0.53821677", "0.53809226", "0.53783894", "0.5366531", "0.53614503", "0.5338938", "0.5336174", "0.53234696", "0.53212833", "0.5319629", "0.53153664", "0.5312804", "0.53105646", "0.5304119", "0.53002703", "0.5273596", "0.52677107", "0.5266785", "0.52623844", "0.52615654", "0.5256613", "0.5256466", "0.5256104", "0.525311", "0.5249179", "0.52487886", "0.5248516", "0.5248055", "0.52421707", "0.52421254", "0.5237598", "0.52352726", "0.52344453", "0.52320546", "0.52275765", "0.5218154", "0.52178425", "0.5200862", "0.5192675", "0.5186697", "0.51820153", "0.51815313", "0.5175696", "0.5173859", "0.5168416", "0.5166813", "0.5166523", "0.51643497", "0.5163055", "0.5157964", "0.5151778", "0.5146846", "0.5145654", "0.5141223", "0.5140551", "0.5135208", "0.51341397", "0.51331735", "0.5129199", "0.51230407", "0.5119216", "0.51182044", "0.51154745", "0.510923", "0.5097821", "0.50931895" ]
0.5987914
0
Counts how many times each PPT layout is used. Returns the total number of interactive layouts, plus a dictionary of the layout counts.
def count_layouts(prs:Presentation) -> Tuple[int, Dict[str, int]]: layouts = collections.defaultdict(int) layouts_interactive = 0 for slide in prs.slides: layouts[slide.slide_layout.name] += 1 if slide.slide_layout.name in INTERACTIVE: layouts_interactive += 1 return (layouts_interactive, layouts)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getNumLayouts(self):\n return _libsbml.LayoutModelPlugin_getNumLayouts(self)", "def pobj_counts(pcode_obj):\n pcode = (pcode_obj.asDict())['pcode'][0] # no multiple pcode blocks - no delimiter\n counts = {'galleries': 0, 'spreads': 0, 'layouts': 0, 'panelgroups': 0}\n # , 'panels': 0, 'skips': 0 }\n galleries = pcode.pop('gallery', '')\n counts['galleries'] = len(galleries)\n for gallery in galleries:\n spreads = gallery.pop('spread', '')\n counts['spreads'] += len(spreads)\n for spread in spreads:\n layouts = spread.pop('layout', '')\n counts['layouts'] += len(layouts)\n for layout in layouts:\n panelgroups = layout.pop('panelgroup', '')\n counts['panelgroups'] += len(panelgroups)\n return counts", "def get_available_layouts(self):\n\n return self._layout_infos.iterkeys()", "def summarize(self) -> Mapping[str, int]:\n return dict(\n compounds=self.count_compounds(),\n side_effects=self.count_side_effects(),\n indications=self.count_indications(),\n umls=self.count_umls(),\n )", "def get_usage_count(equations):\n usage_count = {}\n for eq in equations:\n usage_count.setdefault(eq.lhs, 0)\n for var in eq.rhs.atoms(Variable):\n usage_count.setdefault(var, 0)\n usage_count[var] += 1\n return usage_count", "def _count_shapes(self, shape_data : dict) -> dict:\n shape_count = {}\n for item in shape_data:\n item_shape_type = item.get('shapeType')\n if item_shape_type not in shape_count:\n shape_count[item_shape_type] = 1\n else:\n shape_count[item_shape_type] += 1\n return shape_count", "def getNumGrids(self):\n c = list(self.gridVars.keys())\n return len(list(self.gridVars[c[0]].values()))", "def numberActivities(self):\n if self.use_dic:\n nb_data = self.dic.keys()\n nb_act = (self.dic[nb_data[0]]).keys()\n return len(nb_data)*len(nb_act)\n else:\n return -1", "def counts(self):\n\n counts = defaultdict(int)\n\n for i, geom in zip(self.tree_ids, self.tree):\n point_int = list(self.sindex.intersection(geom.bounds))\n if point_int:\n counts[i] += len(point_int)\n\n return dict(counts)", "def pdelements_count(self) -> int:\n return self.dss_obj.PDElementsI(ctypes.c_int32(0), ctypes.c_int32(0))", "def get_all_dataset_counts(\n self,\n ) -> Dict[Tuple[str, int, int], int]:\n res = self._engine.execute(\n select(\n [\n PRODUCT.c.name,\n TIME_OVERVIEW.c.start_day,\n TIME_OVERVIEW.c.period_type,\n TIME_OVERVIEW.c.dataset_count,\n ]\n )\n .select_from(TIME_OVERVIEW.join(PRODUCT))\n .where(TIME_OVERVIEW.c.product_ref == PRODUCT.c.id)\n .order_by(\n PRODUCT.c.name, TIME_OVERVIEW.c.start_day, TIME_OVERVIEW.c.period_type\n )\n )\n\n return {\n (\n r.name,\n *TimePeriodOverview.from_flat_period_representation(\n r.period_type, r.start_day\n )[:2],\n ): r.dataset_count\n for r in res\n }", "def detect_layouts(self, lscpu_output=None):\n stdout = self.detect_layout_lscpu(lscpu_output)\n rows = [row for row in stdout.strip().split('\\n') if not row.startswith('#')]\n layouts = [list(map(any2int, row.split(',')[2:4])) for row in rows]\n numa_layout, socket_layout = zip(*layouts)\n self.numa_layout = dict(enumerate(numa_layout))\n self.socket_layout = dict(enumerate(socket_layout))", "def get_layout_names(base_url=DEFAULT_BASE_URL):\n res = commands.cyrest_get('apply/layouts', base_url=base_url)\n return res", "def create_count_map(self) -> Dict[int, int]:\n res: Dict[int, int] = {}\n for sequence_data in self.model.values():\n sequence_data: NGramsSequence = cast(NGramsSequence, sequence_data)\n for count in sequence_data.next_count.values():\n count: int = cast(int, count)\n if count not in res:\n 
res[count] = 0\n res[count] += 1\n self.count_map = res\n logger.success('created count map')\n return res", "def size(self, level=None):\n level = level or self.local_variables\n names = {}\n while level:\n for name in level.bindings:\n names[name] = 1\n level = level.parent\n return len(names)", "def _get_as_dict_count(self):\n counter = Counter()\n for product in self.products:\n counter[product.id] += 1\n return counter", "def retrieve_panelist_appearance_counts(panelist_id: int,\n database_connection: mysql.connector.connect\n ) -> List[Dict]:\n\n cursor = database_connection.cursor()\n query = (\"SELECT YEAR(s.showdate) AS year, COUNT(p.panelist) AS count \"\n \"FROM ww_showpnlmap pm \"\n \"JOIN ww_shows s ON s.showid = pm.showid \"\n \"JOIN ww_panelists p ON p.panelistid = pm.panelistid \"\n \"WHERE pm.panelistid = %s AND s.bestof = 0 \"\n \"AND s.repeatshowid IS NULL \"\n \"GROUP BY p.panelist, YEAR(s.showdate) \"\n \"ORDER BY p.panelist ASC, YEAR(s.showdate) ASC\")\n cursor.execute(query, (panelist_id, ))\n result = cursor.fetchall()\n\n if not result:\n return None\n\n appearances = OrderedDict()\n total_appearances = 0\n for row in result:\n appearances[row[0]] = row[1]\n total_appearances += row[1]\n\n appearances[\"total\"] = total_appearances\n return appearances", "def getNumberOfViews(self) -> int:\n ...", "def get_paper_counter_per_topic_id(all_topic_assignments):\n counter = {}\n for topic_assignment in all_topic_assignments:\n for topic_index, topic_value in topic_assignment:\n if topic_index not in counter:\n counter[topic_index] = 0\n\n counter[topic_index] += 1\n\n return counter", "def getCounts(self):\n ret = [0]*len(self.numToLabel)\n for block in self.blocks:\n for label in block[1]: ret[label] += 1\n return ret", "def active_type_counts(self):\n names = self.visible()\n return {\n 'total': names.count(),\n 'personal': len([n for n in names if n.is_personal()]),\n 'organization': len([n for n in names if n.is_organization()]),\n 'event': len([n for n in names if n.is_event()]),\n 'software': len([n for n in names if n.is_software()]),\n 'building': len([n for n in names if n.is_building()])\n }", "def get_number_of_measurement(self):\n used_fragments = set()\n counter = 0\n for fragment in self.observed_fragments:\n num_of_isotope = 0\n used_counter = 0\n for i in self.mdv[fragment]:\n num_of_isotope = num_of_isotope + 1\n if self.mdv[fragment][i]['use'] == 'use':\n\n counter = counter + 1\n used_counter = used_counter + 1\n if num_of_isotope == used_counter:\n used_fragments.add(fragment)\n return counter-len(used_fragments)", "def count(self):\n count = {}\n\n for path, lines in self.lines_added.items():\n count[path] = count.get(path, 0) + sum(lines)\n\n for path, lines in self.lines_removed.items():\n count[path] = count.get(path, 0) + sum(lines)\n\n return count", "def numProcs(reportname):\n with open(reportname, \"rb\") as f:\n data = json.load(f)\n numProcesses = len(data[\"behavior\"][\"processes\"])\n return numProcesses", "def count(self):\n return {'count': self.collection.count()}", "def graph_count(self) -> int:\n return int(self.graph_tuple_stats.graph_count)", "def test_collect_incident_layouts_dependencies(self, module_repo):\n expected_result = {(\"PrismaCloudCompute\", True)}\n\n test_input = [\n {\n \"Dummy Layout\": {\n \"typeID\": \"dummy_layout\",\n \"name\": \"Dummy Layout\",\n \"pack\": \"dummy_pack\",\n \"kind\": \"edit\",\n \"path\": \"dummy_path\",\n \"incident_and_indicator_types\": [\n \"MITRE ATT&CK\",\n \"Prisma Cloud Compute Cloud 
Discovery\",\n ],\n \"incident_and_indicator_fields\": [\n \"indicator_adminname\",\n \"indicator_jobtitle\",\n ],\n }\n }\n ]\n\n found_result = PackDependencies._collect_layouts_dependencies(\n pack_layouts=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n )\n assert set(found_result) == set(expected_result)", "def count(self):\r\n\r\n return len(self.widgets_list)", "def get_count_of_controls(self, recurse: bool) -> int:\n return len(list(self.get_all_controls(recurse)))", "def count_modes(self):\n comment_count = self.db.fetchall(\n 'SELECT mode, COUNT(comments.id) FROM comments '\n 'GROUP BY comments.mode')\n\n logger.info(\"Comment count is %s\", comment_count)\n return dict(comment_count)", "def count_indications(self) -> int:\n return self._count_model(Indication)", "def get_num_of_containers(self):\n Container.num_of_cntnrs = len(Container.containers)\n return self.num_of_cntnrs", "def getViewPortAppCount(self):\n logger.debug('Getting map view port app count...')\n elements = get_elements_by_css(\".leaflet-marker-icon.dstCluster\")\n users = 0\n for element in elements:\n users += int(get_text(element))\n return users", "def number_of_sections(self):\n #print (len(self.config.sections()))\n return len(self.config.sections())", "def get_num_displayed_responses(self):\r\n return len(self._find_within(\".discussion-response\"))", "def counts(self):\n return sum(self.counter.values()), len(self.visited)", "def counts(self):\n return sum(self.counter.values()), len(self.visited)", "def number_objects():\n classes = [Amenity, City, Place, Review, State, User]\n names = [\"amenities\", \"cities\", \"places\", \"reviews\", \"states\", \"users\"]\n\n num_objs = {}\n for i in range(len(classes)):\n num_objs[names[i]] = storage.count(classes[i])\n\n return jsonify(num_objs)", "def total_ports(self):\n return len(self.port_extension_map.keys())", "def getNumViews(self):\n\n # Compute number of views of each 2D points\n self.num_views = np.sum( np.sum(self.pts2D, axis = 0) != 0, 1 )\n return self.num_views", "def __node_rep(self):\n node_list_dict = {}\n for (i, beam) in enumerate(self.beams):\n if str(beam['n1']) not in node_list_dict.keys():\n node_list_dict[str(beam['n1'])] = 1\n else:\n node_list_dict[str(beam['n1'])] += 1\n if str(beam['n2']) not in node_list_dict.keys():\n node_list_dict[str(beam['n2'])] = 1\n else:\n node_list_dict[str(beam['n2'])] += 1\n return node_list_dict", "def count_explorations():\n return exp_models.ExplorationModel.get_exploration_count()", "def getNumDimensions(self):\n return len(self.di.keys())", "def enumerate_viewports(self,*args):\n schema=\"org.compiz.core\"\n path=\"/org/compiz/profiles/unity/plugins/core/\"\n keys=['hsize','vsize']\n screen = Gdk.Screen.get_default()\n screen_size=[screen.get_width(),screen.get_height()]\n grid=[int(str(self.gsettings_get(schema,path,key))) for key in keys]\n x_vals=[screen_size[0]*x for x in range(0,grid[0])]\n y_vals=[screen_size[1]*x for x in range(0,grid[1])]\n \n viewports=[(x,y) for y in y_vals for x in x_vals ]\n viewports_dict = OrderedDict()\n for ix,vp in enumerate(viewports,1):\n viewports_dict[vp] = ix\n return viewports_dict", "def gives_stats():\n dict_count = {\n \"amenities\": storage.count(Amenity),\n \"cities\": storage.count(City),\n \"places\": storage.count(Place),\n \"reviews\": storage.count(Review),\n \"states\": storage.count(State),\n \"users\": storage.count(User)\n }\n return jsonify(dict_count)", "def get_number_of_items(self):\n return len(self.__item_map)", "def 
test_collect_indicator_layouts_dependencies(self, module_repo):\n expected_result = {\n (\"FeedMitreAttack\", True),\n (\"CommonTypes\", True),\n (\"CrisisManagement\", True),\n }\n\n test_input = [\n {\n \"Dummy Layout\": {\n \"typeID\": \"dummy_layout\",\n \"name\": \"Dummy Layout\",\n \"pack\": \"dummy_pack\",\n \"kind\": \"indicatorsDetails\",\n \"path\": \"dummy_path\",\n \"incident_and_indicator_types\": [\n \"MITRE ATT&CK\",\n \"Prisma Cloud Compute Cloud Discovery\",\n ],\n \"incident_and_indicator_fields\": [\n \"indicator_adminname\",\n \"indicator_jobtitle\",\n ],\n }\n }\n ]\n\n found_result = PackDependencies._collect_layouts_dependencies(\n pack_layouts=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n )\n\n assert set(found_result) == set(expected_result)", "def getLayoutDimensions(n, pref=\"height\"):\n nopt = np.sqrt(n)\n inoptw = int(nopt)\n inopth = int(nopt)\n while inoptw * inopth < n:\n if pref == \"width\":\n inoptw += 1\n if inoptw * inopth > (n - inopth):\n inoptw -= 1\n inopth += 1\n else:\n inopth += 1\n if inoptw * inopth > (n - inoptw):\n inopth -= 1\n inoptw += 1\n\n return (inopth, inoptw)", "def operation_counts(self) -> Dict[int, Dict[str, int]]:\n return self._operation_counts", "def GetToolCount(self):\r\n\r\n return len(self._items)", "def count(wrd):\n ltrs = {}\n for i in wrd:\n ltrs[i] = wrd.count(i)\n return ltrs", "def layout_method_mapper(self):\n return {\n \"kamada_kawai_layout\": kamada_kawai_layout,\n \"fruchterman_reingold_layout\": fruchterman_reingold_layout,\n \"spectral_layout\": spectral_layout,\n }", "def number_of_sections(self):\n sections = self.config.sections()\n return len(sections)", "def _tally_limits(self, limits, elements, connections=None):\n counts = {}\n for x in limits:\n ele = elements[x]\n if self.use_coordination:\n ele += str(len(connections[x]))\n if ele not in counts:\n counts[ele] = 0\n counts[ele] += 1\n return counts", "def slidelayouts(self):\n return self.__slidelayouts", "def count(self):\n return len(self._components)", "def get_count(self, asset=None):\n if asset is None or 'pc:count' not in asset.properties:\n return self.item.properties.get('pc:count')\n else:\n return asset.properties.get('pc:count')", "def GetNumberOfVariables(self):\n\n # nvar = 0\n # for i in self.variables_order:\n # # DO NOT COUNT VARIABLES THAT GET CONDENSED OUT\n # if i!=0:\n # if mesh.element_type == \"tri\":\n # nvar += (i+1)*(i+2) // 2\n # elif mesh.element_type == \"tet\":\n # nvar += (i+1)*(i+2)*(i+3) // 6\n # elif mesh.element_type == \"quad\":\n # nvar += (i+1)**2\n # elif mesh.element_type == \"hex\":\n # nvar += (i+1)**3\n\n # nvar = sum(self.variables_order)\n if self.nvar == None:\n self.nvar = self.ndim\n return self.nvar", "def count_collections(self, size=None, constraints=None):\n if size is None:\n size = self.total\n\n if constraints is not None:\n constraints = constraints.replace('\\n', ' ')\n\n x = CollectionConstraintHandler(constraints or '', self, size)\n\n return x.solution", "def DictFunction3():\r\n print \"Create Third Dictionary\"\r\n Dictionary3 = {key:value.count(\"a\") for key, value in food_prefs.iteritems()}\r\n print Dictionary3", "def clothing_type_count(clothes_list):\n types_count = {}\n for garment in clothes_list:\n if garment.db.clothing_type:\n type = garment.db.clothing_type\n if type not in types_count.keys():\n types_count[type] = 1\n else:\n types_count[type] += 1\n return types_count", "def get_total_counts(self):\n ret = {}\n all_loggers_count = 0\n for logger, name_map in 
self.acc_map.items():\n cur_logger_count = 0\n ret[logger.name] = {}\n for name, status_map in name_map.items():\n cur_name_count = 0\n ret[logger.name][name] = {}\n for status, acc in status_map.items():\n cur_count = acc.total_count\n ret[logger.name][name][status] = cur_count\n cur_name_count += cur_count\n cur_logger_count += cur_count\n all_loggers_count += cur_count\n ret[logger.name][name]['__all__'] = cur_name_count\n ret[logger.name]['__all__'] = cur_logger_count\n ret['__all__'] = all_loggers_count\n return ret", "def dimensions(self):\n d=dict()\n d['div'] = (self._div)\n d['var'] = len(self.used_variables)\n d['x'] = self.Xdim\n d['y'] = self.Ydim\n d['lev'] = self.lev\n d['dir'] = self._nb_dir\n return(d)", "def view_counts():\n out = {}\n for i in range(len(classes)):\n out.update({decoded[i]: storage.count(classes[i])})\n return out", "def _get_comment_counts(account, patchset):\n # A key-only query won't work because we need to fetch the patch key\n # in the for loop further down.\n comment_query = models.Comment.query(ancestor=patchset.key)\n\n # Get all comment counts with one query rather than one per patch.\n comments_by_patch = {}\n drafts_by_patch = {}\n for c in comment_query:\n pkey = c.patch_key\n if not c.draft:\n comments_by_patch[pkey] = comments_by_patch.setdefault(pkey, 0) + 1\n elif account and c.author == account.user:\n drafts_by_patch[pkey] = drafts_by_patch.setdefault(pkey, 0) + 1\n\n return comments_by_patch, drafts_by_patch", "def _get_comment_counts(account, patchset):\n # A key-only query won't work because we need to fetch the patch key\n # in the for loop further down.\n comment_query = models.Comment.query(ancestor=patchset.key)\n\n # Get all comment counts with one query rather than one per patch.\n comments_by_patch = {}\n drafts_by_patch = {}\n for c in comment_query:\n pkey = c.patch_key\n if not c.draft:\n comments_by_patch[pkey] = comments_by_patch.setdefault(pkey, 0) + 1\n elif account and c.author == account.user:\n drafts_by_patch[pkey] = drafts_by_patch.setdefault(pkey, 0) + 1\n\n return comments_by_patch, drafts_by_patch", "def count(self):\n return len(self.wallpapers)", "def build_rule_count_dict(counts_iterator):\n rule_count_dict = {}\n for l in counts_iterator:\n if l[1] != 'NONTERMINAL':\n x = l[2]\n y = l[1] == 'UNARYRULE' and l[3] or l[3] + ' ' + l[4]\n # if l[1] == 'UNARYRULE':\n # y = l[3]\n # else: # l[1] == 'BINARYRULE'\n # y = l[3] + ' ' + l[4]\n if x not in rule_count_dict:\n rule_count_dict[x] = {}\n rule_count_dict[x][y] = int(l[0])\n return rule_count_dict", "def counts(self) -> dict:\n return Counter(self.sequence)", "def parse_layout(layout):\n global index \n for lt_obj in layout:\n print(lt_obj.__class__.__name__)\n print(lt_obj.bbox)\n if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\n print(lt_obj.get_text())\n d[lt_obj.get_text().strip()]=(index,lt_obj.bbox)\n index+=1\n elif isinstance(lt_obj, LTFigure):\n parse_layout(lt_obj) # Recursive", "def getNumberOfPivotPointKeys(self, view) -> int:\n ...", "def dictCount(aList: list) -> dict:\n d = defaultdict(int)\n for elm in aList:\n d[elm] += 1\n\n return d", "def split_counts(self) -> Dict[int, int]:\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts", "def apply(self):\n counter = {}\n for act in self.activities:\n freq = []\n for trace in self.log:\n freq.append(len(self.project_trace(trace, [act])))\n if not len(freq) == 0:\n counter[act] = {'sum': sum(freq), 'min': min(freq),\n 'max': max(freq)}\n return 
counter", "def StateCounts(self):\r\n\t\treturn self._get_attribute('stateCounts')", "def module_count(self):\n return self._module_count", "def doc_lengths(self):\n return dict(zip(self.keys(), map(len, self.values())))", "def dimension_count(self):\n return self._dimensionCount", "def get_number_of_elements(self):\n if self.page.paginator.count < int(self.page.number) * self.page_size:\n show = self.get_shows()\n\n return \"{} - {}\".format(show, self.page.paginator.count)\n else:\n show = self.get_shows()\n return \"{} - {}\".format(show, self.get_page_range())", "def stats():\n class_counts = {}\n convert_dict = {\n 'Amenity': 'amenities',\n 'State': 'states',\n 'City': 'cities',\n 'User': 'users',\n 'Place': 'places',\n 'Review': 'reviews'\n }\n\n for _class in convert_dict.keys():\n class_counts[convert_dict[_class]] = storage.count(_class)\n\n return jsonify(class_counts)", "def counts(sequence):\n # initialize the countainer\n count = defaultdict(int)\n # iterates through sequence elements\n for item in sequence:\n # if element not in counts add 0\n # else add 1\n count[item] = count.get(item, 0) + 1\n return dict(count)", "def get_num_carn_landscape(self):\n return len(self.carn_pop)", "def __used(self):\n tot=0\n assign={}\n for c in self.assigned :\n if not assign.has_key(c.start) :\n assign[c.start]=c.end\n tot+=c.end-c.start+1\n return tot", "def make_layout(self):\n\n for h in range(0, self.num_layout_heads):\n self.set_random_layout(h)\n self.set_sliding_window_layout(h)\n self.set_global_layout_itc(h)\n\n self.check_and_propagate_first_head_layout()\n return self.layout", "def Counts(dict_of_list):\n return {k: len(v) for k, v in dict_of_list.iteritems()}", "def count_package(self, count_func=count_package_function):\n count = count_func(self)\n return count", "def nprograms(self):\n return len(self.__programs)", "def get_counts(area_group):\n # An area_group ID of -1 means there are no other routes in the area\n if area_group.name == -1:\n area_group['area_counts'] = 1\n else:\n area_group['area_counts'] = len(area_group)\n\n return area_group", "def review_counts(stat_info_dict):\n review_counts = {}\n for release, stat_dict in stat_info_dict.items():\n review_counts_per_release = {}\n for key, stat in stat_dict.items():\n # review count\n review_counts_per_release[key] = stat['metric']\n review_counts[release] = review_counts_per_release\n return review_counts", "def count(seats: List[str]) -> int:\n # Map dimensions\n m = len(seats)\n n = len(seats[0]) if m else 0\n \n count = 0\n \n # Count locations filled with \"#\"\n for i in range(m):\n for j in range(n):\n if seats[i][j] == \"#\":\n count += 1\n\n return count", "def get_attribute_counts(self):\n counts = defaultdict(int)\n for attr in self:\n counts[attr.name] += 1\n\n return dict(counts)", "def _get_num_proposals(self):\n total_props = self._df['nprops'].sum()\n return total_props", "def num_polys(self):\n ret_val = self._num_polys()\n return ret_val", "def var_count(self, kind):\n return self.counter[kind]", "def count_freq(self, types=1):\n count_dict = {}\n if types == 1:\n for cat in self.categories:\n num_images = sum(\n [1 for i in self.data['annotations'] if i['category_id'] == self.cats_idx[cat]])\n count_dict[cat] = num_images\n elif types == 2:\n pass\n\n return count_dict", "def count_ops(self, visual=None):\n from .function import count_ops\n return count_ops(self, visual)", "def getDimensions(self):\n return _libsbml.Layout_getDimensions(self)", "def 
get_layout_name_mapping(base_url=DEFAULT_BASE_URL):\n layout_names = get_layout_names(base_url=base_url)\n layout_mapping = {}\n\n # get the full name of a layout and create {fullname:layoutname} in dictionary\n for layout_name in layout_names:\n res = commands.cyrest_get('apply/layouts/' + layout_name, base_url=base_url)\n layout_mapping.update({res['longName']: layout_name})\n\n return layout_mapping", "def count(self):\n return len([i for i in self.iteritems()])", "def count_vars(scope=''):\n v = get_vars(scope)\n return sum([np.prod(var.shape.as_list()) for var in v])" ]
[ "0.6073172", "0.60642576", "0.5786426", "0.57859993", "0.57683474", "0.5574106", "0.54529566", "0.5449059", "0.539238", "0.53353953", "0.5332541", "0.53282636", "0.53045416", "0.52998465", "0.5298258", "0.52905923", "0.5276604", "0.52639884", "0.5256031", "0.52523476", "0.52348846", "0.5231947", "0.5190944", "0.5173385", "0.5165457", "0.5147762", "0.51390517", "0.5135757", "0.51283187", "0.5127755", "0.5124647", "0.5115731", "0.50815123", "0.5056241", "0.5043431", "0.50328296", "0.50328296", "0.50297594", "0.5028273", "0.50193065", "0.5014547", "0.50133044", "0.5012441", "0.50109696", "0.50104564", "0.5007304", "0.50070447", "0.5003208", "0.500274", "0.4992527", "0.4982976", "0.49819764", "0.4964761", "0.49589533", "0.4958521", "0.49519485", "0.49511096", "0.4947001", "0.49382532", "0.49355763", "0.4933019", "0.49321595", "0.49305415", "0.4925915", "0.49214825", "0.49214825", "0.49182057", "0.4916927", "0.4915733", "0.49135065", "0.48977828", "0.4895348", "0.48943865", "0.4877426", "0.4872376", "0.48721117", "0.4869421", "0.48673186", "0.48655787", "0.48547676", "0.4847034", "0.4846376", "0.48450974", "0.48428378", "0.48417816", "0.4832518", "0.48243862", "0.48206893", "0.48199373", "0.48198172", "0.4818528", "0.48162782", "0.48157838", "0.48149827", "0.48075885", "0.4806254", "0.4796993", "0.47922918", "0.47907436", "0.4789384" ]
0.8039856
0
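For context on the record above: the sketch below restates its gold count_layouts document in a self-contained, runnable form against the python-pptx API it targets (Presentation, prs.slides, and slide.slide_layout.name all exist in python-pptx). The INTERACTIVE layout-name set and the deck.pptx path are illustrative assumptions that the record itself never defines, and the prs: Presentation annotation is dropped here because pptx.Presentation is a factory function rather than a class.

```python
# Sketch only: self-contained version of the count_layouts document from the
# record above. INTERACTIVE and "deck.pptx" are assumptions for illustration.
import collections
from typing import Dict, Tuple

from pptx import Presentation

INTERACTIVE = {"Quiz", "Poll"}  # assumed layout names treated as interactive


def count_layouts(prs) -> Tuple[int, Dict[str, int]]:
    # Tally each slide's layout name; also count slides using an interactive layout.
    layouts: Dict[str, int] = collections.defaultdict(int)
    layouts_interactive = 0
    for slide in prs.slides:
        layouts[slide.slide_layout.name] += 1
        if slide.slide_layout.name in INTERACTIVE:
            layouts_interactive += 1
    return (layouts_interactive, layouts)


if __name__ == "__main__":
    prs = Presentation("deck.pptx")  # assumed file name
    interactive_count, per_layout = count_layouts(prs)
    print(interactive_count, dict(per_layout))
```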
Count the amount of text in each slide.
def get_slide_analytics_new(slides) -> List[int]: word_count = [] for slide in slides: print(slide) words = 0 for shape in slide.shapes: if not shape.has_text_frame: continue print(shape.name) for paragraph in shape.text_frame.paragraphs: for run in paragraph.runs: print(" " + run.text) words += len(run.text.split()) word_count.append(words) return word_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_word_counts(slides) -> List[int]:\n word_count = []\n for slide in slides:\n # print(f\"========== slide {len(text_count)+1} ========== [{slide.slide_layout.name}]\")\n words = 0\n # find all text\n for shape in slide.shapes:\n if not shape.has_text_frame:\n continue\n # print(shape.name)\n for paragraph in shape.text_frame.paragraphs:\n for run in paragraph.runs:\n # print(\" \" + run.text)\n words += len(run.text.split())\n word_count.append(words)\n return word_count", "def count_passages(self, step, count):\r\n count = int(count)\r\n assert_equals(len(world.css_find('.annotatable-span')), count)\r\n assert_equals(len(world.css_find('.annotatable-span.highlight')), count)\r\n assert_equals(len(world.css_find('.annotatable-span.highlight-yellow')), count)", "def total_slides(path):\n # print(\"CALLING.. total_slides\")\n prs = Presentation(path)\n tot_slides = len(prs.slides._sldIdLst)\n return tot_slides", "def count(text):\n return len(text)", "def paragraph_count(self, doc):\n\n paragraphs = doc.split(\"\\n\\n\")\n # remove the empty string\n return len([paragraph for paragraph in paragraphs if paragraph])", "def words(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n number_of_words = 0\n for i in root.iter(root_tag+'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n number_of_words = number_of_words + len(list(i.text.split()))\n return number_of_words", "def count_paragraphs(all_articles):\n total_paragraphs = 0\n for title in all_articles:\n total_paragraphs += all_articles[title]['content'].count('\\n')\n print(f\"There are {total_paragraphs} paragraphs written.\")", "def word_count(self):\n print(self.words())\n return len(self.words())\n #count = 0\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # count += len(wordslst)\n #return count\n #joined_string = ''.join(self.lines)\n #for word in joined_string:\n # if word != ' ' and word != '\\n' and word != '\\t':\n # count += 1\n #print('READ ME ––––––––––', self.lines)\n #print(joined_string)\n #print(line)\n #print(wordslst)\n #print(count)", "def paragraphs(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}')+1)]\n number_of_paragraphs = len(list(root.iter(root_tag + 'p')))\n return number_of_paragraphs", "def total_words(self):\n return len(strip_tags('%s %s' % (self.lead, self.content)).split())", "def count():", "def count_words(all_articles):\n total_words = 0\n for title in all_articles:\n total_words += all_articles[title]['word-count']\n print(f\"There are {total_words} words written.\")", "def text_count(self, text):\n res = 0\n for intv in self:\n if intv._text == text:\n res += 1\n return res", "def get_about_count_results(soup):\n title = soup.find('div', {'id': 'gs_ab_md'})\n if title:\n title = title.find('div', {'class': 'gs_ab_mdw'})\n if title:\n count_papers = title.text\n if count_papers:\n count_papers = count_papers.split(' ')[1].replace(',', '')\n else:\n count_papers = len(soup.find_all('h3', class_=\"gs_rt\"))\n try:\n int(count_papers)\n except:\n count_papers = title.text.split(' ')[0].replace(',', '')\n else:\n count_papers = len(soup.find_all('h3', class_=\"gs_rt\"))\n return int(count_papers)", "def displayed_words(self):\n return (len(strip_tags(self.preview).split()) -\n (len(self.more_string.split()) * int(not bool(self.lead))))", "def sentence_count(self):\n count = 0\n for line in self.lines:\n 
if '.' in line:\n count += 1\n if count == 0:\n count = 1\n return count\n #return line.count('.')\n #else:\n #return 1", "def counts(self, regex = \"\\w+\"): \n tokenizer = RegexpTokenizer(r'{}'.format(regex))\n count = []\n for i in tqdm(self.text):\n count.append(len(tokenizer.tokenize(i)))\n return count", "def count(self, word):\n pass", "def get_number_of_paragraph(self):\n file_to_read = f'{self.path}/{self.filename}'\n file = open(file_to_read, 'r', encoding='utf-8')\n string_to_match = '<p>'\n count = 0\n for line in file:\n if string_to_match in line:\n count += 1\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'number_of_paragraph', count)\n print(datetime.now(), '-', 'number_of_paragraph for', self.filename, 'calculated =', count)\n return None", "def __count_text(text, limit=None):\n\n count = 0\n is_text = True\n for i, c in enumerate(text):\n if is_text and c == '\\33':\n is_text = False\n\n if is_text:\n count += 1\n if limit is not None and count == limit:\n return i + 1\n\n if not is_text and c == 'm':\n is_text = True\n\n if limit is not None:\n return len(text)\n else:\n return count", "def get_number_of_elements(self):\n if self.page.paginator.count < int(self.page.number) * self.page_size:\n show = self.get_shows()\n\n return \"{} - {}\".format(show, self.page.paginator.count)\n else:\n show = self.get_shows()\n return \"{} - {}\".format(show, self.get_page_range())", "def count(app, status):\n item = app.tv.selection()[0]\n\n def count_children(item):\n children = app.tv.get_children(item)\n return len(children) + sum(count_children(child) for child in children)\n\n status.config(text=f'{count_children(item)} descendants')", "def get_counts(self):\n value = self.text_ctrl.GetValue()\n chars = len(value)\n words = len(re.findall('\\w+', value))\n pub.sendMessage('update_counts', chars=chars, words=words)", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def get_lenght(text):\n return range(len(Articles.split(text)))", "def __emphasis(self, title, text):\n title_counts = collections.Counter(title)\n text_counts = collections.Counter(text)\n text_count = 0\n title_count = 0\n exclamatory = ('?', '!')\n for k in exclamatory:\n if title_counts[k] is not None:\n title_count += title_counts[k]\n if text_counts[k] is not None:\n text_count += text_counts[k]\n return text_count, title_count", "def total_exs(dataset):\n total = 0\n for article in dataset['data']:\n for para in article['paragraphs']:\n total += len(para['qas'])\n return total", "def testSectionCount(self):\n\n self.sectionCount(3640)", "def word_count(self):\n return len(self.text)", "def count_sonata(self):\n return self.run_query(\"count( /mediawiki/page[starts-with (title, 'Sonata') ] )\")", "def count_words(filename):", "def count_articles(all_articles):\n print(f\"There are {len(all_articles)} articles.\")", "def render_words_count(request):\n count = 0\n try:\n count = sum([len(d.body.split(None)) for d in Devotional.objects.all()])\n except:\n pass\n\n return render_to_response('devotional/view_word_count.html',\n {'count': count},\n context_instance=RequestContext(request))", "def hives_count(self) -> int:\n return self.hives.count()", "def total_syllables(target_text):\n\n splited_text = target_text.split()\n count = 0\n for word in splited_text:\n count = count + word_syllables(word)\n return count", "def testArticleCount(self):\n\n self.articleCount(17)", "def total_words(target_text):\n\n splited_text = target_text.split()\n 
nbwords = len(splited_text)\n return nbwords", "def test_counts(self):\n lines, words, chars = analyze_text(self.filename)\n self.assertEqual(lines, 4)\n self.assertEqual(words, 8)\n self.assertEqual(chars, 36)", "def counter(self) -> int:", "def counter(self) -> int:", "def getCount(self):\n return _osgAnimation.Target_getCount(self)", "def count_search_results(self):\n raw_text = self.driver.find_element(*self.HEADING_COUNTER).text\n num = re.findall(r'\\d+', raw_text) \n return int(num[0])", "def sections(self) -> int:\n return len(self.string.split(\".\"))", "def count(self):\n # TODO not implemented yet\n return 0", "def count(self):\n\n raise NotImplementedError", "def amount_nouns_and_numerals_spacy(self) -> int:\n #choose language\n if self.lang == 'en':\n lang_for_spacy = 'en_core_web_sm'\n elif self.lang == 'de':\n lang_for_spacy = 'de_core_news_sm'\n elif self.lang == 'fr':\n lang_for_spacy = 'fr_core_news_md'\n nlp = spacy.load(lang_for_spacy)\n doc = nlp(self.sent)\n for word in doc:\n #if the part of speech is a noun, a proper noun or a numeral \n #(only for en) \n if self.lang == 'en':\n if word.pos_ == 'NOUN' or word.pos_ == 'PROPN' or word.pos_ == 'NUM':\n self.amount_nouns_and_num += 1\n elif self.lang == 'de' or self.lang == 'fr':\n if word.pos_ == 'NOUN' or word.pos_ == 'PROPN':\n self.amount_nouns_and_num += 1\n return self.amount_nouns_and_num", "def main_func(sources):\n art_count = 0\n word_count = 0\n for source in sources:\n titles = get_articles(source)\n art_count += len(titles)\n word_count += count_word('trump', titles)\n\n return (word_count, art_count)", "def wordCount(document):\n return float(len(document.split(None)))", "def test_run():\r\n print(count_words(\"cat bat mat cat bat cat\", 3))\r\n print(count_words(\"betty bought a bit of butter but the butter was bitter\", 3))", "def count(self):\n return len(self.find())", "def slider(self):\n\n if self.count >= len(self.txt):\n self.count = -1\n self.text = ''\n self.heading.config(text=self.text)\n\n else:\n self.text = self.text + self.txt[self.count]\n self.heading.config(text=self.text)\n self.count += 1\n\n self.heading.after(100, self.slider)", "def text_cond_count(self, condition):\n res = 0\n for intv in self:\n if condition(intv._text):\n res += 1\n return res", "def count(self):\n return self.ming_cursor.count()", "def count_containers(lines: Lines) -> int:\n rules = parse_rules(lines)\n allowed_containers = containers(\"shiny gold\", rules)\n assert allowed_containers is not None\n return len(allowed_containers) - 1", "def num_words():\n # Load the GT.\n df = pd.read_csv(config.META_FQN, sep=\"\\t\")\n stats = {\n \"T\": {\"words\": [], \"duration\": []},\n \"P\": {\"words\": [], \"duration\": []},\n \"sess\": {\"words\": [], \"duration\": []},\n }\n\n for _, row in df.iterrows():\n if row[\"asr_test\"]:\n stats[\"P\"][\"words\"].append(float(row[\"gt_patient_num_words\"]))\n stats[\"T\"][\"words\"].append(float(row[\"gt_therapist_num_words\"]))\n stats[\"P\"][\"duration\"].append(float(row[\"gt_patient_time_spoken\"]))\n stats[\"T\"][\"duration\"].append(\n float(row[\"gt_therapist_time_spoken\"])\n )\n stats[\"sess\"][\"duration\"].append(float(row[\"sess_dur\"]))\n n_words = (\n row[\"gt_therapist_num_words\"] + row[\"gt_patient_num_words\"]\n )\n stats[\"sess\"][\"words\"].append(n_words)\n\n for speaker in stats:\n for metric in stats[speaker]:\n print(f\"------ {speaker} | {metric} ------\")\n print_stats(stats[speaker][metric])", "def count_words(filename):\n try:\n with open(filename) 
as f_obj:\n contents = f_obj.read()\n except FileNotFoundError:\n msg = \"sorry, \" + filename + \" does not exist\"\n print(msg)\n else:\n words = contents.split()\n num_words = len(words)\n print(\"The words'number is \" + str(num_words))", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def word_count(excerpt):\n # Validate that we are actually give something to work with\n assert excerpt, \"excerpt cannot be blank\"\n return Counter(excerpt.split())", "def letters(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n number_of_letters = 0\n for i in root.iter(root_tag+'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n number_of_letters = number_of_letters + len([letter for letter in i.text if letter.isalnum()])\n return number_of_letters", "def count_data_points(conversation, parse_text, i, **kwargs):\n data = conversation.temp_dataset.contents['X']\n num_elements = len(data)\n\n parse_op = gen_parse_op_text(conversation)\n\n if len(parse_op) > 0:\n description_text = f\" where <b>{parse_op}</b>\"\n else:\n description_text = \"\"\n\n message = f\"There are <b>{num_elements} items</b> in the data{description_text}.\"\n\n message += \"<br><br>\"\n message += \"Let me know if you want to see their ids.\"\n ids = list(data.index)\n rest_of_text = str(ids)\n conversation.store_followup_desc(rest_of_text)\n return message, 1", "def run_and_get_word_count(self) -> int:\n r = requests.get(self.url)\n if r.status_code != status.HTTP_200_OK:\n raise ScraperException\n soup = BeautifulSoup(r.content, \"html.parser\")\n matches = soup(text=re.compile(f\"{self.word}\"))\n count = 0\n for match in matches:\n words = re.findall(fr\"\\b{self.word}\\b\", match)\n count = count + len(words)\n return count", "def num_articles(self):\n\t\treturn len(index)", "def count_exemplar_words(self):\n valid_exemplars = [_ for _ in self.exemplars if _.validate()]\n\n total_words = 0\n for eg in valid_exemplars:\n eg.n_words = eg.count_words()\n total_words += eg.n_words\n return valid_exemplars, total_words", "def get_num_of_pages(self):", "def num_divs(self):\n return len(self.q(css='div.test').results)", "def count_words_and_dublicates(novel):", "def embedcount(line):\r\n\r\n x_temp = line.count(BOX_CHAR['lu'])\r\n return self.defaults.get('size')-(4*x_temp)", "def word_count(text, word):\n \n #answer\n word_list = text.split(\" \")\n return (word_list.count(word))\n \n #return (text.count(word)) - deoesn't work", "def on_text(self, event):\n self.get_counts()\n self.save()", "def children_num(self,p):\n counter = 0\n for child in self.children(p):\n counter += 1\n return counter", "def __capitals(self, title, text):\n text_words = nltk.word_tokenize(text)\n text_count = 0\n title_count = 0\n for word in text_words:\n if word.isupper():\n text_count += 1\n for word in title.split():\n if word.isupper():\n title_count += 1\n return title_count, text_count", "def get_number_of_words(self):\n filename = f'{self.path}/{self.filename}'\n # word_counter = {}\n # w_cnt = 0\n # x = 0\n file = open(filename, 'r', encoding='utf-8')\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n # for word in word_list:\n # w_cnt += 1\n # if word not in word_counter:\n # word_counter[word] = 1\n # 
else:\n # word_counter[word] = word_counter[word] + 1\n\n # for word in word_list:\n # x += 1\n # print(word, word.isalpha(), x)\n\n w_cnt = sum([a[0].isalpha() for a in word_list])\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'number_of_words', w_cnt)\n print(datetime.now(), '-', 'number_of_words for', self.filename, 'calculated =', w_cnt)\n return None", "def npulses(self):\n return self.header.pulse_count", "def __sent_len(self, title, text):\n total = 0\n text_sent = nltk.sent_tokenize(text)\n for sent in text_sent:\n total += len(nltk.word_tokenize(sent))\n return (len(nltk.word_tokenize(title)), total / len(text_sent))", "def count() -> int:\n pass", "def countWords(file_name, start, end):\r\n\r\n with open(file_name, \"r\") as file:\r\n counter_words = 0\r\n\r\n for line in islice(file, start, end):\r\n res = len(line.split())\r\n counter_words += res\r\n\r\n return counter_words", "def get_words(self, article: BeautifulSoup):\n return len(re.findall(r'\\w+', self.get_article_text(article)))", "def count(self, item):\n return _(self._.count(item))", "def test_count_publications(self):\n pass", "def document_count(self):\n raise NotImplementedError", "def sent_count(self):\n count = []\n for i in tqdm(self.text):\n count.append(len(sent_tokenize(i)))\n return count", "def test_run():\n print count_words(\"cat bat mat cat bat cat\", 3)\n print count_words(\"betty bought a bit of butter but the butter was bitter\", 3)", "def test_run():\n print count_words(\"cat bat mat cat bat cat\", 3)\n print count_words(\"betty bought a bit of butter but the butter was bitter\", 3)", "def total_phrases(target_text):\n\n nbphrase = 0\n separators = '.!?;'\n for char in target_text:\n if char in separators:\n nbphrase = nbphrase + 1\n return nbphrase", "def count_word(doc):\n count = count = 0\n for w in document.split(\" \"):\n count = count + 1\n return count", "def b_count_test(self):\n \t \n\tsel = self.selenium\n test = \"Test B - Count Articles, Titles, Headings, Etc.\"\n print test\n \n headers = sel.get_css_count(\"css=\" + CSS[1])\n images = sel.get_css_count(\"css=\" + CSS[2])\n authors = sel.get_css_count(\"css=\" + CSS[3])\n\tdots = sel.get_css_count(\"css=\" + CSS[7]) + sel.get_css_count(\"css=\" + CSS[6])\t\n \n if ((images < 8) or (dots < 8) or (authors < 8) or (headers < 8)):\n print \"Missing articles!\"\n L.log(BROWSERS[x], test, \"FAIL, MISSING CONTENT\", \"Images: \" + str(images) + \" Dots: \" + str(dots) + \" Authors: \" + str(authors) + \" Headers: \" + str(headers)) \n \n\telse:\n\t L.log(BROWSERS[x], test, \"PASS, OK\", \"None\")\n\t \n\t######################################################################## ", "def get_related_list_count(self, heading):\n locator = lex_locators[\"record\"][\"related\"][\"count\"].format(heading)\n count = self.selenium.get_webelement(locator).text\n count = count.replace(\"(\", \"\").replace(\")\", \"\")\n return int(count)", "def get_num_of_images(self):", "def count(self):\n return len(self.names)", "def sample_count(self):", "def amount_nouns_and_numerals_stanford_nlp(self) -> int:\n stanza.download(self.lang, processors = 'tokenize,mwt,pos')\n nlp = stanza.Pipeline(self.lang, processors = 'tokenize,mwt,pos')\n doc = nlp(self.sent)\n for sentence in doc.sentences:\n for word in sentence.words:\n #if the part of speech is a noun, a proper noun or a numeral \n #(only for en) \n if self.lang == 'en':\n if word.upos == 'NOUN' or word.upos == 'PROPN' or word.upos == 'NUM':\n self.amount_nouns_and_num += 1\n elif self.lang == 
'de' or self.lang == 'fr':\n if word.upos == 'NOUN' or word.upos == 'PROPN':\n self.amount_nouns_and_num += 1\n return self.amount_nouns_and_num", "def count_words(self, clean_func=clean_up):\n return (\n len(clean_func(self.transcript_file.text()).split())\n if self.validate()\n else 0\n )", "def _text_length(self, text):\n\n if isinstance(text, dict): # {key: value} case\n return len(next(iter(text.values())))\n elif not hasattr(text, '__len__'): # Object has no len() method\n return 1\n elif len(text) == 0 or isinstance(text[0], int): # Empty string or list of ints\n return len(text)\n else:\n return sum([len(t) for t in text]) # Sum of length of individual strings", "def countWords(f, ext):\n\n word_count = 0\n if ext == \".xml\":\n # Parse with lxml\n tree = lxml.etree.parse(f)\n root = tree.getroot()\n # Get the text of all tags.\n if root is not None:\n text = root.xpath(\"//text()\")\n else:\n return word_count\n # Join the text together.\n text = \" \".join(text)\n # Split the text into words.\n words = text.split(\" \")\n # Remove empty strings.\n words = [w for w in words if w != \"\"]\n # Remove words that are just numbers.\n words = [w for w in words if not w.isnumeric()]\n # Remove one-letter words\n words = [w for w in words if len(w) > 1]\n # Count the words\n word_count = len(words)\n # Subtract off the number of child tags from the root.\n word_count = word_count - len(root.getchildren()) - 1\n\n elif ext == \".html\":\n # Parse the file with BeautifulSoup.\n soup = BS(f, \"html.parser\")\n # Get the text of all tags.\n text = soup.get_text()\n # Split the text into words.\n words = text.split(\" \")\n # Remove empty strings.\n words = [w for w in words if w != \"\"]\n # Remove words that are just numbers.\n words = [w for w in words if not w.isnumeric()]\n # Remove one-letter words\n words = [w for w in words if len(w) > 1]\n # Count the words\n word_count = len(words)\n\n elif ext == \".md\" or ext == \".srt\":\n for line in f:\n # Skip blank lines.\n if len(line) == 0 or line == \"\\n\" or line == \"\\r\":\n continue\n # Check for SRT time lines and skip them.\n if re.search(\"\\d\\d --> \\d\\d:\", line):\n continue\n # Skip lines that are just a single number.\n if re.search(\"^\\d+$\", line):\n continue\n # Check for lines that are just times and skip them.\n if re.search(\"^\\d\\d:\\d\\d$\", line):\n continue\n if re.search(\"^\\d\\d:\\d\\d:\\d\\d$\", line):\n continue\n\n raw_words = line.split(\" \")\n reduced_words = []\n for w in raw_words:\n # Don't include 1-character \"words\"\n if len(w) > 1:\n reduced_words.append(w)\n\n # Store filename and count\n word_count += len(reduced_words)\n\n # Sometimes the word count is negative. 
I don't know why.\n if word_count < 0:\n word_count = 0\n return word_count", "def count_occurrences(article_json, selected_word):\n selected_word = selected_word.lower()\n total_titles = 0 # some rows miss the title field, so not using len()\n selected_word_counter = 0\n for row in article_json:\n if 'title' in row:\n title = row['title']\n total_titles += 1\n for word_in_title in title.lower().split():\n if word_in_title == selected_word:\n selected_word_counter += 1\n return total_titles, selected_word_counter", "def _counter(title_list):\n t = Tokenizer()\n words_count = defaultdict(int)\n words = []\n for title in title_list:\n tokens = t.tokenize(title)\n for token in tokens:\n pos = token.part_of_speech.split(',')[0]\n if pos == '名詞':\n words_count[token.base_form] += 1\n words.append(token.base_form)\n return words_count, words", "def count_words(filename):\n try:\n with open(filename) as f_obj:\n contents = f_obj.read()\n except FileNotFoundError:\n # msg = \"Sorry, the file \" + filename + \" does not exist.\"\n # print(msg)\n pass\n else: \n words = contents.split()\n num_words = len(words)\n print(\"The file \" + filename + \" has about \" + str(num_words) + \" words.\")" ]
[ "0.75697744", "0.699523", "0.6631901", "0.65998024", "0.65277624", "0.65032226", "0.63644123", "0.63634205", "0.6349974", "0.6309825", "0.6171061", "0.6156245", "0.6148901", "0.60785764", "0.60774815", "0.6072669", "0.603562", "0.59676707", "0.59530956", "0.59443533", "0.5926551", "0.59191763", "0.5901001", "0.5897695", "0.5897695", "0.5897695", "0.5897695", "0.58900493", "0.58835316", "0.5869407", "0.5859996", "0.5853128", "0.58055353", "0.5774251", "0.5757163", "0.5752262", "0.57411385", "0.57390624", "0.5738895", "0.57297707", "0.5729508", "0.5727655", "0.5727655", "0.5707323", "0.56938654", "0.56796443", "0.56605667", "0.564855", "0.5640703", "0.56353414", "0.56281257", "0.561886", "0.55845064", "0.5578795", "0.5578319", "0.5553415", "0.5546276", "0.55393434", "0.5537116", "0.55253625", "0.5525328", "0.5519031", "0.5496333", "0.54958844", "0.54911214", "0.5480267", "0.5466869", "0.5466241", "0.54654187", "0.5459029", "0.5456582", "0.54559624", "0.54547644", "0.54506266", "0.5448946", "0.54477507", "0.5446901", "0.5446835", "0.544084", "0.54405236", "0.54342574", "0.5434116", "0.5426969", "0.541459", "0.54023635", "0.54023635", "0.5401984", "0.5394674", "0.5394373", "0.5392787", "0.5386115", "0.5373402", "0.53683203", "0.5364351", "0.53566676", "0.5353533", "0.53531283", "0.53514326", "0.53471255", "0.5347099" ]
0.7645021
0
Analyses a presentation and returns a dictionary of star ratings, plus extra details. Works best on presentations that use the LIFTS template, with known layout names.
def analyse_presentation(pres_name:str, verbose=False) -> Dict[str, Any]: prs = Presentation(pres_name) if verbose: debug_dump(prs) (layouts_interactive, layouts) = count_layouts(prs) interaction_stars = min(layouts_interactive, 5) topic_stars = ([1,1,3,5,5,4,3,2,1]+[1]*100)[layouts["Section Header"]] pres_properties = get_presentation_properties(prs) word_count = get_word_counts(prs.slides) words_per_slide = sum(word_count) / len(word_count) # ideal words/slide is 30-40 (5 stars) text_stars = calculate_text_stars(word_count) # print("word counts:", word_count) # Create a list of warnings about very text-heavy slides heavy_warnings = [] for slide, words in enumerate(word_count): if words > MAX_WORDS_PER_SLIDE: heavy_warnings.append(f"WARNING: slide {slide} has {words} words!") slides = get_slide_analytics(prs.slides) print(slides) result = { "presentation_rating_stars_interaction": interaction_stars, "presentation_rating_stars_section": topic_stars, "presentation_rating_stars_accessibility": 3, # not implemented yet! "presentation_rating_stars_text": text_stars, "presentation_count_slide": len(prs.slides), "presentation_count_layout": layouts, # dictionary that maps layout name to count "presentation_total_words": words_per_slide, # a float "presentation_warning_text_heavy": heavy_warnings, # a list of warning strings "presentation_data_slides": slides, # a list of slides and analytics "filename": pres_name, # TODO: strip any Path and just return file name? "name": "ICT999", "description": "Introduction to ICT" } return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def analyze_pptx(template_file):\n prs = Presentation(template_file)\n # Each powerpoint file has multiple layouts\n # Loop through them all and see where the various elements are\n slide_masters = prs.slide_masters\n for index, slide_master in enumerate(prs.slide_masters):\n print('------------------------------------')\n print('------------------------------------')\n print(f\"slide master indexed: {index}\")\n print(slide_master)\n print(\"text boxes:\")\n for shape in slide_master.shapes:\n try:\n dummystring = f\"shape name: {shape.name} - shape text: {shape.text}\"\n shape.text = shape.name\n print(dummystring)\n except:\n pass\n # shape.text = 'hahahaha'\n # for shape in slide_master.slideshapes:\n # print(shape)\n print('------------------------------------')\n for index, slide_layout in enumerate(slide_master.slide_layouts):\n print(f\"\\tslide layout: {slide_layout.name}\")\n slide = prs.slides.add_slide(slide_master.slide_layouts[index])\n # Not every slide has to have a title\n try:\n title = slide.shapes.title\n title.text = 'Title for Layout {}'.format(index)\n except AttributeError:\n print(\"No Title for Layout {}\".format(index))\n # Go through all the placeholders and identify them by index and type\n for shape in slide.placeholders:\n if shape.is_placeholder:\n phf = shape.placeholder_format\n # Do not overwrite the title which is just a special placeholder\n try:\n if 'Title' not in shape.text:\n shape.text = 'Placeholder index:{} type:{}'.format(phf.idx, shape.name)\n except AttributeError:\n print(\"{} has no text attribute\".format(phf.type))\n print(f\"\\t\\tid: {phf.idx} - name: {shape.name}\")\n # output_file = '..\\\\resources\\pptx\\\\template_names.pptx'\n # prs.save(output_file)", "def extract_information(preprocessed_sentences):\n parsed = list(map(lambda sentence: nlp(sentence), preprocessed_sentences))\n\n quantities = list(filter(lambda sentence: eh.sentence_has_type(sentence, 'QUANTITY'), parsed))\n dates = list(filter(lambda sentence: eh.sentence_has_type(sentence, 'DATE'), parsed))\n\n hurricane_name = eh.extract_frequent_regex_match(parsed, '[Hh]urricane ([A-Z][a-z]+)').most_common(1)[0][0]\n hurricane_category = eh.extract_frequent_regex_match(parsed, '[Cc]ategory ([0-9]+)').most_common(1)[0][0]\n\n tropical_storm_name = eh.extract_frequent_regex_match(parsed, '[Tt]ropical [Ss]torm ([A-Z][a-z]+)').most_common(1)[0][0]\n formation_date, middle_month = extract_storm_timeline(dates, hurricane_name)\n\n preperation_info = extract_preparation_information(parsed)\n prep_gpes = preperation_info[0].most_common(3)\n\n restore_info = extract_restoration_information(parsed)\n\n landfall_info = extract_landfall_information(parsed)\n\n wind_info = extract_wind_information(quantities)\n rain_info = extract_rain_information(quantities)\n size_info = extract_size_information(parsed)\n\n # formation_info = extract_formation_info(parsed)\n death_info = extract_death_damages_info(parsed)\n\n print(constants.HURRICANE_SENTENCE.format(hurricane_name, middle_month, hurricane_category))\n print(constants.LANDFALL_SENTENCE.format(hurricane_name, landfall_info[2], landfall_info[3], landfall_info[0], landfall_info[1]))\n print(constants.WIND_SENTENCE.format(wind_info[0], wind_info[1], wind_info[2]))\n print(constants.RAIN_SENTENCE.format(hurricane_name, rain_info[1], rain_info[0], rain_info[2]))\n print(constants.FORMATION_SENTENCE.format(formation_date, tropical_storm_name))\n print(constants.PREPARATION_SENTENCE.format(prep_gpes[0][0], prep_gpes[1][0], prep_gpes[2][0], 
preperation_info[1].\n most_common(1)[0][0]))\n print(constants.SIZE_SENTENCE.format(size_info[0], size_info[1]))", "def prepare_metadata(self, presentation):\r\n return {\"title\": presentation.title,\r\n \"artist\": presentation.speaker,\r\n \"performer\": presentation.speaker,\r\n \"album\": presentation.event,\r\n \"location\": presentation.room,\r\n \"date\": str(datetime.date.today()),\r\n \"comment\": presentation.description}", "def create_stim_tables(\n exptpath,\n stimulus_names = ['drifting_gratings_grid', 'natural_movie_full', \n 'natural_movie_one','checkerboard', 'dot'],\n verbose = True):\n data = load_stim(exptpath)\n twop_frames = load_alignment(exptpath)\n\n stim_table_funcs = {\n 'drifting_gratings_grid': DGgrid_table,\n 'natural_movie_full': NMfull_table,\n 'natural_movie_one': NMone_table,\n 'checkerboard': Checkerboard_table,\n 'dot': Dot_table\n }\n stim_table = {}\n for stim_name in stimulus_names:\n try:\n stim_table[stim_name] = stim_table_funcs[stim_name](\n data, twop_frames\n )\n except KeyError:\n if verbose:\n print(\n 'Could not locate stimulus type {} in {}'.format(\n stim_name, exptpath\n )\n )\n continue\n\n return stim_table", "def count_layouts(prs:Presentation) -> Tuple[int, Dict[str, int]]:\n layouts = collections.defaultdict(int)\n layouts_interactive = 0\n for slide in prs.slides:\n layouts[slide.slide_layout.name] += 1\n if slide.slide_layout.name in INTERACTIVE:\n layouts_interactive += 1\n return (layouts_interactive, layouts)", "def debug_dump(prs:Presentation):\n print(\"Presentation has\", len(prs.slides), \"slides\")\n\n # Print summary of all slides, plus text\n n = 0\n for slide in prs.slides:\n n += 1\n print(\"========== slide {} ========== [{}]\".format(n, slide.slide_layout.name))\n for shape in slide.shapes:\n if not shape.has_text_frame:\n continue\n print(shape.name)\n for paragraph in shape.text_frame.paragraphs:\n for run in paragraph.runs:\n print(\" \" + run.text)", "def featurize_sift(list_of_demonstrations, kinematics, sr):\n\n\tdata_X_xy = {}\n\tdata_X_x = {}\n\n\tfor demonstration in list_of_demonstrations:\n\t\tprint \"SIFT for \", demonstration\n\t\tPATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + \"_\" + str(constants.CAMERA) + \".p\"\n\n\t\tZ = []\n\t\tstart, end = parser.get_start_end_annotations(PATH_TO_ANNOTATION)\n\t\tfor frm in range(start, end + 1):\n\t\t\tPATH_TO_IMAGE = constants.PATH_TO_DATA + constants.NEW_FRAMES_FOLDER + demonstration + \"_\" + constants.CAMERA + \"/\"\n\t\t\tZ.append(sift.run_sift_frame(utils.get_full_image_path(PATH_TO_IMAGE, frm)))\n\n\t\tZ = np.array(Z)\n\t\tZ = Z.reshape(Z.shape[0],1)\n\n\t\tW = kinematics[demonstration]\n\t\tW_onlyx = utils.only_X(W)\n\n\t\tX = np.concatenate((W, Z), axis = 1)\n\t\tX_onlyx = np.concatenate((W_onlyx, Z), axis = 1)\n\n\t\tdata_X_xy[demonstration] = X\n\t\tdata_X_x[demonstration] = X_onlyx\n\n\tpickle.dump(data_X_xy, open(PATH_TO_FEATURES + \"SIFT_xy.p\", \"wb\"))\n\tpickle.dump(data_X_x, open(PATH_TO_FEATURES + \"SIFT_x.p\", \"wb\"))", "def process_paper(filename):\n\n # Start time\n start_time = time.time()\n\n # Read in the paper\n paper = useful_functions.read_in_paper(filename, sentences_as_lists=True)\n\n # Extract the gold summary\n gold = paper[\"HIGHLIGHTS\"]\n gold_string_list = [\" \".join(x) for x in gold]\n\n # Extract the title\n title = paper[\"MAIN-TITLE\"][0]\n title_string = \" \".join(title)\n\n # Extract the abstract\n abstract = paper[\"ABSTRACT\"]\n abstract_string_list = [\" \".join(x) for x 
in abstract]\n\n # Extract the keyphrases\n try:\n keyphrases = paper[\"KEYPHRASES\"][0]\n except IndexError:\n keyphrases = []\n\n # Turn the paper into a single string and calculate the bag of words score\n paper_string = \" \".join([\" \".join(x) for key, val in paper.iteritems() for x in val])\n bag_of_words = useful_functions.calculate_bag_of_words(paper_string)\n\n # Get the paper as a list of sentences, associating each sentence with its section name - will be used by oracle\n # to find best summary sentences.\n paper_sentences = [(\" \".join(x), key) for key, val in paper.iteritems() for x in val\n if key != \"ABSTRACT\"]\n\n # Create a list of sentences, their ROUGE-L scores with the Highlights and the section they occur in\n # (as a string)\n sents_scores_secs = []\n\n for sentence, section in paper_sentences:\n # For some reason the candidate sentence needs to be the only item in a list\n r_score = rouge.calc_score([sentence], gold_string_list)\n\n sents_scores_secs.append((sentence.split(\" \"), r_score, section))\n\n # Sort the sentences, scores and sections into descending order\n sents_scores_secs = sorted(sents_scores_secs, key=itemgetter(1), reverse=True)\n\n pos_sents_scores_secs = sents_scores_secs[:num_summary]\n neg_sents_scores_secs = sents_scores_secs[num_summary:]\n\n if len(neg_sents_scores_secs) < len(pos_sents_scores_secs):\n print(\"{}**** NOT A SUFFICIENT AMOUNT OF DATA IN PAPER {}, IGNORING PAPER ****{}\".format(\n Color.RED, filename, Color.END))\n return\n\n # Positive sentences\n positive_sents_secs_class = [(sent, sec, 1) for sent, _, sec in pos_sents_scores_secs]\n\n # Negative sentences\n\n # Take the sentences not used as positive and reverse it to have worst scores first then take an equal number\n neg_sents_scores_secs = [x for x in reversed(neg_sents_scores_secs)][:len(positive_sents_secs_class)]\n negative_sents_secs_class = [(sent, sec, 0) for sent, _, sec in neg_sents_scores_secs]\n\n # Don't create data from this paper if it's less than 40 sentences - i.e. there would be more positive than\n # negative data. The data needs to be balanced.\n #if len(positive_sents_secs_class) != len(negative_sents_secs_class):\n # print(\"{}**** NOT A SUFFICIENT AMOUNT OF DATA IN PAPER {}, IGNORING PAPER ****{}\".format(\n # Color.RED, filename, Color.END))\n # return\n\n # Concatenate the positive and negative sentences into a single data item and shuffle it\n data = positive_sents_secs_class + negative_sents_secs_class\n random.shuffle(data)\n\n # Average word vectors of each sentence and convert to list for JSON serialisation\n sentvecs_secs_class = [(useful_functions.sentence2vec(sent).tolist(), sec, y) for sent, sec, y in data]\n\n # Calculate features for each sentence\n features = [useful_functions.calculate_features(sent, bag_of_words, document_wordcount, keyphrases,\n abstract_string_list, title_string, sec)\n for sent, sec, y in data]\n\n # Calculate abstract vector\n abs_vector = useful_functions.abstract2vector(abstract_string_list).tolist()\n\n # Description of the data\n description_text = \"All text is of the form of a list of lists, where each sentence is a list of words. The\" \\\n \" sentences are of the form [(sentence (as a list of words), section in paper,\" \\\n \" classification)]. The sentence vectors are of a similar form, except the sentence text is\" \\\n \" replaced with the vector representation of the sentence. 
The features are of the form \" \\\n \"[(AbstractROUGE, TF-IDF, Document_TF-IDF, keyphrase_score, title_score, numeric_score,\" \\\n \" sentence_length, section)]. The dimensions of each sentence vector are [1x100]. The \" \\\n \"abstract vector is a single [1x100] vector also.\"\n\n # The data item that will be written for this paper\n data_item = {\n \"filename\": filename,\n \"gold\": gold,\n \"title\": paper[\"MAIN-TITLE\"],\n \"abstract\": abstract,\n \"abstract_vec\": abs_vector,\n \"sentences\": data,\n \"sentence_vecs\": sentvecs_secs_class,\n \"sentence_features\": features,\n \"description\": description_text\n }\n\n # Write the data out\n with open(TRAINING_DATA_WRITE_LOC + filename.strip(\".txt\") + \".json\", \"wb\") as f:\n json.dump(data_item, f)\n\n print(\"--> Finished processing {}, took {} seconds, data length: {}.\".format(\n filename, (time.time() - start_time), len(data)))", "def parse(self, is_president):\n mp = {}\n self.content = self.page.find(id=\"content\")\n\n salutation = self.content.find(id=\"inhalt\").text.strip()\n if is_president:\n salutation = salutation[: salutation.rfind(\" - \")]\n mp[\"is_president\"] = is_president\n mp[\"salutation\"] = salutation\n mp[\"in_committees\"] = self.page.find(\"a\", href=\"#tab-Ausschuesse\") is not None\n mp.update(self._parse_picture())\n mp.update(self._parse_contact_information())\n mp.update(self._parse_biography())\n return mp", "def get_plotting_data(each_misfit_windows_collection, iterations_list, snr_threshold, event_depth_dict):\n result = {}\n phases_zr = [\"P\", \"pP\", \"sP\", \"PP\", \"S\", \"sS\", \"SS\"]\n phases_t = [\"ScS\", \"S\", \"sS\", \"SS\"]\n conditions = {\n \"P\": {\n \"exclude_p\": False,\n \"exclude_s\": True\n },\n \"pP\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"sP\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"PP\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"S\": {\n \"exclude_p\": False,\n \"exclude_s\": False\n },\n \"sS\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"SS\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"ScS\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"surface_z\": {\n \"exclude_p\": False,\n \"exclude_s\": False\n },\n \"surface_r\": {\n \"exclude_p\": False,\n \"exclude_s\": False\n },\n \"surface_t\": {\n \"exclude_p\": False,\n \"exclude_s\": False\n },\n }\n # we can exrtact the information from the misfit_windows in the order of the pdf output.\n # order will be z,r,t[,surface_z,surface_r,surface_t]\n rep_net_sta = sorted(event_depth_dict.keys())[0]\n event_depth_this_event = event_depth_dict[rep_net_sta]\n if (event_depth_this_event > SURFACE_THRESHOLD):\n category_list = [\"z\", \"r\", \"t\"]\n category_phases = [phases_zr, phases_zr, phases_t]\n else:\n category_list = [\"z\", \"r\", \"t\", \"surface_z\", \"surface_r\", \"surface_t\"]\n category_phases = [phases_zr, phases_zr, phases_t,\n [\"surface_z\"], [\"surface_r\"], [\"surface_t\"]]\n for each_iteration in iterations_list:\n result[each_iteration] = {}\n for each_category, each_category_phases in zip(category_list, category_phases):\n result[each_iteration][each_category] = []\n for each_category_phase in each_category_phases:\n phase_condition = conditions[each_category_phase]\n cc = get_windows_cc(\n each_misfit_windows_collection[each_iteration], phase_condition[\n \"exclude_p\"], phase_condition[\"exclude_s\"],\n each_category, snr_threshold, each_category_phase)\n cc = cc[cc >= 0]\n deltat = get_windows_deltat(\n 
each_misfit_windows_collection[each_iteration], phase_condition[\n \"exclude_p\"], phase_condition[\"exclude_s\"],\n each_category, snr_threshold, each_category_phase)\n deltat = deltat[np.abs(deltat) <= 10]\n similarity = get_windows_similarity(\n each_misfit_windows_collection[each_iteration], phase_condition[\n \"exclude_p\"], phase_condition[\"exclude_s\"],\n each_category, snr_threshold, each_category_phase)\n similarity = similarity[similarity >= 0]\n result[each_iteration][each_category].append(\n {\"net_sta\": get_windows_net_sta(\n each_misfit_windows_collection[each_iteration], phase_condition[\n \"exclude_p\"], phase_condition[\"exclude_s\"],\n each_category, snr_threshold, each_category_phase),\n \"cc\": cc,\n \"deltat\": deltat,\n \"similarity\": similarity,\n }\n )\n # result:dict->each_iteration:dict->each_category:list as the dict showed before, we should return the category_phases\n # we should combine the surface wave phases to one page\n if (len(category_phases) == 6):\n for each_iteration in iterations_list:\n category_phases = [phases_zr, phases_zr, phases_t,\n [\"surface_z\", \"surface_r\", \"surface_t\"]]\n category_list = [\"z\", \"r\", \"t\", \"surface\"]\n result[each_iteration][\"surface\"] = []\n result[each_iteration][\"surface\"].append(\n result[each_iteration][\"surface_z\"][0])\n result[each_iteration][\"surface\"].append(\n result[each_iteration][\"surface_r\"][0])\n result[each_iteration][\"surface\"].append(\n result[each_iteration][\"surface_t\"][0])\n del result[each_iteration][\"surface_z\"]\n del result[each_iteration][\"surface_r\"]\n del result[each_iteration][\"surface_t\"]\n\n return result, category_phases, category_list", "def get_illustrations(self):\n \n temp=[[\"middle\",[],0,0],[\"middle\",[],0,0],[\"center\",[],0,0]]\n for pic in self.illustrations.all():\n \n if pic.position==\"L\":\n temp[0][1].append(pic)\n temp[0][2]=max(temp[0][2],pic.width)\n temp[0][3]+=pic.height\n elif pic.position==\"R\":\n temp[1][1].append(pic)\n temp[1][2]=max(temp[1][2],pic.width)\n temp[1][3]+=pic.height\n else:\n temp[2][1].append(pic)\n temp[2][2]+=pic.width\n temp[2][3]=max(temp[2][3],pic.height)\n temp[0][3]=max(temp[0][3],temp[1][3])\n temp[1][3]=temp[0][3]\n if len(temp[2][1])>0:\n pos = temp[2][1][0].position\n if pos == \"BR\":\n temp[2][0] = \"right\"\n elif pos == \"BL\":\n temp[2][0] = \"left\"\n for i in range(2):\n if len(temp[i][1])>0:\n pos = temp[i][1][0].position\n if pos in [\"RT\",\"LT\"]:\n temp[i][0] = \"top\"\n elif pos in [\"RB\",\"LB\"]:\n temp[i][0] = \"bottom\"\n self.text_size=(-temp[0][2]-temp[1][2],-temp[2][3])\n print(temp)\n return temp", "def parse_layout(input_filename):\n with open(input_filename, 'r') as layout_file:\n height = int(layout_file.next().strip()) # = height\n layout_file.next() # = width\n n_info = int(layout_file.next().strip()) # = number of associated info pieces\n layout = \"\"\n for i in xrange(height):\n layout += layout_file.next()\n patt_info = \\\n re.compile(r\"^\\s*(?P<row>\\d+)\\s+(?P<col>\\d+)\\s+(?P<word>[\\w']+).*\")\n info_dict = {}\n for i in xrange(n_info):\n match_info = patt_info.match(layout_file.next())\n loc = ( int(match_info.group('row')), int(match_info.group('col')) )\n info_dict[loc] = match_info.group('word')\n return (layout, info_dict)", "def get_presentations(self):\r\n raise NotImplementedError", "def info_about_petrol_kinds(petrol_stations):\n info_about_petrol_kinds = {}\n info_about_petrol_kinds['total amount of petrol'] = 0\n\n for number_of_petrol in petrol_stations:\n for 
petrol_name in petrol_stations[number_of_petrol]['kinds']:\n if petrol_name not in info_about_petrol_kinds:\n info = {}\n if petrol_name == 'АИ-80':\n price = 38.95\n elif petrol_name == 'АИ-92':\n price = 43.01\n elif petrol_name == 'АИ-95':\n price = 45.69\n elif petrol_name == 'АИ-98':\n price = 49.2\n info['price'] = price\n info['stations'] = [number_of_petrol]\n info['amount of petrol'] = 0\n info_about_petrol_kinds[petrol_name] = info\n else:\n info = info_about_petrol_kinds[petrol_name]\n info['stations'] = info['stations'] + [number_of_petrol]\n return info_about_petrol_kinds", "def get_stim_onset_times(sessions, metadata_dict):\n if not isinstance(sessions, list):\n sessions = list(sessions)\n\n for line in sessions:\n session_id = line['Sess.ID']\n if session_id: # we loaded a line with session info\n session_name = '{}_{}'.format(line['Experiment'], line['Sess.ID'])\n\n # Check if session is already in database\n if database is not None and session_name in database.index:\n continue\n session_stimuli = {}\n session_stimuli['session_id'] = session_id\n session_stimuli['stimuli'] = {}\n session_stimuli['stimuli']['visual'] = []\n session_stimuli['stimuli']['audio'] = []\n session_stimuli['stimuli']['digital'] = []\n videopaths = []\n # load data from .tdms and .avi fils\n for recording in line['Recordings']:\n path = os.path.join(line['Base fld'], line['Exp fld'], recording)\n for f in os.listdir(path):\n if '.avi' in f:\n videopaths.append(os.path.join(path, f))\n print(videopaths)\n elif '.tdms' == f[-5:]:\n tdmspath = os.path.join(path, f)\n # Loop over each .tdms file and extract stimuli frames\n print(colored('Loading {}: {}'.format(session_name,os.path.basename(tdmspath)),'yellow'))\n tdms = TdmsFile(tdmspath)\n if metadata_dict[session_name]['software'] == 'behaviour':\n visual_rec_stims, audio_rec_stims, digital_rec_stims = [], [], []\n for group in tdms.groups():\n for obj in tdms.group_channels(group):\n if 'stimulis' in str(obj).lower():\n for idx in obj.as_dataframe().loc[0].index:\n if \"/' \" in idx:\n framen = int(idx.split(\"/' \")[1].split('-')[0])\n elif \"/' \" in idx:\n framen = int(idx.split(\"/' \")[1].split('-')[0])\n else:\n framen = int(idx.split(\"/'\")[2].split('-')[0])\n if 'visual' in str(obj).lower():\n visual_rec_stims.append(framen)\n elif 'audio' in str(obj).lower():\n audio_rec_stims.append(framen)\n elif 'digital' in str(obj).lower():\n digital_rec_stims.append(framen)\n else:\n print(colored('Couldnt load stim correctly','yellow'))\n # Now use the AI channels to find the *real* stimulus onset times and replace them\n if audio_rec_stims:\n stimulus_on_idx = np.where(tdms.group_channels('AI')[3].data > .55)[0] #in first data sets this is AI 1, later AI 2\n idx_since_last_stimulus_on = np.diff(stimulus_on_idx)\n if stimulus_on_idx.size:\n stimulus_start_idx = stimulus_on_idx[np.append(np.ones(1).astype(bool),idx_since_last_stimulus_on>2*10000)] #usually 10 or 30\n stimulus_start_frame = np.ceil(stimulus_start_idx / 10000 / (33 + 1 / 3) * 1000).astype(int)\n stimulus_start_frame = stimulus_start_frame[stimulus_start_frame > 300]\n else:\n stimulus_start_frame = np.array(audio_rec_stims)\n print('NO STIMULI FOUND!!')\n\n if len(stimulus_start_frame) != len(audio_rec_stims):\n print('audio AI channel does not match number of timestamps by ' + str(len(audio_rec_stims)-len(stimulus_start_frame)) )\n else:\n discrepancy = stimulus_start_frame - audio_rec_stims\n if sum(discrepancy>8):\n print('audio AI channel does not match values of timestamps')\n 
else:\n print(discrepancy)\n # for conditioning experiment, just use what the tdms says\n # if 'food' in line['Experiment']:\n # stimulus_start_frame = np.array(audio_rec_stims)\n audio_rec_stims = list(stimulus_start_frame)\n\n session_stimuli['stimuli']['visual'].append(visual_rec_stims)\n session_stimuli['stimuli']['audio'].append(audio_rec_stims)\n session_stimuli['stimuli']['digital'].append(digital_rec_stims)\n\n else:\n \"\"\" HERE IS WHERE THE CODE TO GET THE STIM TIMES IN MANTIS WILL HAVE TO BE ADDEDD \"\"\"\n pass\n\n # Add to dictionary (or update entry)\n stimulus_dict[session_name] = session_stimuli\n return stimulus_dict", "def featurize_1(list_of_demonstrations, kinematics, sr):\n\tprint \"FEATURIZATION 1\"\n\n\tdata_X_1 = {}\n\tdata_X_2 = {}\n\tfor demonstration in list_of_demonstrations:\n\t\tprint \"SIFT for \", demonstration\n\t\tstart, end = parser.get_start_end_annotations(constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER\n\t\t\t\t\t\t+ demonstration + \"_\" + constants.CAMERA +\".p\")\n\n\t\tW = kinematics[demonstration]\n\t\tW_sampled = utils.sample_matrix(W, sampling_rate = sr)\n\n\n\t\tPATH_TO_SIFT = constants.PATH_TO_DATA + \"sift_FCED/SIFT_\"+ demonstration\n\t\tZ = pickle.load(open(PATH_TO_SIFT + \"_1.p\", \"rb\"))\n\t\tZ = Z[start:end + 1]\n\t\tZ_sampled_1 = utils.sample_matrix(Z, sampling_rate = sr)\n\n\t\tZ = pickle.load(open(PATH_TO_SIFT + \"_2.p\", \"rb\"))\n\t\tZ = Z[start:end + 1]\n\t\tZ_sampled_2 = utils.sample_matrix(Z, sampling_rate = sr)\n\n\t\tassert Z_sampled_1.shape[0] == W_sampled.shape[0]\n\t\tassert Z_sampled_2.shape[0] == W_sampled.shape[0]\n\n\t\tdata_X_1[demonstration] = np.concatenate((W_sampled, Z_sampled_1), axis = 1)\n\t\tdata_X_2[demonstration] = np.concatenate((W_sampled, Z_sampled_2), axis = 1)\n\n\tpickle.dump(data_X_1, open(PATH_TO_FEATURES + \"SIFT_1.p\", \"wb\"))\n\tpickle.dump(data_X_2, open(PATH_TO_FEATURES + \"SIFT_2.p\", \"wb\"))", "def read_documents(file_path: str) -> List[Tuple[str, List[Tuple[str, List[str]]]]]:\n print(f'Reading SciREX documents from {file_path}')\n with open(file_path, 'r') as json_file:\n json_list = list(json_file)\n\n papers = []\n for json_str in json_list:\n papers.append(json.loads(json_str))\n\n def find_index_in_array(index, array):\n for array_index, (start, end) in enumerate(array):\n if end > index:\n return array_index\n\n result = []\n for paper in papers:\n result_sections = []\n\n # Populate the sentences list with section information.\n for index, section in enumerate(paper['sections']):\n # Get the first sentence of the section.\n index = find_index_in_array(section[0], paper['sentences'])\n sentence = paper['sentences'][index]\n # The section name is the first sentence of the section.\n section_name = paper['words'][sentence[0]:sentence[1]]\n\n # Example for the first sentence on a section:\n # [\"section\", \":\", \"Abstract\"]\n # If the first sentence starts with [\"section\", \":\"], we are only interested in the words after that prefix.\n if len(section_name) >= 2 and section_name[1] == \":\":\n section_name_length = len(section_name)\n section_name = section_name[2:]\n else:\n section_name_length = 0\n if index == 0:\n # First section will always be labled as 'Title'\n section_name = ['Title']\n else:\n section_name = []\n\n result_sections.append((\" \".join(section_name), []))\n\n words = paper['words']\n for info in paper['sentences']:\n sentence = words[info[0]:info[1]]\n section_index = find_index_in_array(info[0], paper['sections'])\n\n 
result_sections[section_index][1].append(\" \".join(sentence))\n\n result.append((str(paper['doc_id']), result_sections))\n\n return result", "def organize_data(self, data):\n presentation = \"\"\n\n offices = data['offices']\n living = data['living']\n\n presentation += \"OFFICE \\n\\n\"\n if len(offices) == 0:\n presentation += \"No Office allocations\\n\"\n\n else:\n presentation += \"Office Allocations\\n\"\n for office in offices:\n presentation += office['room'].capitalize() + \"\\n\"\n presentation += \"Members: \\n\"\n presentation += ','.join(office['names'])\n\n presentation += \"LIVING SPACES \\n\\n\"\n if len(offices) == 0:\n presentation += \"No Living space allocations\\n\"\n\n else:\n presentation += \"Office Allocations\\n\"\n for space in living:\n presentation += living['room'].capitalize() + \"\\n\"\n presentation += ','.join(living['names'])\n\n return presentation", "def extract_data(filename: str, directory: str) -> Dict:\n with open(filename) as f:\n lines = f.readlines()\n\n # Split data by :\n annotations = [line.replace(\" \", \"\").split(\":\") for line in lines]\n\n # Split data by ;\n for annotation in annotations:\n annotation[1] = annotation[1].split(\";\")\n\n # Loop for saving metadata into dictionary\n annot_dict = dict()\n for annotation in annotations:\n img = annotation[0]\n bbox_metadata = annotation[1]\n bbox = list()\n \n # Path to images\n img_path = os.path.join(directory, img)\n im = Image.open(img_path)\n width, height = im.size\n\n # Iterate over each bounding box\n for annot in bbox_metadata:\n \n if \"MISC_SIGNS\" == annot:\n signStatus = 'N/A'\n signTypes = \"MISC_SIGNS\"\n signPurpose = 'N/A'\n\n signBB = (-1, -1, -1, -1)\n signC = (-1, -1)\n signSize = 0\n aspectRatio = 0\n\n bbox.append({\"signStatus\": signStatus, \n \"signTypes\": signTypes, \n \"signPurpose\": signPurpose, \n \"signBB\": signBB, \n \"signC\": signC, \n \"signSize\": signSize, \n \"aspectRatio\": aspectRatio})\n elif \"\\n\" in annot:\n pass\n else:\n data = annot.split(\",\")\n \n signStatus = data[0] # signStatus\n signTypes = data[6] # signTypes\n signPurpose = data[5] # PROHIBITORY, WARNING, OTHER, INFORMATION\n tl_x, tl_y, br_x, br_y = data[3], data[4], data[1], data[2]\n \n if is_valid_decimal(tl_x):\n tl_x = float(tl_x)\n else:\n tl_x = float(cutoff_letter(tl_x))\n\n if is_valid_decimal(tl_y):\n tl_y = float(tl_y)\n else:\n tl_y = float(cutoff_letter(tl_y))\n\n if is_valid_decimal(br_x):\n br_x = float(br_x)\n else:\n br_x = float(cutoff_letter(br_x))\n\n if is_valid_decimal(br_y):\n br_y = float(br_y)\n else:\n br_y = float(cutoff_letter(br_y))\n\n if tl_x < 0:\n tl_x = 0\n elif tl_x > width:\n tl_x = width\n \n if tl_y < 0:\n tl_y = 0\n elif tl_y > height:\n tl_y = height\n \n if br_x < 0:\n br_x = 0\n elif br_x > width:\n br_x = width\n \n if br_y < 0:\n br_y = 0\n elif br_y > height:\n br_y = height\n\n signBB = (tl_x, tl_y, br_x, br_y)\n signC = (br_x + tl_x)/2, (br_y + tl_y)/2\n signSize = (br_x - tl_x) * (br_y - tl_y)\n aspectRatio = (br_x - tl_x) / (br_y - tl_y)\n\n bbox.append({\"signStatus\": signStatus, \n \"signTypes\": signTypes, \n \"signPurpose\": signPurpose, \n \"signBB\": signBB, \n \"signC\": signC, \n \"signSize\": signSize, \n \"aspectRatio\": aspectRatio})\n \n \n annot_dict[img_path] = bbox\n return annot_dict", "def get_style_features(text, nlp):\n doc = nlp(text)\n \n final_data = {f'mpqa_{k}': v for k, v in doc._.total_argument_types.items()}\n final_data['tb_sentiment'] = doc.sentiment\n final_data['tb_subjectivity'] = 
doc._.subjectivity\n \n # Return avg for emotions\n emotion_data = doc._.emotions\n emotion_data = {k: v / len(doc) for k, v in emotion_data.items()}\n \n final_data.update(emotion_data)\n \n cur_lemmas = list(set(w.lemma_ for w in doc))\n final_data['lemmas'] = cur_lemmas\n \n return final_data", "def overview(game_id):\n output = {}\n # get data\n overview = mlbgame.data.get_overview(game_id)\n # parse data\n overview_root = etree.parse(overview).getroot()\n\n try:\n output = add_raw_box_score_attributes(output, game_id)\n except ValueError:\n pass\n\n # get overview attributes\n for x in overview_root.attrib:\n output[x] = overview_root.attrib[x]\n\n # Get probable starter attributes if they exist\n home_pitcher_tree = overview_root.find('home_probable_pitcher')\n if home_pitcher_tree is not None:\n output.update(build_namespaced_attributes(\n 'home_probable_pitcher', home_pitcher_tree))\n else:\n output.update(build_probable_starter_defaults('home'))\n\n away_pitcher_tree = overview_root.find('away_probable_pitcher')\n if away_pitcher_tree is not None:\n output.update(build_namespaced_attributes(\n 'away_probable_pitcher', away_pitcher_tree))\n else:\n output.update(build_probable_starter_defaults('away'))\n\n return output", "def studio_state(self):\n submission = self.get_question()\n if submission:\n uploaded_submission = submission.get(\"question\").get(\"filename\", None)\n if uploaded_submission:\n quploaded = {\"filename\": submission['question']['filename']}\n else:\n quploaded = None\n else:\n quploaded = None\n \n submission = self.get_solution()\n if submission:\n uploaded_submission = submission.get(\"solution\").get(\"filename\", None)\n if uploaded_submission:\n suploaded = {\"filename\": submission['solution']['filename']}\n else:\n suploaded = None\n else:\n suploaded = None\n\n\n return {\n \"display_name\": self.title,\n \"question\":self.question,\n \"uploaded\": quploaded,\n \"suploaded\":suploaded,\n \"raw_question\" : self.raw_question,\n \"solutionUploaded\": suploaded,\n \"raw_soluion\": self.raw_solution,\n \"weight\":self.weight\n }", "def _presets(self, hdr):\n # presput/slit = 1080\n # presput/lens = 3640\n # measure/slit = 1080\n # measure/lens = 3640\n # 2 x 1080 padding\n # padding can be before presput, inbetween presput and measure,\n # and after measure.\n\n d = {}\n d['Presputter'] = {}\n padding = 0\n if not self._preset_start(hdr):\n hdr.seek(1080, 1)\n padding += 1\n d['Presputter']['Slits'] = self._preset(hdr, group='slit')\n d['Presputter']['Lenses'] = self._preset(hdr, group='lens')\n d['Measure'] = {}\n if not self._preset_start(hdr):\n hdr.seek(1080, 1)\n padding += 1\n d['Measure']['Slits'] = self._preset(hdr, group='slit')\n d['Measure']['Lenses'] = self._preset(hdr, group='lens')\n hdr.seek(1080 * (2 - padding), 1)\n return d", "def get_hole_dicts(sketch):\n return {\n name: bits\n for name, bits in findall(r'(\\w+)= \\?\\?\\((\\d+)\\);', sketch)\n }", "def proc_education_summary( summary: Tag ) -> Dict[str, str]:\n edu_record = dict()\n edu_record['school'] = summary.find('h3').text.strip()\n edu_record['is_abroad_school'] = _is_abroad_school( edu_record['school'] )\n\n for parag in summary.find_all('p'):\n spans = [span.text.strip() for span in parag.find_all('span')]\n if len( spans ) == 2:\n fld_name, value = spans\n value = value.strip()\n elif len(spans) == 0:\n # print( 'education parag: ', parag )\n edu_record['description'] = parag.text.strip()\n continue\n else:\n print( 'education spans: ', spans )\n continue\n\n if fld_name == 
'Nombre de la titulación':\n edu_record['degree_raw'] = value\n edu_record['degree'] = _classify_degree( value )\n # print( 'degree: ', value, _classify_degree(value) )\n elif fld_name == 'Disciplina académica':\n edu_record['field_raw'] = value\n elif fld_name == 'Nota':\n edu_record['grade_raw'] = value\n elif fld_name.startswith('Fechas de estudios'):\n edu_record['period_raw'] = value\n elif fld_name.startswith('Actividades y asociaciones'):\n edu_record['activities_raw'] = value\n else:\n print(\"proc_education_summary: \", fld_name, ' :: ', value)\n\n if edu_record.get('degree', 'Unknown') == 'Unknown':\n if re.search( 'Ingenier|Engineering', edu_record.get('field_raw', '') ):\n edu_record['degree'] = 'University'\n\n return edu_record\n # %%", "def structure_anslysis(text, display = None):\n st.write('Text Size Exceeded! Truncating...')\n doc = nlp(text[:100000])\n pos_freq = pos_tag_counts(doc)\n ent_freq = entity_counts(doc)\n \n fig, axs = plt.subplots(1, 2, figsize = (15, 6))\n \n sns.barplot(list(pos_freq.keys()), list(pos_freq.values()), color='#e84118', ax = axs[0])\n axs[0].set_title('POS COUNTS')\n axs[0].set_xticklabels(labels = list(pos_freq.keys()), rotation = 90)\n \n sns.barplot(list(ent_freq.keys()), list(ent_freq.values()), color='#273c75', ax = axs[1])\n axs[1].set_title('ENTITY COUNTS')\n axs[1].set_xticklabels(labels = list(ent_freq.keys()), rotation = 90) \n \n plt.show()\n \n if display:\n spacy_streamlit.visualize_ner(doc, labels = nlp.get_pipe('ner').labels)\n \n \n return pos_freq, ent_freq", "def get_slide_analytics_new(slides) -> List[int]:\n word_count = []\n for slide in slides:\n print(slide)\n words = 0\n for shape in slide.shapes:\n if not shape.has_text_frame:\n continue\n print(shape.name)\n for paragraph in shape.text_frame.paragraphs:\n for run in paragraph.runs:\n print(\" \" + run.text)\n words += len(run.text.split())\n word_count.append(words)\n return word_count", "def analyze(self):\n result = []\n for frame_no, frame in enumerate(self.work.files('frames')):\n areas = self.blur.check_image(frame, self.work.files('templates'))\n for area in areas:\n index = find_area(result, area)\n if index == -1:\n result.append({'area': area, 'frames': [frame_no]})\n else:\n result[index]['frames'].append(frame_no)\n\n for values in result:\n sectors = [[values['frames'][0], values['frames'][0]]]\n for index in range(1, len(values['frames'])):\n if (values['frames'][index] - sectors[-1][1]) == 1:\n sectors[-1][1] = values['frames'][index]\n else:\n sectors.append([values['frames'][index], values['frames'][index]])\n values['sectors'] = sectors\n return result", "def extract_structural_profile(merged_path, num_seq, window):\n\n # parse further and load structural profile as np.array\n f = open(merged_path, 'r')\n structure = []\n for i in range(num_seq):\n seq = f.readline()\n paired = f.readline().strip().split('\\t')\n hairpin = f.readline().strip().split('\\t')\n internal = f.readline().strip().split('\\t')\n multi = f.readline().strip().split('\\t')\n external = f.readline().strip().split('\\t')\n\n paired = np.array(paired).astype(np.float32)\n hairpin = np.array(hairpin).astype(np.float32)\n internal = np.array(internal).astype(np.float32)\n multi = np.array(multi).astype(np.float32)\n external = np.array(external).astype(np.float32)\n\n # pad sequences\n seq_length = len(paired)\n offset1 = int((window - seq_length)/2)\n offset2 = window - seq_length - offset1\n struct = np.array([paired, hairpin, internal, multi, external])\n num_dims = 
struct.shape[0]\n if offset1:\n struct = np.hstack([np.zeros((num_dims,offset1)), struct])\n if offset2:\n struct = np.hstack([struct, np.zeros((num_dims,offset2))])\n structure.append(struct)\n\n return np.array(structure)", "def ppt_file_to_dict(self, file_path):\n try:\n file = open(file_path, \"rb\")\n\n except IOError as e:\n print(e)\n return\n\n pres = Presentation(file)\n file.close()\n\n for i in range(len(pres.slides)):\n self.process_slide(pres.slides[i], i + 1, file_path)", "def infotodict(seqinfo):\n \n \"\"\"\n MCF Pilot Protocol acquired on Friday April 13th\n \n >>> hdc_look.py -s mfc001 -ss 1\n series_id sequence_name series_description dim1 dim2 dim3 dim4 TR TE is_derived is_motion_corrected\n 0 1-localizer *fl2d1 localizer 192 192 3 1 0.020 5.00 False False\n 1 2-pre_Neutral1_A>>P Resting 4X4X4 *epfid2d1_64 pre_Neutral1_A>>P Resting 4X4X4 64 64 35 148 2.000 25.00 False False\n 2 3-pre_topup_A>>P *epse2d1_64 pre_topup_A>>P 64 64 140 1 2.400 38.00 False False\n 3 4-pre_topup_P>>A *epse2d1_64 pre_topup_P>>A 64 64 140 1 2.400 38.00 False False\n 4 5-Field_mapping 4X4X4 A>>P *fm2d2r Field_mapping 4X4X4 A>>P 64 64 35 1 0.488 4.92 False False\n 5 6-Field_mapping 4X4X4 A>>P *fm2d2r Field_mapping 4X4X4 A>>P 64 64 35 1 0.488 7.38 False False\n 6 7-pre+heat1_A>>P 4X4X4 *epfid2d1_64 pre+heat1_A>>P 4X4X4 64 64 35 148 2.000 25.00 False False\n 7 8-pre_Neutral2_A>>P Resting 4X4X4 *epfid2d1_64 pre_Neutral2_A>>P Resting 4X4X4 64 64 35 148 2.000 25.00 False False\n 8 9-pre+heat2_A>>P 4X4X4 *epfid2d1_64 pre+heat2_A>>P 4X4X4 64 64 35 148 2.000 25.00 False False\n 9 10-MPRAGE_GRAPPA2 *tfl3d1_16ns MPRAGE_GRAPPA2 256 240 192 1 2.300 2.98 False False\n 10 11-post_Neutral3_A>>P Resting 4X4X4 *epfid2d1_64 post_Neutral3_A>>P Resting 4X4X4 64 64 35 148 2.000 25.00 False False\n 11 12-post+heat3_A>>P 4X4X4 *epfid2d1_64 post+heat3_A>>P 4X4X4 64 64 35 148 2.000 25.00 False False\n 12 13-post_Neutral4_A>>P Resting 4X4X4 *epfid2d1_64 post_Neutral4_A>>P Resting 4X4X4 64 64 35 148 2.000 25.00 False False\n 13 14-post+heat4_A>>P 4X4X4 *epfid2d1_64 post+heat4_A>>P 4X4X4 64 64 35 148 2.000 25.00 False False\n 14 15-post_topup_A>>P *epse2d1_64 post_topup_A>>P 64 64 140 1 2.400 38.00 False False\n 15 16-post_topup_P>>A *epse2d1_64 post_topup_P>>A 64 64 140 1 2.400 38.00 False False\n \n \"\"\"\n\n bids_prefix = 'sub-{subject}/{session}/'\n\n pre_neutral1_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preNeutral1_acq-epi_rec-fmap_bold.{item:01d}')\n pre_heat1_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preHeat1_acq-epi_rec-fmap_bold.{item:01d}')\n pre_heat2_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preHeat2_acq-epi_rec-fmap_bold.{item:01d}')\n pre_neutral2_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preNeutral2_acq-epi_rec-fmap_bold.{item:01d}')\n\n pre_neutral1_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preNeutral1_acq-epi_rec-topup_bold.{item:01d}')\n pre_heat1_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preHeat1_acq-epi_rec-topup_bold.{item:01d}')\n pre_heat2_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preHeat2_acq-epi_rec-topup_bold.{item:01d}')\n pre_neutral2_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preNeutral2_acq-epi_rec-topup_bold.{item:01d}')\n\n pre_topup_ap = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-preEpi_dir-ap_epi.{item:01d}')\n pre_topup_pa = create_key(bids_prefix + 
'fmap/sub-{subject}_{session}_acq-preEpi_dir-pa_epi.{item:01d}')\n\n # The item was commented out for Phase Difference field maps. Conversion did not work correctly. I removed the item number to try to\n # isolate the problem.\n\n pre_fmap_magnitude1 = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-pre_magnitude1.{item:01d}')\n pre_fmap_phasediff = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-pre_phasediff.{item:01d}')\n\n t1w = create_key(bids_prefix + 'anat/sub-{subject}_{session}_T1w')\n\n post_neutral3_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postNeutral3_acq-epi_rec-fmap_bold.{item:01d}')\n post_heat3_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postHeat3_acq-epi_rec-fmap_bold.{item:01d}')\n post_heat4_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postHeat4_acq-epi_rec-fmap_bold.{item:01d}')\n post_neutral4_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postNeutral4_acq-epi_rec-fmap_bold.{item:01d}')\n\n post_neutral3_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postNeutral3_acq-epi_rec-topup_bold.{item:01d}')\n post_heat3_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postHeat3_acq-epi_rec-topup_bold.{item:01d}')\n post_heat4_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postHeat4_acq-epi_rec-topup_bold.{item:01d}')\n post_neutral4_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postNeutral4_acq-epi_rec-topup_bold.{item:01d}')\n\n post_topup_ap = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-postEpi_dir-ap_epi.{item:01d}')\n post_topup_pa = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-postEpi_dir-pa_epi.{item:01d}')\n\n # Create an empty dictionary called info for each key\n\n info = {pre_neutral1_ap_fmap: [],\n pre_heat1_ap_fmap: [],\n pre_heat2_ap_fmap: [],\n pre_neutral2_ap_fmap: [],\n\n pre_neutral1_ap_topup: [],\n pre_heat1_ap_topup: [],\n pre_heat2_ap_topup: [],\n pre_neutral2_ap_topup: [],\n\n pre_topup_ap: [],\n pre_topup_pa: [],\n\n pre_fmap_magnitude1: [],\n pre_fmap_phasediff: [],\n\n t1w: [],\n\n post_neutral3_ap_fmap: [],\n post_heat3_ap_fmap: [],\n post_heat4_ap_fmap: [],\n post_neutral4_ap_fmap: [],\n\n post_neutral3_ap_topup: [],\n post_heat3_ap_topup: [],\n post_heat4_ap_topup: [],\n post_neutral4_ap_topup: [],\n\n post_topup_ap: [],\n post_topup_pa: [],\n\n }\n\n # Loop over each sequence. 
Use if statements to determine which sequences should be linked to which key\n\n for idx, s in enumerate(seqinfo):\n\n if 'pre_Neutral1' in s.series_id:\n info[pre_neutral1_ap_fmap].append([s.series_id])\n info[pre_neutral1_ap_topup].append([s.series_id])\n\n if 'pre+heat1' in s.series_id:\n info[pre_heat1_ap_fmap].append([s.series_id])\n info[pre_heat1_ap_topup].append([s.series_id])\n\n if 'pre+heat2' in s.series_id:\n info[pre_heat2_ap_fmap].append([s.series_id])\n info[pre_heat2_ap_topup].append([s.series_id])\n\n if 'pre_Neutral2' in s.series_id:\n info[pre_neutral2_ap_fmap].append([s.series_id])\n info[pre_neutral2_ap_topup].append([s.series_id])\n\n if 'pre_topup_A>>P' in s.series_id:\n info[pre_topup_ap].append([s.series_id])\n\n if 'pre_topup_P>>A' in s.series_id:\n info[pre_topup_pa].append([s.series_id])\n\n if (('Field_mapping 4X4X4 A>>P' in s.series_id) and\n (s.TE == 4.92)):\n info[pre_fmap_magnitude1].append([s.series_id])\n \n if (('Field_mapping 4X4X4 A>>P' in s.series_id) and\n (s.TE == 7.38)):\n info[pre_fmap_phasediff].append([s.series_id])\n\n if 'MPRAGE_GRAPPA2' in s.series_id:\n info[t1w].append([s.series_id])\n\n if 'post_Neutral3' in s.series_id:\n info[post_neutral3_ap_fmap].append([s.series_id])\n info[post_neutral3_ap_topup].append([s.series_id])\n\n if 'post+heat3' in s.series_id:\n info[post_heat3_ap_fmap].append([s.series_id])\n info[post_heat3_ap_topup].append([s.series_id])\n\n if 'post+heat4' in s.series_id:\n info[post_heat4_ap_fmap].append([s.series_id])\n info[post_heat4_ap_topup].append([s.series_id])\n\n if 'post_Neutral4' in s.series_id:\n info[post_neutral4_ap_fmap].append([s.series_id])\n info[post_neutral4_ap_topup].append([s.series_id])\n\n if 'post_topup_A>>P' in s.series_id:\n info[post_topup_ap].append([s.series_id])\n\n if 'post_topup_P>>A' in s.series_id:\n info[post_topup_pa].append([s.series_id])\n\n return info", "def test_reviewData():\n starttime = UTCDateTime('2018-06-18T02:34:20')\n endtime = UTCDateTime('2018-06-18T02:37:20')\n st = rd.getdata('IU', 'TEIG,PAYG', '00', 'BHZ', starttime, endtime, savedat=True,\n filenamepref='Test1_', loadfromfile=True, reloadfile=False)\n\n event_lat = 14.178\n event_lon = -90.670\n\n rd.attach_coords_IRIS(st)\n rd.attach_distaz_IRIS(st, event_lat, event_lon)\n\n fig = rd.recsec(st)\n\n freqs, amps, fig2 = rd.make_multitaper(st, render=False)\n\n fig3 = rd.make_spectrogram(st)\n\n rd.nextpow2(7)\n\n stacc, stvel = rd.getpeaks(st)\n\n rd.fourier_spectra(st)", "def parse_design(self, detailed_design_file):", "def _extract_dictionary_information(self, entry):\n # Clean up the furigana for the result\n furigana = \"\".join([f.text for f in entry.select(\".kanji\")])\n\n # Cleans the vocabulary word for the result\n vocabulary = self._get_full_vocabulary_string(entry) if not entry.select(\".concept_light-representation .furigana rt\") else entry.select_one(\".concept_light-representation .furigana rt\").text\n\n # The fact that this needs to exist is really annoying.\n # If you go to a page like this: https://jisho.org/word/%E5%8D%B0%E5%BA%A6\n # you'll see that this is a word whose furigana is actually in katakana\n # I didn't realize this happens (it makes sense now), and the huge issue\n # is that there's different HTML in this case, so the previous parsing method\n # doesn't work, so we need a new method...\n\n # Now there could be *really* weird cases where there's a word with both\n # katakana furigana and hiragana furigana (which would be cool), but tbh this\n # I'm satisfied with assuming the whole 
word corresponds with the whole furigana.\n\n # Grab the difficulty tags for the result\n diff_tags = [m.text for m in entry.select(\".concept_light-tag.label\")]\n\n # Grab each of the meanings associated with the result\n cleaned_meanings = self._isolate_meanings(entry.select_one(\".meanings-wrapper\"))\n meanings = [m.select_one(\".meaning-meaning\") for m in cleaned_meanings]\n meanings_texts = [m.text for m in meanings if m != None]\n\n # Romanize the furigana\n halpern = kana_to_halpern(furigana)\n\n information = {\n \"furigana\": furigana,\n \"vocabulary\": vocabulary,\n \"difficulty_tags\": diff_tags,\n \"meanings\": dict(zip(range(1, len(meanings_texts) + 1), meanings_texts)),\n \"n_meanings\": len(meanings_texts),\n \"halpern\": halpern\n }\n\n return information", "def _get_summary_struct(self):\n\n model_fields = [\n (\"Number of reference examples\", 'num_examples')]\n\n training_fields = [\n (\"Method\", 'method'),\n (\"Total training time (seconds)\", 'training_time')]\n\n sections = [model_fields, training_fields]\n section_titles = ['Schema', 'Training']\n\n return (sections, section_titles)", "def storefront_annotate():\n\n\tprint \"CREATING ANNOTATIONS\"\n\tPrintBreakLine()\n\n\tcurrentView = uidoc.ActiveView\n\tstorefrontFrames = []\n\tannotationNotes = []\n\n\tstandardTol = 0.01\n\n\n\t#Form input\n\tfrom rpw.ui.forms import Label, ComboBox, Separator, Button, FlexForm\n\n\tcomponents = [Label('Select System'),\n\t\t\t\t\tComboBox(\"combobox1\", {\"Elite\": \"Elite\", \"MODE\": \"MODE\", \"Extravega\": \"Extravega\"}),\n\t\t\t\t\tComboBox(\"combobox2\", {\"Custom/Standard\": \"CS\", \"Glass Sizes\": \"GS\"}),\n\t\t\t\t\tSeparator(),\n\t\t\t\t\tButton('Go')]\n\n\tform = FlexForm(\"Storefront Annotate\", components)\n\tform.show()\n\n\tif not form.values:\n\t\tsys.exit()\n\telse: \n\t\tsystemName = form.values[\"combobox1\"]\n\t\tannoType = form.values[\"combobox2\"]\n\n\n\t#Load config\n\tstorefrontConfig = storefront_options()\n\n\tif not systemName.lower() in storefrontConfig.currentConfig[\"currentSystem\"].lower():\n\t\tstorefrontConfig.storefront_set_config()\n\t\tsystemName = storefrontConfig.currentConfig[\"currentSystem\"]\n\t\tstorefrontConfig.storefront_save_config()\n\t\n\tfamTypeDict = GetFamilyTypeDict(\"Panel-Symbol-Custom\")\n\tfamTypeDict.update(GetFamilyTypeDict(\"Panel-Symbol-Standard\"))\n\n\t#Load standard sizes\n\tsystemStandardHorizontals = storefrontConfig.currentConfig[\"systemStandardHorizontals\"]\n\tsillStandards = systemStandardHorizontals[\"sill\"]\n\t\n\n\n\t#collect notest in the view if there are any\n\tannotationNotes = list(GetElementsInView(BuiltInCategory.OST_GenericAnnotation, Autodesk.Revit.DB.FamilyInstance, currentView.Id))\n\tannotationNotes = FilterElementsByName(doc, annotationNotes,[\"Panel\",\"Symbol\"], False)\n\n\t#collect walls and mullions\n\tallStorefrontWalls = rpw.db.Collector(of_class='Wall', \n\t\t\t\t\t\t\t\t\t\t\tview=currentView, \n\t\t\t\t\t\t\t\t\t\t\twhere=lambda x: (str(x.WallType.Kind) == \"Curtain\") and (systemName.lower() in x.Name.lower()))\n\n\tallStorefrontMullions = []\n\n\t#Collect mullions\n\tfor sfwall in allStorefrontWalls:\n\t\tfor sfMullionsId in sfwall.CurtainGrid.GetMullionIds():\n\t\t\tallStorefrontMullions.append(doc.GetElement(sfMullionsId))\n\n\tannotationsList = []\n\n\t# Toggle: if theres annotations in the view already, then delete them.\n\n\tif annoType == \"CS\":\n\n\t\tif annotationNotes:\n\t\t\twith rpw.db.Transaction(\"Clear 
Annotations\"):\n\t\t\t\tDeleteElementsInView(currentView.Id, BuiltInCategory.OST_GenericAnnotation, Autodesk.Revit.DB.FamilyInstance, \"Panel-Symbol-Custom\")\n\t\t\t\tDeleteElementsInView(currentView.Id, BuiltInCategory.OST_GenericAnnotation, Autodesk.Revit.DB.FamilyInstance, \"Panel-Symbol-Standard\")\n\n\n\t\t# Toggle: if there is NOT annotations in the view, then create them\n\t\telse:\n\n\t\t\tfor sfMullion in allStorefrontMullions:\n\n\t\t\t\tsfMullionName = sfMullion.Name\n\n\t\t\t\tif sfMullion.LocationCurve:\n\n\t\t\t\t\tsfMullionLength = sfMullion.LocationCurve.Length\n\n\t\t\t\t\tif \"sill\" in sfMullionName.lower():\n\t\t\t\t\t\tplacementPoint = sfMullion.LocationCurve.Evaluate(0.5, True)\n\t\t\t\t\t\ttext = \"\"\n\t\t\t\t\t\tnotesymbol = None\n\n\t\t\t\t\t\tfor key, standardLength in sillStandards.items():\n\t\t\t\t\t\t\tif abs(sfMullionLength - standardLength) < standardTol:\n\t\t\t\t\t\t\t\ttext = \"STANDARD\"\n\t\t\t\t\t\t\t\tnotesymbol = doc.GetElement(famTypeDict[\"Panel-Symbol-Standard\"])\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\ttext = \"CUSTOM\"\n\t\t\t\t\t\t\t\tnotesymbol = doc.GetElement(famTypeDict[\"Panel-Symbol-Custom\"])\n\n\t\t\t\t\t\tannotationsList.append([placementPoint, text, notesymbol])\n\t\t\t\t\n\t\t\t#Place annotations\t\n\t\t\twith rpw.db.Transaction(\"Clear Annotations\"):\n\t\t\t\tfor annot in annotationsList:\n\t\t\t\t\tpoint = annot[0]\n\t\t\t\t\tsym = annot[2]\n\t\t\t\t\tannotInst = doc.Create.NewFamilyInstance(point, sym, currentView)\n\t\t\t\t\tfor p in annotInst.Parameters:\n\t\t\t\t\t\tif p.Definition.Name == \"label_text\":\n\t\t\t\t\t\t\tp.Set(annot[1])\n\n\n\t#Creates glass width tags on a plan\n\tif annoType == \"GS\":\n\n\t\tjunctionPoints = []\n\t\tintermediatePoints = []\n\n\t\tglassTagList = []\n\n\n\t\tfor mullion in allStorefrontMullions:\n\n\t\t\tmullionLength = mullion.get_Parameter(BuiltInParameter.CURVE_ELEM_LENGTH).AsDouble()\n\n\t\t\tif mullionLength > 0 and mullion.LocationCurve:\n\n\t\t\t\tmullionName = mullion.Name.lower()\n\t\t\t\tmullionRoom = mullion.get_Parameter(BuiltInParameter.ALL_MODEL_INSTANCE_COMMENTS).AsString()\n\t\t\t\tmullionPoint = mullion.Location.Point\n\t\t\t\tmullionPoint = XYZ(mullionPoint.X,mullionPoint.Y, 0)\n\n\t\t\t\tif \"post\" in mullionName:\n\t\t\t\t\tjunctionPoints.append([mullionPoint, mullionName])\n\n\t\t\t\tif \"wallstart\" in mullionName:\n\t\t\t\t\tjunctionPoints.append([mullionPoint, mullionName])\n\n\t\t\t\tif \"door\" in mullionName:\n\t\t\t\t\tjunctionPoints.append([mullionPoint, mullionName])\n\n\t\t\t\tif \"intermediate\" in mullionName:\n\t\t\t\t\tintermediatePoints.append([mullionPoint, mullionName])\n\n\n\n\t\tfor storefrontElevation in allStorefrontWalls:\n\n\t\t\tpanelIds = storefrontElevation.CurtainGrid.GetPanelIds()\n\n\t\t\tlinearGlass = storefrontElevation.Location.Curve.Length\n\n\t\t\tstorefrontElevationID = storefrontElevation.get_Parameter(BuiltInParameter.ALL_MODEL_MARK).AsString()\n\t\t\tstorefrontSuperType = storefrontElevation.get_Parameter(BuiltInParameter.ALL_MODEL_INSTANCE_COMMENTS).AsString()\n\n\t\t\t#Panels\n\t\t\tfor panelId in panelIds:\n\n\t\t\t\tpanel = doc.GetElement(panelId)\n\t\t\t\tpanelWidth = panel.get_Parameter(BuiltInParameter.WINDOW_WIDTH).AsDouble()\n\t\t\t\tpanelHeight = panel.get_Parameter(BuiltInParameter.WINDOW_HEIGHT).AsDouble()\n\n\t\t\t\tif (panelWidth > 0) and (panelHeight > 0):\n\n\t\t\t\t\tcondition = None\n\t\t\t\t\tvarient01 = None\n\t\t\t\t\tvarient02 = None\n\n\t\t\t\t\tpanelFamily = 
panel.get_Parameter(BuiltInParameter.ELEM_FAMILY_PARAM).AsValueString()\n\t\t\t\t\tpanelType = panel.get_Parameter(BuiltInParameter.ELEM_TYPE_PARAM).AsValueString()\n\t\t\t\t\tpanelSF = panelWidth * panelHeight\n\t\t\t\t\tpanelSizeName = str(panelWidth) + \" x \" + str(panelHeight)\n\n\t\t\t\t\t#Get panel point and flatten\n\t\t\t\t\tpanelPoint = panel.GetTransform().Origin\n\t\t\t\t\tpanelPoint = XYZ(panelPoint.X, panelPoint.Y, 0)\n\n\t\t\t\t\tpanelRoomID = panel.get_Parameter(BuiltInParameter.ALL_MODEL_INSTANCE_COMMENTS).AsString()\n\n\t\t\t\t\t#Default panel position\n\t\t\t\t\tpanelPositions = []\n\n\t\t\t\t\t# Checking end conditions against junctions (post, wallstart, and door frames)\n\t\t\t\t\tjuntionsAndDoorFrames = junctionPoints\n\n\n\t\t\t\t\tif \"glazed\" in panelFamily.lower():\n\t\t\t\t\t\t\n\t\t\t\t\t\tnumFoundEndConditions = 0\n\n\t\t\t\t\t\t#CORRECT PANEL WIDTH + HEIGHT FOR ACTUAL SIZES\n\t\t\t\t\t\t# Add correction for differences between modeling and reality\n\n\t\t\t\t\t\tglassWidthCorrection = 0\n\t\t\t\t\t\tglassHeightCorrection = 0\n\n\t\t\t\t\t\t#Ends\n\t\t\t\t\t\tfor i in range(len(juntionsAndDoorFrames)):\n\t\t\t\t\t\t\ttestPoint = juntionsAndDoorFrames[i][0]\n\t\t\t\t\t\t\ttestMullionName = juntionsAndDoorFrames[i][1]\n\t\t\t\t\t\t\ttestDist1 = testPoint.DistanceTo(panelPoint)\n\n\t\t\t\t\t\t\tif testDist1 < ((panelWidth/2) + (2.1/12)):\n\t\t\t\t\t\t\t\tglassWidthCorrection += storefrontConfig.currentConfig[\"panelCorrections\"][\"horizontalEnd\"]\n\t\t\t\t\t\t\t\tnumFoundEndConditions += 1 #Found an end condition\n\t\t\t\t\t\t\t\t#print storefrontConfig.currentConfig[\"panelCorrections\"][\"horizontalEnd\"]\n\n\t\t\t\t\t\t#Intermediates\n\t\t\t\t\t\tfor i in range(len(intermediatePoints)):\n\t\t\t\t\t\t\ttestPoint = intermediatePoints[i][0]\n\t\t\t\t\t\t\ttestMullionName = intermediatePoints[i][1]\n\t\t\t\t\t\t\ttestDist2 = testPoint.DistanceTo(panelPoint)\n\n\t\t\t\t\t\t\tif testDist2 < ((panelWidth/2) + (1.8/2)):\n\t\t\t\t\t\t\t\tglassWidthCorrection += storefrontConfig.currentConfig[\"panelCorrections\"][\"horizontalIntermediate\"]\n\t\t\t\t\t\t\t\tnumFoundEndConditions += 1 #Found an end condition\n\t\t\t\t\t\t\t\t#print storefrontConfig.currentConfig[\"panelCorrections\"][\"horizontalIntermediate\"]\n\n\t\t\t\t\t\t#Butt joints\n\t\t\t\t\t\tnumButtJoints = 2 - numFoundEndConditions #Glass has 2 ends, if above conditions arent detected, its assumed a butt joint is found\n\t\t\t\t\t\tglassWidthCorrection += (numButtJoints * storefrontConfig.currentConfig[\"panelCorrections\"][\"horizontalButtJoint\"])\n\n\t\t\t\t\t\t#print numButtJoints\n\n\n\t\t\t\t\t\t#Head and Sill pockets\n\t\t\t\t\t\tglassHeightCorrection += storefrontConfig.currentConfig[\"panelCorrections\"][\"verticalSill\"]\n\t\t\t\t\t\tglassHeightCorrection += storefrontConfig.currentConfig[\"panelCorrections\"][\"verticalHead\"]\n\t\t\t\t\t\t\n\t\t\t\t\t\t#create list of glass size tags\n\t\t\t\t\t\tglassTagList.append([panelPoint,(panelWidth + glassWidthCorrection)])\n\n\t\t\n\t\t#place glass tags\n\t\ttagFamTypeDict = GetFamilyTypeDict(\"Panel-Symbol-Standard\")\n\t\ttagSym = doc.GetElement(tagFamTypeDict[\"Panel-Symbol-Standard\"])\n\n\n\t\t#Set units and format options to convert decimal feet to inche fractional\n\t\tformatUnits = doc.GetUnits()\n\t\tfvo = FormatValueOptions()\n\t\tfo = FormatOptions(DisplayUnitType.DUT_FRACTIONAL_INCHES)\n\t\tfo.Accuracy = .0625\n\t\tfvo.SetFormatOptions(fo)\n\n\t\t#Place annotations\t\n\t\twith rpw.db.Transaction(\"Tag Glass\"):\n\t\t\tfor tag in 
glassTagList:\n\t\t\t\tpoint = tag[0]\n\t\t\t\t#tag = size[2]\n\t\t\t\t\n\t\t\t\tsizeInches = UnitFormatUtils.Format(formatUnits, UnitType.UT_Length, tag[1], False, False, fvo)\n\n\t\t\t\tannotInst = doc.Create.NewFamilyInstance(point, tagSym, currentView)\n\t\t\t\tfor p in annotInst.Parameters:\n\t\t\t\t\tif p.Definition.Name == \"label_text\":\n\t\t\t\t\t\tp.Set(sizeInches)\n\n\n\n\tprint \"FINISHED\"", "def FetchLayoutsData(client):\n layout_names = ['U_layout', 'J_layout', 'E_layout', 'B_layout']\n cols = ['scancode', 'x', 'y', 'w', 'h']\n layouts = FetchSpreadsheetFeeds(client, KEYBOARD_GLYPH_SPREADSHEET_KEY,\n layout_names, cols)\n ret = {}\n for layout_name, layout in layouts.items():\n ret[layout_name[0]] = []\n for row in layout:\n line = []\n for col in cols:\n value = row.get(col)\n if not value:\n line.append('')\n else:\n if col != 'scancode':\n value = float(value)\n line.append(value)\n ret[layout_name[0]].append(line)\n return ret", "def detail_matching(self):\n paradic = self.cfg['param']['paradic']\n work_dir = self.work_dir\n \n x = float(self.cfg['param']['x']) # selected pixel in the first image\n y = float(self.cfg['param']['y'])\n \n # sift parameters\n # number of bins in the orientation histogram\n n_bins = int(paradic['n_bins']) \n n_hist = int(paradic['n_hist']) \n # descriptor of n_hist X n_hist weighted histograms with n_ori\n n_ori = int(paradic['n_ori']) \n delta_min = float(paradic['delta_min'])\n sigma_min = float(paradic['sigma_min'])\n sigma_in = float(paradic['sigma_in'])\n lambda_ori = float(paradic['lambda_ori'])\n lambda_descr = float(paradic['lambda_descr'])\n #threshold defining reference orientations\n n_spo = int(paradic['n_spo'])\n \n # Read feature vectors from output files\n if (os.path.getsize(work_dir+'OUTmatches.txt') > 0 ):\n pairdata = find_nearest_keypoint(work_dir+'OUTmatches.txt', y, x)\n \n illustrate_pair(pairdata, n_bins, n_hist, n_ori, work_dir)\n\n \n # Read keys coordinates.\n d = 6+n_bins+n_hist*n_hist*n_ori # size of keydata inside pairdata\n v = n_hist*n_hist*n_ori\n [x1, y1, sigma1, theta1] = [float(x) for x in pairdata[0:4]]\n [o1, s1] = [float(x) for x in pairdata[4+v:4+v+2]]\n [x2a, y2a, sigma2a, theta2a] = [float(x) for x in pairdata[d:d+4]]\n [o2a, s2a] = [float(x) for x in pairdata[d+4+v:d+4+v+2]]\n [x2b, y2b, sigma2b, theta2b] = \\\n [float(x) for x in pairdata[2*d:2*d+4]]\n [o2b, s2b] = [float(x) for x in pairdata[2*d+4+v:2*d+4+v+2]]\n \n draw_one_match(pairdata,\n work_dir+'input_0.png',\n work_dir+'input_1.png',\n d,\n lambda_ori,\n lambda_descr,\n n_hist,\n work_dir+'OUTonepair.png')\n \n \n # Extract thumbnails.\n # keypoint 1 (image 1)\n print ' '.join(['demo_extract_patch', work_dir+'input_0.png',\n str(x1), str(y1), str(sigma1), str(theta1), str(o1), str(s1),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im1\"])\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_0.png',\n str(x1), str(y1), str(sigma1), str(theta1), str(o1), str(s1),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im1\"])\n self.wait_proc(proc, timeout=self.timeout)\n \n # keypoint 2a (nearest neighbor in image 2)\n print ' '.join(['demo_extract_patch', work_dir+'input_1.png',\n str(x2a), str(y2a), str(sigma2a), str(theta2a), str(o2a), str(s2a),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n 
work_dir+\"detail_im2a\"])\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_1.png',\n str(x2a), str(y2a), str(sigma2a), str(theta2a), str(o2a), str(s2a),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im2a\"])\n self.wait_proc(proc, timeout=self.timeout) \n \n # keypoint 2b (second nearest neighbor in image 2)\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_1.png',\n str(x2b), str(y2b), str(sigma2b), str(theta2b), str(o2b), str(s2b),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im2b\"])\n self.wait_proc(proc, timeout=self.timeout) \n \n \n return 1", "def record(aline):\n # delect the Illustration\n bodylst = aline.split('[Illustration]') \n linestr = functools.reduce(lambda x, y : x + y, bodylst) \n \n # handle punctuation & capitalization\n punclst = [i for i in string.punctuation]\n punclst.remove(\"'\") \n punclst.remove('-')\n for i in punclst:\n linestr = linestr.replace(i,' ')\n alst = linestr.split()\n txtlst = [i.lower() for i in alst]\n # Considering the capitalization\n \n # record the words found and update worddict\n for j in txtlst:\n if j not in worddict:\n worddict.update({j:1})\n else:\n worddict[j] += 1", "def panelist_info(rs, screen_name, idmap, missing_info):\n\n try:\n info = rs.panelist_info(screen_name)\n if screen_name.lower() in idmap:\n yougov_id = idmap[screen_name.lower()]\n info['yougov'] = {'id': yougov_id}\n panoptic = data.us_panoptic_data(yougov_id)\n if panoptic:\n info['panoptic'] = panoptic\n return json.dumps(info)\n except KeyError:\n missing_info.add(screen_name)", "def study():\n return render_template('study.html')", "def stats_preprocessing(self):\n output = {'before_tot':[],\n 'before_unique':[],\n 'after_tot':[],\n 'after_unique':[]}\n for i in range(len(self.table)):\n description_raw = self.table.description.iloc[i].split(' ')\n clean_txt = self.table.clean_text.iloc[i].split(' ')\n\n output['before_tot'].append(len(description_raw))\n output['before_unique'].append(len(set(description_raw)))\n output['after_tot'].append(len(clean_txt))\n output['after_unique'].append(len(set(clean_txt)))\n \n print(\"\"\"Before preprocessing a description had on average {0} words with standard deviation {1}. \\n\nMoreover, the average of unique words was {2} and the standard deviation {3}.\"\"\"\\\n .format(round(mean(output['before_tot']), 2), round(stdev(output['before_tot']), 2), \n round(mean(output['before_unique']), 2), round(stdev(output['before_unique'])), 2))\n \n print(\"\"\"\\nAfter preprocessing a description has on average {0} words with standard deviation {1}. 
\\n \nThe average of unique words is now {2} and the standard deviation {3}.\"\"\"\\\n .format(round(mean(output['after_tot']), 2), round(stdev(output['after_tot']), 2), \n round(mean(output['after_unique']),2), round(stdev(output['after_unique']), 2)))\n\n return output", "def find_mentioned_pol_figures_legacy(data):\n figures_mentioned = {}\n figures = get_political_figures_legacy()\n\n for ind, row in data.iterrows():\n subject_words = row[\"MetadataSubject\"].lower()\n message_words = row[\"RawText\"].lower()\n\n for figure in figures:\n if figure + \" \" in (subject_words + message_words):\n if figure in figures_mentioned:\n figures_mentioned[figure][0].append(ind)\n else:\n figures_mentioned[figure] = [[ind]]\n\n return pd.DataFrame(figures_mentioned)", "def get_results(self) -> dict:\n # do not call super() as this subclasses panos and not base directly\n results = dict()\n results['snippets'] = dict()\n results['pan_validation'] = dict()\n context = self.context\n\n for s in self.get_snippets():\n snippet_name = s.name\n cmd = s.cmd\n # handle both validate and validate_xml here\n if snippet_name in context and 'validate' in cmd:\n if 'results' in context[snippet_name]:\n result = context[snippet_name]['results']\n label_template = context[snippet_name].get('label', '')\n # attempt to render the label using supplied context\n context[snippet_name]['label'] = s.render(label_template, context)\n if not result:\n fail_message = s.metadata.get('fail_message', 'Snippet Validation results were {{ result }}')\n context[snippet_name]['output_message'] = s.render(fail_message, context)\n elif result:\n pass_message = s.metadata.get('pass_message', 'Snippet Validation results were {{ result }}')\n context[snippet_name]['output_message'] = s.render(pass_message, context)\n else:\n context[snippet_name]['output_message'] = 'Unknown results from Snippet Validation'\n\n results['snippets'][snippet_name] = result\n\n results['pan_validation'][snippet_name] = context[snippet_name]\n\n return self._parse_output_template(results)", "def lf_findall_interp_with_sharps(report):\n\n if 'interpretation' in report.sections.keys():\n interpretation = report.sections['interpretation']\n interp_text = interpretation['text']\n return abnormal_interp_with_sharps(interp_text)\n else:\n candtext = get_section_with_name(SEIZURE_SECTION_NAMES_LOWER, report)\n if candtext:\n return abnormal_interp_with_sharps(candtext)\n else:\n return ABSTAIN_VAL", "def figures_layout(figures_dict: Dict[str, go.Figure]):\n return [\n html.Div(className='cardlive-figures', children=[\n single_figure_layout(title='Map',\n description=['Geographic distribution of the submitted genomic samples.'],\n id='figure-geographic-map-id',\n fig=figures_dict['map']\n ),\n single_figure_layout(title='Samples timeline',\n description=['Submission dates for genomic samples.'],\n id='figure-timeline-id',\n fig=figures_dict['timeline'],\n dropdowns=figure_menus_layout(\n id_type='timeline-type-select',\n options_type=[\n {'label': 'Cumulative counts', 'value': 'cumulative_counts'},\n {'label': 'Cumulative percent', 'value': 'cumulative_percent'},\n {'label': 'Counts', 'value': 'counts'},\n {'label': 'Percent', 'value': 'percent'},\n ],\n value_type='cumulative_counts',\n id_color='timeline-color-select',\n options_color=[\n {'label': 'Default', 'value': 'default'},\n {'label': 'Geographic region', 'value': 'geographic'},\n {'label': 'Organism', 'value': 'organism'},\n ],\n value_color='default'\n ),\n ),\n single_figure_layout(title='Samples 
total',\n description=['Count of samples matching selection.'],\n id='figure-totals-id',\n fig=figures_dict['totals'],\n dropdowns=figure_menus_layout(\n id_type='totals-type-select',\n options_type=[\n {'label': 'Geographic region', 'value': 'geographic'},\n {'label': 'Organism', 'value': 'organism'},\n ],\n value_type='geographic',\n id_color='totals-color-select',\n options_color=[\n {'label': 'Default', 'value': 'default'},\n {'label': 'Geographic region', 'value': 'geographic'},\n {'label': 'Organism', 'value': 'organism'},\n ],\n value_color='default'\n ),\n ),\n single_figure_layout(title='RGI results',\n description=['Percent of selected samples (',\n html.Span(id='sample-count-figure', children=[LOADING]),\n ') with the chosen type of RGI results.'\n ],\n id='figure-rgi-id',\n fig=figures_dict['rgi'],\n dropdowns=figure_menus_layout(\n id_type='rgi-type-select',\n options_type=[\n {'label': 'Drug class', 'value': 'drug_class'},\n {'label': 'AMR gene', 'value': 'amr_gene'},\n {'label': 'AMR gene family', 'value': 'amr_gene_family'},\n {'label': 'Resistance mechanism', 'value': 'resistance_mechanism'},\n ],\n value_type='drug_class',\n id_color='rgi-color-select',\n options_color=[\n {'label': 'Default', 'value': 'default'},\n {'label': 'Geographic region', 'value': 'geographic'},\n {'label': 'Organism', 'value': 'organism'},\n ],\n value_color='default'\n ),\n ),\n single_figure_layout(title='RGI intersections',\n description=['Patterns of co-occurrence of the selected RGI result type across genome subset'],\n id='figure-rgi-intersections',\n fig=figures_dict['rgi'],\n dropdowns=figure_menus_layout(\n id_type='rgi-intersection-type-select',\n options_type=[\n {'label': 'Drug class', 'value': 'drug_class'},\n {'label': 'AMR gene', 'value': 'amr_gene'},\n {'label': 'AMR gene family', 'value': 'amr_gene_family'},\n {'label': 'Resistance mechanism', 'value': 'resistance_mechanism'},\n ],\n value_type='drug_class',\n )\n ),\n ])\n ]", "def samsemPlots29and30(samsem_data,path,dict):\n \n telleoevelse = dict['telleoevelse'] if 'telleoevelse' in dict else 0\n \n # Retrieving input data for the analysis\n path_res = path \n if 'subfolder' in dict:\n subfolder = dict['subfolder']\n path_res = os.path.join(path,subfolder)\n if not os.path.exists(path_res): os.makedirs(path_res)\n \n filename_orig = dict['filename']\n coldef_type = dict['coldef_type']\n observer_groups = dict['observer_groups']\n dict.update({'investigated-item': 'observer groups'})\n \n method_ids = dict['method_ids'] if 'method_ids' in dict else sorted(set(samsem_data['sim_id'].values.astype(int)))\n \n print(\"Starting SAMSEM_RES#29+30: Analyzing data of observer groups \"+ str(keys2values(observer_groups,settings.id2ColDefShort))+\" for \" + str(settings.id2ColDefLong[dict['coldef_type']]) + \" simulation methods: \"+str(keys2values(method_ids,settings.id2Sim))+\".\")\n \n # Restricting input data to only include the simulation methods that have been chosen for the analysis\n rel_data = pandas.DataFrame()\n for method_id in method_ids:\n if (method_id != 3) and (method_id != 99): \n whatArr_tmp = [['sim_id',operator.eq,method_id],['coldef_type',operator.eq,coldef_type]]\n else:\n whatArr_tmp = [['sim_id',operator.eq,method_id]] # For the kotera and the dummy method, both protanopia and deuteranopia variants are identical. 
Thus, no distinction of coldef_type is necessary.\n rel_data_tmp = organizeArray(samsem_data,whatArr_tmp)\n rel_data = pandas.concat([rel_data_tmp, rel_data])\n samsem_data_adj = rel_data.reset_index()\n \n # Retrieving data for each of the observation groups\n i = 0; pandas_dict = {}; order_dict = {}\n for observer_group in observer_groups:\n observer_coldef_type = observer_group\n observer_coldef_type_short = settings.id2ColDefShort[observer_coldef_type]\n \n whatArr_tmp = [['observer_coldef_type',operator.eq,observer_coldef_type]]\n obs_group_data_tmp = organizeArray(samsem_data_adj,whatArr_tmp)\n \n pandas_dict.update({observer_coldef_type_short:obs_group_data_tmp})\n order_dict.update({i:observer_coldef_type_short}) ; i += 1\n \n # Plot response time data as boxplots\n if telleoevelse: print(\"Observations RT plots\")\n boxes, labels = preparePandas4RTPlots(pandas_dict, order_dict)\n plotRTGraphs(boxes,labels,path_res, dict)\n \n # Plot accuracy with confidence intervals\n c = 1.96; type = 'wilson-score'\n if telleoevelse: print(\"Observations ACC plots\")\n accuracies = preparePandas4AccuracyPlots(pandas_dict,order_dict,c,type)\n plotAccuracyGraphs(accuracies,path_res,dict,order_dict)\n \n # Make median test as csv file\n dict.update({'filename': filename_orig+\"-RT\"})\n makeMedianTest(boxes, path_res, labels, dict)\n \n # Make Chi2 contingency test as txt file\n obs_array, obs_pandas = preparePandas4Chi2(pandas_dict, order_dict)\n dict.update({'filename': filename_orig+'-ACC'})\n makePearsonChi2Contingency(obs_array, obs_pandas, labels, path_res, dict)\n \n # Make Chi2 contingency test matrix as csv file\n makePearsonChi2Contingency2x2Test(obs_array, path_res, labels, dict)\n \n # Make normality plots as Q-Q and log-Q-Q plots\n for i in range(numpy.shape(boxes)[0]):\n distribution_tmp = boxes[i]\n label_tmp = labels[i]\n dict.update({'filename': filename_orig+'-RT-'+label_tmp})\n plotQQPlot(distribution_tmp, path_res, dict)\n \n distribution_log_tmp = numpy.log(distribution_tmp)\n distribution_log_tmp = distribution_log_tmp[~numpy.isnan(distribution_log_tmp)]\n dict.update({'filename': filename_orig+'-RT-'+label_tmp+'-log'})\n plotQQPlot(distribution_log_tmp, path_res, dict)", "def get_info_game(soup):\n info = []\n\n content = soup.select(\"div.fftit.s20.b\").pop()\n info.append(content.span.text)\n info.append(re.search(r'\\((.*?)\\)', content.text).group(1))\n\n for dt, dd in zip(soup.findAll(\"dt\"), soup.findAll(\"dd\")):\n if dt.text == \"Desarrollador:\":\n info.append(dd.text)\n elif dt.text == \"Editor:\":\n info.append(dd.text)\n elif dt.text == \"Género:\":\n info.append(dd.text)\n\n info.append(soup.find(\"span\", {\"itemprop\": \"releaseDate\"}).attrs['content'])\n\n info.extend([div.span.text for div in soup.select(\"div.dtc.wi36\")])\n\n return zip([\"name\", \"platform\", \"study\", \"publisher\", \"genre\", \"releaseDate\", \"3DJuegosScore\", \"userScore\"], info)", "def get_summary(page):\n stop_words = stopwords.words('english')\n sentences = sent_tokenize(page[\"text\"])\n S = ts.build_similarity_matrix(sentences, stop_words)\n sentence_ranks = ts.pagerank(S)\n ranked_sentence_indexes = [item[0] for item in sorted(enumerate(sentence_ranks), key=lambda item: -item[1])]\n SUMMARY_SIZE = 4\n SELECTED_SENTENCES = sorted(ranked_sentence_indexes[:SUMMARY_SIZE])\n summary = itemgetter(*SELECTED_SENTENCES)(sentences)\n temp = ''\n for sentence in summary:\n temp += ''.join(sentence)\n return {\"summary\": temp}", "def get_structure_recording_vars(st3d, 
with_props=False):\n recording_vars = []\n s = st3d['s']\n nsec = s.shape[0]\n nDP = st3d['DPs'].shape[1]\n\n DPs = ['DP%02d' % i for i in range(nDP)]\n\n regions = []\n webs = []\n for ireg, reg in enumerate(st3d['regions']):\n layers = []\n for i, lname in enumerate(reg['layers']):\n varname = 'r%02d%s' % (ireg, lname)\n layers.extend([varname + 'T', varname + 'A'])\n regions.extend(layers)\n if with_props:\n regions.append('r%02d_thickness')\n regions.append('r%02d_width')\n for ireg, reg in enumerate(st3d['webs']):\n layers = []\n for i, lname in enumerate(reg['layers']):\n varname = 'w%02d%s' % (ireg, lname)\n layers.extend([varname + 'T', varname + 'A'])\n if with_props:\n regions.append('r%02d_thickness')\n regions.append('r%02d_width')\n webs.extend(layers)\n\n recording_vars.extend(DPs)\n recording_vars.extend(regions)\n recording_vars.extend(webs)\n recording_vars.append('matprops')\n recording_vars.append('failmat')\n recording_vars.append('s_st')\n\n if with_props:\n recording_vars.extend(['web_angle',\n 'web_offset',\n 'pacc_u',\n 'pacc_l',\n 'pacc_u_curv',\n 'pacc_l_curv'])\n\n return recording_vars", "def extract_ppt(self, filename):\n prs = Presentation(filename)\n\n sents = []\n for slide in prs.slides:\n for shape in slide.shapes:\n sents.append(shape.text)\n\n text = \"\"\n for sent in sents:\n for bullet in sent.split('\\n'):\n bullstr = bullet.strip()\n if len(bullstr) > 0:\n text += bullstr\n if bullstr[-1] != '.' and bullstr[-1] != '!' and bullstr[-1] != '?':\n text += '.'\n text += ' '\n\n return text", "def parse(filepath):\n wos_list = []\n\n paper_start_key = 'PT'\n paper_end_key = 'ER'\n\n\n #\n line_list = []\n try:\n with open(filepath, 'r') as f:\n line_list = f.read().splitlines()\n except IOError: # File does not exist, or couldn't be read.\n raise IOError(\"File does not exist, or cannot be read.\")\n\n if len(line_list) is 0:\n raise IOError(\"Unable to read filepath or filepath is empty.\")\n # Convert the data in the file to a usable list of dictionaries.\n # Note: first two lines of file are not related to any paper therein.\n last_field_tag = paper_start_key # initialize to something.\n for line in line_list[2:]:\n\n field_tag = line[:2]\n\n if field_tag == ' ':\n pass\n\n if field_tag == paper_start_key:\n # Then prepare for next paper.\n wos_dict = _new_wos_dict()\n\n if field_tag == paper_end_key:\n # Then add paper to our list.\n wos_list.append(wos_dict)\n\n # Handle keys like AU,AF,CR that continue over many lines.\n if field_tag == ' ':\n field_tag = last_field_tag\n\n # Add value for the key to the wos_dict: only for the five tags.\n try:\n if field_tag in ['DE', 'DI', 'TI', 'SO', 'UT','PY']:\n wos_dict[field_tag] += ' ' + str(line[3:])\n # Rest all will just get passed\n else:\n pass\n\n except (KeyError, TypeError, UnboundLocalError):\n wos_dict[field_tag] = str(line[3:])\n\n last_field_tag = field_tag\n # End line loop.\n\n # Define keys that should be lists instead of default string.\n list_keys = ['DE']\n delims = {'DE': ';'}\n\n # And convert the data at those keys into lists.\n for wos_dict in wos_list:\n for key in list_keys:\n delim = delims[key]\n try:\n key_contents = wos_dict[key]\n if delim != '\\n':\n wos_dict[key] = key_contents.split(delim)\n else:\n wos_dict[key] = key_contents.splitlines()\n except KeyError:\n # One of the keys to be converted to a list didn't exist.\n pass\n except AttributeError:\n # Again a key didn't exist but it belonged to the wos\n # data_struct set of keys; can't split a None.\n pass\n\n return 
wos_list", "def make_jwst_spec_previews(input_file, flux_scale_factor=\n FLUX_SCALE_FACTOR_DEFAULT, fluxerr_scale_factor=\n FLUXERR_SCALE_FACTOR_DEFAULT, n_consecutive=\n N_CONSECUTIVE_DEFAULT,\n output_path=OUTPUT_PATH_DEFAULT,\n output_type=OUTPUT_TYPE_DEFAULT,\n dpi_val=DPI_VAL_DEFAULT, debug=DEBUG_DEFAULT,\n full_ylabels=FULL_YLABELS_DEFAULT, optimize=\n not NOOPTIMIZE_DEFAULT, verbose=VERBOSE_DEFAULT):\n\n # Print file name, if verbose is turned on.\n if verbose:\n print(\"Input file: \" + input_file)\n\n # Derive output file name from input file name.\n output_files = []\n for out_type in output_type:\n if out_type != \"screen\":\n if out_type != 'fits':\n output_file = (path.join(output_path, \"\") +\n path.basename(input_file).split(\".fits\")[0] +\n \".\" + out_type)\n else:\n output_file = (path.join(output_path, \"\") +\n path.basename(input_file).split(\".fits\")[0] +\n \"_prev.\" + out_type)\n else:\n output_file = None\n\n output_files.append(output_file)\n\n # Print name of output file.\n if verbose:\n print(\"Output file names are:\")\n for ofile in output_files:\n if ofile is not None:\n if ofile[-4:] == '.png':\n print(\" Output file: \" + ofile)\n print(\" Output file: \" + ofile.strip('\\.png') +\n '_thumb.png')\n else:\n print(\" Output file: \" + ofile)\n else:\n print(\" Plotting to screen.\")\n\n # Read in the FITS file to determine which instrument it comes from.\n # Print the name of the instrument found in the header if verbose is turned\n # on.\n this_instrument = get_instrument_name(input_file)\n if verbose:\n print(\"Instrument: \" + this_instrument)\n\n # Read in the FITS files and create plots using the local package\n # appropriate for the instrument used in the input file.\n if this_instrument in [\"MIRI\", \"NIRSPEC\", \"NIRISS\"]:\n # Get wavelengths, fluxes, flux uncertainties.\n jwst_spectrum = readspec(input_file)\n\n # Calculate plot metrics.\n spec_plot_metrics = calc_plot_metrics(this_instrument.lower(),\n jwst_spectrum.wavelengths,\n jwst_spectrum.fluxes,\n jwst_spectrum.fluxerrs,\n jwst_spectrum.dqs,\n n_consecutive, flux_scale_factor,\n fluxerr_scale_factor)\n\n # Make \"large-size\" plot.\n for out_type, out_file in zip(output_type, output_files):\n if out_type != \"fits\":\n plotspec(jwst_spectrum, out_type, out_file,\n flux_scale_factor,\n fluxerr_scale_factor, spec_plot_metrics,\n dpi_val=dpi_val, output_size=1024, debug=debug,\n full_ylabels=full_ylabels,\n optimize=optimize)\n\n if not debug:\n # Make \"thumbnail-size\" plot, if requested.\n plotspec(jwst_spectrum, out_type, out_file,\n flux_scale_factor,\n fluxerr_scale_factor, spec_plot_metrics,\n dpi_val=dpi_val, output_size=128,\n optimize=optimize)\n else:\n raise JWSTSpecPrevError(\"'INSTRUME' keyword not understood: \" +\n this_instrument)", "def phonology(request):\n\n perspective_cid = request.params.get('perspective_client_id')\n perspective_oid = request.params.get('perspective_object_id')\n\n # Checking if we have limits on number of computed results.\n\n limit = (None if 'limit' not in request.params else\n int(request.params.get('limit')))\n\n limit_exception = (None if 'limit_exception' not in request.params else\n int(request.params.get('limit_exception')))\n\n limit_no_vowel = (None if 'limit_no_vowel' not in request.params else\n int(request.params.get('limit_no_vowel')))\n\n limit_result = (None if 'limit_result' not in request.params else\n int(request.params.get('limit_result')))\n\n # TODO: get perspective's translation and language it belongs to.\n\n # We get lexical 
entries of this perspective with markup'ed sounds.\n\n Sound = aliased(Entity, name = \"Sound\")\n PublishingSound = aliased(PublishingEntity, name = \"PublishingSound\")\n\n query = DBSession.query(LexicalEntry, Entity, Sound, PublishingEntity, PublishingSound).filter(and_(\n LexicalEntry.parent_client_id == perspective_cid,\n LexicalEntry.parent_object_id == perspective_oid,\n LexicalEntry.marked_for_deletion == False,\n Entity.parent_client_id == LexicalEntry.client_id,\n Entity.parent_object_id == LexicalEntry.object_id,\n Entity.marked_for_deletion == False,\n Entity.additional_metadata.contains({\"data_type\": \"praat markup\"}),\n PublishingEntity.client_id == Entity.client_id,\n PublishingEntity.object_id == Entity.object_id,\n PublishingEntity.published == True,\n PublishingEntity.accepted == True,\n Sound.client_id == Entity.self_client_id,\n Sound.object_id == Entity.self_object_id,\n Sound.marked_for_deletion == False,\n PublishingSound.client_id == Sound.client_id,\n PublishingSound.object_id == Sound.object_id,\n PublishingSound.published == True,\n PublishingSound.accepted == True))\n\n # We process these lexical entries in batches. Just in case, it seems that perspectives rarely have more\n # then several hundred such lexical entries.\n\n exception_counter = 0\n no_vowel_counter = 0\n result_list = list()\n\n for index, row in enumerate(query.yield_per(100)):\n\n markup_url = row.Entity.content\n sound_url = row.Sound.content\n\n cache_key = 'phonology:{0}:{1}:{2}:{3}'.format(\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id)\n\n # Checking if we have cached result for this pair of sound/markup.\n\n cache_result = CACHE.get(cache_key)\n\n if cache_result == 'no_vowel':\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}) '\n '[CACHE {7}]: no vowels\\n{8}\\n{9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n cache_key, markup_url, sound_url))\n\n no_vowel_counter += 1\n\n if (limit_no_vowel and no_vowel_counter >= limit_no_vowel or\n limit and index + 1 >= limit):\n break\n\n continue\n\n # If we have cached exception, we do the same as with absence of vowels, show its info and\n # continue.\n\n elif isinstance(cache_result, tuple) and cache_result[0] == 'exception':\n exception, traceback_string = cache_result[1:3]\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n '[CACHE {7}]: exception\\n{8}\\n{9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n cache_key, markup_url, sound_url))\n\n log.debug(traceback_string)\n\n exception_counter += 1\n\n if (limit_exception and exception_counter >= limit_exception or\n limit and index + 1 >= limit):\n break\n\n continue\n\n # If we actually have the result, we use it and continue.\n\n elif cache_result:\n\n result_string = '\\n'.join(\n 'tier {0} \\'{1}\\': {2}'.format(tier_number, tier_name,\n \n tier_result_seq_list if not isinstance(tier_result_seq_list, list) else\n tier_result_seq_list[0] if len(tier_result_seq_list) <= 1 else\n ''.join('\\n {0}'.format(tier_result) for tier_result in tier_result_seq_list))\n\n for tier_number, tier_name, tier_result_seq_list in cache_result)\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}) '\n '[CACHE 
{7}]:\\n{8}\\n{9}\\n{10}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n cache_key, markup_url, sound_url, result_string))\n\n result_list.append(cache_result)\n\n if (limit_result and len(result_list) >= limit_result or\n limit and index + 1 >= limit):\n break\n\n continue\n\n try:\n # Getting markup, checking for each tier if it needs to be processed.\n\n markup_bytes = urllib.request.urlopen(urllib.parse.quote(markup_url, safe = '/:')).read()\n\n textgrid = pympi.Praat.TextGrid(xmax = 0)\n textgrid.from_file(\n io.BytesIO(markup_bytes),\n codec = chardet.detect(markup_bytes)['encoding'])\n\n tier_data_list = []\n vowel_flag = False\n\n for tier_number, tier_name in textgrid.get_tier_name_num():\n\n raw_interval_list = textgrid.get_tier(tier_number).get_all_intervals()\n raw_interval_seq_list = [[]]\n\n # Splitting interval sequence on empty intervals.\n\n for raw_index, interval in enumerate(raw_interval_list):\n\n if len(interval[2].strip()) <= 0:\n if len(raw_interval_seq_list[-1]) > 0:\n raw_interval_seq_list.append([])\n\n else:\n raw_interval_seq_list[-1].append((raw_index, interval))\n\n if len(raw_interval_seq_list[-1]) <= 0:\n del raw_interval_seq_list[-1]\n\n # Selecting interval sequences for analysis, checking if we have unusual markup.\n \n interval_seq_list = []\n interval_idx_to_raw_idx = dict()\n\n unusual_markup_flag = False\n unusual_markup_list = []\n\n for raw_interval_seq in raw_interval_seq_list:\n\n interval_seq_list.append([])\n interval_idx_to_raw_idx[len(interval_seq_list) - 1] = {}\n\n for partial_raw_index, (raw_index, interval) in enumerate(raw_interval_seq):\n\n interval_text = interval[2].strip()\n\n # Accepting interval if its text contains at least one vowel, and is short enough or\n # is a valid phonetic transcription.\n\n transcription_check = re.fullmatch(transcription_re, interval_text)\n\n if (len(interval_text) > 0 and\n any(character in vowel_set for character in interval_text) and\n (len(interval_text) <= 2 or transcription_check)):\n\n interval_seq_list[-1].append(interval)\n\n sequence_index = len(interval_seq_list) - 1\n interval_index = len(interval_seq_list[-1]) - 1\n\n interval_idx_to_raw_idx[(sequence_index, interval_index)] = raw_index\n interval_idx_to_raw_idx[sequence_index][interval_index] = partial_raw_index\n\n # Noting if the interval contains unusual (i.e. 
non-transcription) markup.\n\n elif not transcription_check:\n\n unusual_markup_flag = True\n unusual_markup_list.append((raw_index, interval))\n\n transcription_list = [text for begin, end, text in raw_interval_list]\n transcription = ''.join(transcription_list)\n\n selected_list = [text\n for interval_list in interval_seq_list\n for begin, end, text in interval_list]\n\n selected = ''.join(selected_list)\n\n # If we have intervals with unusual markup, we report them.\n\n if unusual_markup_flag:\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'tier {7} \\'{8}\\' has interval(s) with unusual transcription text: '\n '{9} / {10}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n tier_number, tier_name, transcription, dict(unusual_markup_list)))\n\n # If the markup does not have any vowels, we note it and also report it.\n\n if all(character not in vowel_set for character in transcription):\n\n tier_data_list.append((tier_number, tier_name, 'no_vowel'))\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'tier {7} \\'{8}\\' doesn\\'t have any vowel markup: {9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n tier_number, tier_name, transcription_list))\n\n # It is also possible that while full transcription has vowels, intervals selected for\n # analysis do not. In that case we also note it and report it.\n\n elif not any(character in vowel_set for character in selected):\n\n tier_data_list.append((tier_number, tier_name, 'no_vowel_selected'))\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'tier {7} \\'{8}\\' intervals to be processed don\\'t have any vowel markup: '\n 'markup {9}, selected {10}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n tier_number, tier_name,\n transcription_list, selected_list))\n\n # Otherwise we store tier data to be used during processing of the sound file.\n\n else:\n tier_data_list.append((tier_number, tier_name,\n (raw_interval_list, raw_interval_seq_list, interval_seq_list,\n interval_idx_to_raw_idx, transcription)))\n\n vowel_flag = True\n\n # If there are no tiers with vowel markup, we skip this sound-markup file altogether.\n\n if not vowel_flag:\n\n CACHE.set(cache_key, 'no_vowel')\n no_vowel_counter += 1\n\n if (limit_no_vowel and no_vowel_counter >= limit_no_vowel or\n limit and index + 1 >= limit):\n break\n\n continue\n\n # Otherwise we retrieve the sound file and analyse each vowel-containing markup.\n # Partially inspired by source code at scripts/convert_five_tiers.py:307.\n\n sound = None\n with tempfile.NamedTemporaryFile() as temp_file:\n\n sound_file = urllib.request.urlopen(urllib.parse.quote(sound_url, safe = '/:'))\n temp_file.write(sound_file.read())\n temp_file.flush()\n\n sound = AudioPraatLike(pydub.AudioSegment.from_wav(temp_file.name))\n\n tier_result_list = []\n\n for tier_number, tier_name, tier_data in tier_data_list:\n\n if tier_data == 'no_vowel' or tier_data == 'no_vowel_selected':\n tier_result_list.append((tier_number, tier_name, tier_data))\n continue\n\n # Analyzing vowel sounds of each interval sequence.\n\n (raw_interval_list, 
raw_interval_seq_list, interval_seq_list, interval_idx_to_raw_idx,\n transcription) = tier_data\n\n tier_result_list.append((tier_number, tier_name, []))\n\n for seq_index, (raw_interval_list, interval_list) in enumerate(zip(\n raw_interval_seq_list, interval_seq_list)):\n\n if len(interval_list) <= 0:\n continue\n\n (max_intensity_index, max_intensity, max_length_index, max_length) = \\\n find_max_interval_praat(sound, interval_list)\n\n max_intensity_interval = interval_list[max_intensity_index]\n max_length_interval = interval_list[max_length_index]\n\n max_intensity_f1_f2 = sound.get_interval_formants(*max_intensity_interval[:2])\n max_length_f1_f2 = sound.get_interval_formants(*max_length_interval[:2])\n\n # Compiling results.\n\n max_length_str = '{0} {1:.3f} [{2}]'.format(\n max_length_interval[2], max_length,\n len(''.join(text for index, (begin, end, text) in\n raw_interval_list[:interval_idx_to_raw_idx[seq_index][max_length_index]])))\n\n max_intensity_str = '{0} {1:.3f} [{2}]'.format(\n max_intensity_interval[2],\n max_intensity,\n len(''.join(text for index, (begin, end, text) in\n raw_interval_list[:interval_idx_to_raw_idx[seq_index][max_intensity_index]])))\n\n tier_result_list[-1][2].append([\n ''.join(text for index, (begin, end, text) in raw_interval_list),\n max_length_str,\n '{0:.3f}'.format(max_length_f1_f2[0]),\n '{0:.3f}'.format(max_length_f1_f2[1]),\n max_intensity_str,\n '{0:.3f}'.format(max_intensity_f1_f2[0]),\n '{0:.3f}'.format(max_intensity_f1_f2[1]),\n '+' if max_intensity_index == max_length_index else '-'])\n\n # Saving result.\n\n result_list.append(tier_result_list)\n CACHE.set(cache_key, tier_result_list)\n\n result_string = '\\n'.join(\n 'tier {0} \\'{1}\\': {2}'.format(tier_number, tier_name,\n \n tier_result_seq_list if not isinstance(tier_result_seq_list, list) else\n tier_result_seq_list[0] if len(tier_result_seq_list) <= 1 else\n ''.join('\\n {0}'.format(tier_result) for tier_result in tier_result_seq_list))\n\n for tier_number, tier_name, tier_result_seq_list in tier_result_list)\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}):'\n '\\n{7}\\n{8}\\n{9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n markup_url, sound_url, result_string))\n\n # Stopping earlier, if required.\n\n if (limit_result and len(result_list) >= limit_result or\n limit and index + 1 >= limit):\n break\n\n except Exception as exception:\n\n #\n # NOTE\n #\n # Exceptional situations encountered so far:\n #\n # 1. TextGrid file actually contains sound, and wav file actually contains textgrid markup.\n #\n # Perspective 330/4, LexicalEntry 330/7, sound-Entity 330/2328, markup-Entity 330/6934\n #\n # 2. 
Markup for one of the intervals contains a newline \"\\n\", and pympi fails to parse it.\n # Praat parses such files without problems.\n #\n # Perspective 330/4, LexicalEntry 330/20, sound-Entity 330/6297, markup-Entity 330/6967\n #\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'exception\\n{7}\\n{8}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n markup_url, sound_url))\n\n # if we encountered an exception, we show its info and remember not to try offending\n # sound/markup pair again.\n\n traceback_string = ''.join(traceback.format_exception(\n exception, exception, exception.__traceback__))[:-1]\n\n log.debug(traceback_string)\n\n CACHE.set(cache_key, ('exception', exception,\n traceback_string.replace('Traceback', 'CACHEd traceback')))\n\n exception_counter += 1\n\n if (limit_exception and exception_counter >= limit_exception or\n limit and index + 1 >= limit):\n break\n\n log.debug('phonology {0}/{1}: {2} result{3}, {4} no vowels, {5} exceptions'.format(\n perspective_cid, perspective_oid,\n len(result_list), '' if len(result_list) == 1 else 's',\n no_vowel_counter, exception_counter))\n\n # If we have no results, we indicate the situation and also show number of failures and number of\n # markups with no vowels.\n\n if not result_list:\n request.response.status = HTTPPreconditionFailed.code\n\n return {\n \"error\": \"no markups for this query\",\n \"exception_counter\": exception_counter,\n \"no_vowel_counter\": no_vowel_counter}\n\n # Otherwise we create and then serve Excel file.\n\n excel_book = xlwt.Workbook(encoding = \"utf-8\")\n sheet = excel_book.add_sheet(\"Sheet 1\")\n\n sheet.write(0, 0, 'Transcription')\n sheet.write(0, 1, 'Longest (seconds) interval')\n sheet.write(0, 2, 'F1 (Hz)')\n sheet.write(0, 3, 'F2 (Hz)')\n sheet.write(0, 4, 'Highest intensity (dB) interval')\n sheet.write(0, 5, 'F1 (Hz)')\n sheet.write(0, 6, 'F2 (Hz)')\n sheet.write(0, 7, 'Coincidence')\n\n row_counter = 1\n\n for tier_result_list in result_list:\n for tier_number, tier_name, tier_result_seq_list in tier_result_list:\n\n if tier_result_seq_list == 'no_vowel':\n continue\n\n for tier_data in tier_result_seq_list:\n for index, tier_data_str in enumerate(tier_data):\n sheet.write(row_counter, index, tier_data_str)\n\n row_counter += 1\n\n # Formatting column widths.\n\n sheet.col(0).width = 24 * 256\n sheet.col(1).width = 24 * 256\n sheet.col(2).width = 12 * 256\n sheet.col(3).width = 12 * 256\n sheet.col(4).width = 24 * 256\n sheet.col(5).width = 12 * 256\n sheet.col(6).width = 12 * 256\n sheet.col(7).width = 12 * 256\n\n excel_stream = io.BytesIO()\n excel_book.save(excel_stream)\n excel_stream.seek(0)\n\n # See http://stackoverflow.com/questions/2937465/what-is-correct-content-type-for-excel-files for Excel\n # content-type.\n\n response = Response(content_type = 'application/vnd.ms-excel')\n\n response.app_iter = FileIter(excel_stream)\n response.headers['Content-Disposition'] = \"attachment; filename=phonology.xls\"\n\n return response", "def app_view(request):\n prior_queries = (request.dbsession.query(Sentiments, User)\n .join(User)\n .filter(User.username == request.authenticated_userid)\n .order_by(Sentiments.id.desc())\n .all())\n sentient_bodies = (query[0].body for query in prior_queries)\n sentimental_parts = (percentage(query[0].negative_sentiment) for query in prior_queries)\n logical_bits = 
(percentage(query[0].positive_sentiment) for query in prior_queries)\n sublime_insight = zip(sentient_bodies, sentimental_parts, logical_bits)\n if request.method == \"POST\":\n text_body = request.POST['body']\n url = \"http://text-processing.com/api/sentiment/\"\n payload = {'text': text_body}\n response = requests.request('POST', url, data=payload, headers=None)\n response_dict = json.loads(response.text)\n user_query = request.dbsession.query(User).filter(User.username == request.authenticated_userid).one().id\n sentiment_entry = Sentiments(\n body=text_body,\n negative_sentiment=response_dict['probability']['neg'],\n positive_sentiment=response_dict['probability']['pos'],\n user_id=user_query\n )\n request.dbsession.add(sentiment_entry)\n response_dict['probability']['neg'] = percentage(response_dict['probability']['neg'])\n response_dict['probability']['pos'] = percentage(response_dict['probability']['pos'])\n return {'response_dict': response_dict,\n 'text_body': text_body,\n 'consummate_awareness': sentient_bodies,\n 'conscious whole': sentimental_parts,\n 'divine oneness': logical_bits,\n 'hallowed_provenance': sublime_insight}\n return {'consummate_awareness': sentient_bodies,\n 'conscious whole': sentimental_parts,\n 'divine oneness': logical_bits,\n 'hallowed_provenance': sublime_insight}", "def index2well(self, layout_name: str) -> dict:\n return {v:k for k,v in self['well2index'][self['layout_format'][layout_name]].items() }", "def annual_summary(self):\n \n #Initialize dict with info about all of year's storms\n hurdat_year = {'id':[],'operational_id':[],'name':[],'max_wspd':[],'min_mslp':[],'category':[],'ace':[]}\n \n #Search for corresponding entry in keys\n count_ss_pure = 0\n count_ss_partial = 0\n iterate_id = 1\n for key in self.dict.keys():\n\n #Retrieve info about storm\n temp_name = self.dict[key]['name']\n temp_vmax = np.array(self.dict[key]['vmax'])\n temp_mslp = np.array(self.dict[key]['mslp'])\n temp_type = np.array(self.dict[key]['type'])\n temp_time = np.array(self.dict[key]['date'])\n temp_ace = self.dict[key]['ace']\n\n #Get indices of all tropical/subtropical time steps\n idx = np.where((temp_type == 'SS') | (temp_type == 'SD') | (temp_type == 'TD') | (temp_type == 'TS') | (temp_type == 'HU'))\n\n #Get times during existence of trop/subtrop storms\n if len(idx[0]) == 0: continue\n trop_time = temp_time[idx]\n if 'season_start' not in hurdat_year.keys():\n hurdat_year['season_start'] = trop_time[0]\n hurdat_year['season_end'] = trop_time[-1]\n\n #Get max/min values and check for nan's\n np_wnd = np.array(temp_vmax[idx])\n np_slp = np.array(temp_mslp[idx])\n if len(np_wnd[~np.isnan(np_wnd)]) == 0:\n max_wnd = np.nan\n max_cat = -1\n else:\n max_wnd = int(np.nanmax(temp_vmax[idx]))\n max_cat = convert_category(np.nanmax(temp_vmax[idx]))\n if len(np_slp[~np.isnan(np_slp)]) == 0:\n min_slp = np.nan\n else:\n min_slp = int(np.nanmin(temp_mslp[idx]))\n\n #Append to dict\n hurdat_year['id'].append(key)\n hurdat_year['name'].append(temp_name)\n hurdat_year['max_wspd'].append(max_wnd)\n hurdat_year['min_mslp'].append(min_slp)\n hurdat_year['category'].append(max_cat)\n hurdat_year['ace'].append(temp_ace)\n hurdat_year['operational_id'].append(self.dict[key]['operational_id'])\n \n #Handle operational vs. 
non-operational storms\n\n #Check for purely subtropical storms\n if 'SS' in temp_type and True not in np.isin(temp_type,['TD','TS','HU']):\n count_ss_pure += 1\n\n #Check for partially subtropical storms\n if 'SS' in temp_type:\n count_ss_partial += 1\n\n #Add generic season info\n hurdat_year['season_storms'] = len(hurdat_year['name'])\n narray = np.array(hurdat_year['max_wspd'])\n narray = narray[~np.isnan(narray)]\n hurdat_year['season_named'] = len(narray[narray>=34])\n hurdat_year['season_hurricane'] = len(narray[narray>=65])\n hurdat_year['season_major'] = len(narray[narray>=100])\n hurdat_year['season_ace'] = np.sum(hurdat_year['ace'])\n hurdat_year['season_subtrop_pure'] = count_ss_pure\n hurdat_year['season_subtrop_partial'] = count_ss_partial\n \n #Return object\n return hurdat_year", "def generate_explore_views(self):\n views = []\n if self._safety_surface[\"type\"] == \"circle\":\n # Generate points evently distributed on the circle\n center = self._safety_surface[\"center\"]\n center = Vector3r(center[0], center[1], center[2])\n x0 = center.x_val\n y0 = center.y_val\n z0 = center.z_val\n radius = self._safety_surface[\"radius\"]\n TOTAL_NUM = self._config[\"point_num\"]\n ROUND_NUM = self._config.get(\"round_num\", 1)\n delta_theta = 2 * math.pi / (TOTAL_NUM / ROUND_NUM)\n\n for i in range(TOTAL_NUM):\n theta = delta_theta * i\n x = x0 + radius * math.sin(theta)\n y = y0 + radius * math.cos(theta)\n pitch = -45\n views.append(\n {\n \"position\": Vector3r(x, y, z0),\n \"yaw\": -1 * (0.5 * math.pi + theta),\n \"pitch\": pitch,\n }\n )\n elif self._safety_surface[\"type\"] == \"cylinder\":\n # Generate points spiral the cylinder\n top_center = self._safety_surface[\"top_center\"]\n top_center = Vector3r(top_center[0], top_center[1], top_center[2])\n x0 = top_center.x_val\n y0 = top_center.y_val\n bottom = self._safety_surface.get(\"bottom\", 0)\n height = top_center.z_val - bottom\n radius = self._safety_surface[\"radius\"]\n TOTAL_NUM = self._config[\"point_num\"]\n ROUND_NUM = self._config.get(\"round_num\", 1)\n START_PITCH = self._config.get(\"start_pitch\", -45)\n END_PITCH = self._config.get(\"end_pitch\", 45)\n delta_theta = 2 * math.pi / (TOTAL_NUM / ROUND_NUM)\n delta_height = height / (TOTAL_NUM - 1)\n delta_pitch = (END_PITCH - START_PITCH) / TOTAL_NUM\n for i in range(TOTAL_NUM):\n theta = delta_theta * i\n x = x0 + radius * math.sin(theta)\n y = y0 + radius * math.cos(theta)\n z = bottom + i * delta_height\n pitch = START_PITCH + i * delta_pitch\n views.append(\n {\n \"position\": Vector3r(x, y, z),\n \"yaw\": -1 * (0.5 * math.pi + theta),\n \"pitch\": pitch / 180 * math.pi,\n }\n )\n else:\n print(\n \"OfflineNavigator: unknown type of safety_surface (%s)\"\n % self._safety_surface[\"type\"]\n )\n\n return views", "def proc_employment_summary(summary: Tag) -> Dict:\n\n xp_record = dict()\n xp_record['position'] = summary.find('h3').text.strip()\n company = summary.find_all('p', {'class': 'pv-entity__secondary-title'})[0]\n xp_record['company'] = \"; \".join( [ line.strip() for line in company.text.split('\\n')\n if line.strip() != ''] )\n # %%\n for xp_line in summary.find_all('h4'):\n fld_name, value = [span.text.strip() for span in xp_line.find_all('span') ]\n if fld_name == 'Fechas de empleo':\n xp_record['period_raw'] = value\n period = _extract_period( value )\n xp_record['period'] = period\n # print( period )\n xp_record['duration'] = np.round( (period[1] - period[0]).total_seconds()\n / SECS_IN_YEAR, 2)\n elif fld_name == 'Duración del empleo':\n 
xp_record['duration_raw'] = value\n elif fld_name == 'Ubicación':\n xp_record['location_raw'] = value\n # print( f'location: {value}')\n elif fld_name.startswith('LinkedIn me ayud'):\n continue\n else:\n print( \"proc_employment_summary: \", fld_name, value )\n # %%\n # pprint( xp_record )\n # %%\n return xp_record\n # %%", "def treatments_dict():\n treats = 2\n data = pd.DataFrame(\n data={\"id\": np.arange(100), \"block\": [0] * 40 + [1] * 30 + [2] * 30}\n )\n idx_col = \"id\"\n size = 90\n\n treatments = stochatreat(\n data=data,\n block_cols=[\"block\"],\n treats=treats,\n idx_col=idx_col,\n size=size,\n random_state=42,\n )\n\n treatments_dict = {\n \"data\": data,\n \"idx_col\": idx_col,\n \"size\": size,\n \"treatments\": treatments,\n }\n\n return treatments_dict", "def lf_findall_interp_with_spikes(report):\n\n if 'interpretation' in report.sections.keys():\n interpretation = report.sections['interpretation']\n interp_text = interpretation['text']\n return abnormal_interp_with_spikes(interp_text)\n else:\n candtext = get_section_with_name(SEIZURE_SECTION_NAMES_LOWER, report)\n if candtext:\n return abnormal_interp_with_spikes(candtext)\n else:\n return ABSTAIN_VAL", "def get_hotspot_provenance(self, suptitle, scenario, ancestor_files):\n caption = (f\"{suptitle}. Calculated for seasons \"\n f\"{self.seasons[0].upper()}, \"\n f\"{self.seasons[1].upper()} and {self.seasons[2].upper()} \"\n f\"in the future periods {self.cfg['future_periods'][0]} \"\n f\"and {self.cfg['future_periods'][1]} \"\n f\"for CMIP5 {self.formatter(f'cmip5-{scenario}')} \"\n f\"and CMIP6 {self.formatter(f'cmip6-{scenario}')}\")\n\n record = {\n 'caption': caption,\n 'statistics': ['anomaly', 'diff'],\n 'domains': ['reg'],\n 'plot_types': ['map'],\n 'authors': [\n 'cos_josep',\n ],\n 'references': [\n 'cos22esd',\n ],\n 'ancestors': ancestor_files,\n }\n return record", "def getDictWells(self):\n #Method begins here\n #nx=self.__grid['nx'] #From the geometry in grid\n ny=self.__grid['ny']\n nz=self.__grid['nz']\n minx=self.__grid['ox']\n miny=self.__grid['oy']\n minz=self.__grid['oz']\n rx=self.__grid['dx']\n ry=self.__grid['dy']\n rz=self.__grid['dz']\n \n # well package\n # Remember to use zero-based layer, row, column indices!\n lcoordw=np.zeros((self.__nwells,3),dtype=np.int32)\n for i in range (self.__nwells):\n lcoordw[i,0]=floor((self.__dflst.iloc[i,3]-minx)/rx)\n #In MODFLOW y ans z coordinates are inverted\n lcoordw[i,1]=floor((miny+ry*ny-self.__dflst.iloc[i,4])/ry)\n lcoordw[i,2]=floor((minz+rz*nz-self.__dflst.iloc[i,5])/rz)\n \n nper=self.__df.getForcPer()\n wel_sp = {} \n for i in range(nper):\n lst=[]\n for j in range(self.__nwells):\n pumping_rate=self.__dfwells.iloc[i+1,j+1]\n lst.append( [lcoordw[j,2], lcoordw[j,1], lcoordw[j,0], pumping_rate] )\n wel_sp[i]=lst\n print(wel_sp)\n \n print('*--- Succesfull reading of wells ---*')\n \n return wel_sp", "def intro_slide(prs):\n # pylint: disable=too-many-locals\n slide = prs.slides.add_slide(prs.slide_layouts[BLANK_SLIDE])\n slide = slide_title_header(\n slide, 'Explanation of Analysis', include_time=False)\n\n explanation_file_path = os.path.join(\"resources\", \"metric_explanation.txt\")\n\n if os.path.exists(explanation_file_path):\n with open(explanation_file_path, 'r', encoding='utf-8') as filer:\n content = filer.readlines()\n\n content2 = []\n for cont in content:\n cropped = cont.split('\\r\\n')[0]\n content2.append(cropped)\n content = content2\n filer.close()\n\n top = Inches(0.81)\n left = Inches(0.42)\n width = Inches(11)\n height = 
Inches(6)\n txt_box = slide.shapes.add_textbox(left, top, width, height)\n text_frame = txt_box.text_frame\n text_frame.word_wrap = True\n\n paragraph = text_frame.paragraphs[0]\n paragraph.text = content[0]\n paragraph.font.size = Pt(12)\n paragraph.font.bold = True\n\n for i in range(1, len(content)):\n paragraph = text_frame.add_paragraph()\n paragraph.text = content[i]\n\n if i == 5:\n paragraph.font.size = Pt(12)\n paragraph.font.bold = True\n else:\n paragraph.font.size = Pt(10)\n paragraph.font.bold = False\n\n else:\n print(f\"{WARNING}WARNING - file 'metric_explanation.txt' not found.{NORMAL}\")\n\n return prs", "def lf_abnormal_interp_with_sharps(report):\n if 'interpretation' in report.sections.keys():\n interpretation = report.sections['interpretation']\n interp_text = interpretation['text']\n return abnormal_interp_with_sharps(interp_text)\n elif 'summary' in report.sections:\n return abnormal_interp_with_sharps(report.sections['summary']['text'])\n elif 'findings' in report.sections: # fall back to look in the findings \n if 'summary' in report.sections['findings']: # fall back to look for a summary instead\n return abnormal_interp_with_sharps(report.sections['findings']['summary'])\n if 'impression' in report.sections['findings']:\n return abnormal_interp_with_sharps(report.sections['findings']['impression'])\n return ABSTAIN_VAL\n elif 'narrative' in report.sections: # fall back to look in the findings \n ky = 'narrative'\n if 'summary' in report.sections[ky]: # fall back to look for a summary instead\n return abnormal_interp_with_sharps(report.sections[ky]['summary'])\n if 'impression' in report.sections[ky]:\n return abnormal_interp_with_sharps(report.sections[ky]['impression']) \n return ABSTAIN_VAL \n else:\n return ABSTAIN_VAL", "def _read_stix_spec_file(spec_file):\n sdict = {}\n with fits.open(spec_file) as hdul:\n for i in range(5):\n sdict[str(i)] = [hdul[i].header, hdul[i].data]\n return sdict", "def extract_summary(self):\n metadata = {}\n\n ## document Id\n documentId = self.tree.find(\"./id\")\n documentId = documentId.attrib['root'] if documentId is not None and \"root\" in documentId.attrib else \"\"\n metadata[\"documentId\"] = documentId\n\n ## setId\n setid = self.tree.find(\"./setId\")\n setid = setid.attrib['root'] if setid is not None and \"root\" in setid.attrib else \"\"\n metadata[\"setId\"] = setid\n\n ## version number\n splversion = self.tree.find(\"./versionNumber\")\n versionNumber = \"\"\n if splversion is not None:\n if \"value\" in splversion.attrib:\n versionNumber = splversion.attrib[\"value\"]\n metadata[\"versionNumber\"] = versionNumber\n\n ## product type \n code = self.tree.find(\"./code\")\n check_if_attrib_exists = lambda x, key: x[key] if key in x else ''\n product_type = check_if_attrib_exists(code.attrib, \"displayName\")\n metadata[\"productType\"] = product_type\n\n ## title\n title_text = self.tree_et.xpath(\"./title//text()\")\n title = (\" \".join([self.strip_newline_tab(t) for t in title_text]) if len(title_text) > 0 else \"\")\n metadata[\"title\"] = title\n\n ## manufacturer\n manufacturer = self.tree.find(\"./author//representedOrganization/name\")\n if manufacturer != None and manufacturer.text != None:\n manufacturer = self.strip_newline_tab(manufacturer.text)\n else:\n manufacturer = \"\"\n metadata[\"manufacturer\"] = manufacturer\n\n ## effectivetime\n effectiveTime = self.tree_et.xpath(\"./effectiveTime/@value\")\n effectiveTime = self.__normalize_date(effectiveTime)\n\n metadata[\"effectiveTime\"] = effectiveTime\n 
metadata[\"publishedDate\"] = effectiveTime\n\n ## From manufacturedProduct section\n brand_name = self.tree_et.xpath(\".//manufacturedProduct//name\")\n brand_name = self.strip_newline_tab(brand_name[0].text) if len(brand_name) > 0 else \"\"\n metadata[\"drugName\"] = brand_name\n\n route = self.tree_et.xpath(\".//manufacturedProduct//formCode/@code\")\n route = self.strip_newline_tab(route[0]) if len(route) > 0 else \"\"\n metadata[\"routeOfAdministration\"] = route\n\n product_ndc = self.tree_et.xpath(\".//manufacturedProduct//code/@code\")\n product_ndc = self.strip_newline_tab(product_ndc[0]) if len(product_ndc) > 0 else \"\"\n metadata[\"ndcCode\"] = product_ndc\n\n generic_name = self.tree_et.xpath(\".//manufacturedProduct//asEntityWithGeneric//genericMedicine/name\")\n generic_name = self.strip_newline_tab(generic_name[0].text) if len(generic_name) > 0 else \"\"\n metadata[\"genericName\"] = generic_name\n\n ## dosage form\n dosage_form = self.tree_et.xpath(\".//manufacturedProduct//formCode/@displayName\")\n dosage_form = dosage_form[0] if len(dosage_form) > 0 else \"\"\n metadata[\"dosageForm\"] = dosage_form\n\n # active ingredients\n substance_name = sorted([self.strip_newline_tab(a.text) for a in\n self.tree_et.xpath(\".//.//manufacturedProduct//activeMoiety/activeMoiety/name\")])\n substance_name = \", \".join(set(substance_name))\n metadata[\"substanceName\"] = substance_name\n\n ## inactive ingredients\n inactive_ingredients = sorted([self.strip_newline_tab(inactive.text) for inactive in self.tree_et.xpath(\n \".//manufacturedProduct//inactiveIngredient/inactiveIngredientSubstance/name\")])\n\n if len(inactive_ingredients) == 0:\n inactive_ingredients = \"\"\n else:\n inactive_ingredients = \",\".join(set(inactive_ingredients))\n\n metadata[\"inactiveIngredients\"] = inactive_ingredients\n\n ## other ingredients\n ingredients = sorted([self.strip_newline_tab(ingredient.text) for ingredient in\n self.tree_et.xpath(\".//manufacturedProduct//ingredient/ingredientSubstance/name\")])\n\n if len(ingredients) == 0:\n ingredients = \"\"\n else:\n ingredients = \", \".join(set(ingredients))\n metadata[\"ingredients\"] = ingredients\n\n # marketing_category\n marketing_category = self.tree_et.xpath(\".//manufacturedProduct/subjectOf/approval/code/@displayName\")\n marketing_category = self.strip_newline_tab(marketing_category[0]) if len(marketing_category) > 0 else \"\"\n metadata[\"marketingCategory\"] = marketing_category\n\n # consumed in\n consumed_in = self.tree_et.xpath(\n \".//manufacturedProduct//consumedIn/substanceAdministration/routeCode/@displayName\")\n consumed_in = consumed_in[0] if len(consumed_in) > 0 else \"\"\n metadata[\"consumedIn\"] = consumed_in\n\n # revision date\n marketing_date = self.tree_et.xpath(\".//manufacturedProduct//marketingAct/effectiveTime/low/@value\")\n marketing_date = self.__normalize_date(marketing_date)\n metadata[\"marketingDate\"] = marketing_date\n\n return metadata", "def drawsheet_parse(text):\n logging.debug(\"################ PARSING DRAW ##################\")\n\n month = \"({})\".format('|'.join(RE_MONTHS))\n\n patterns = (\n ('surface', r\"Hard|Outdoor Hard|Red Clay|Green Clay|Clay|\"\n r\"Grass|Indoor Hard|Carpet|Indoor Carpet\"),\n ('date', r\"\\d{1,2}(th)? ?- ?\\d{1,2}(th)? \" + month + r\",? \\d{4}|\" +\n month + r\" \\d{1,2}(th)? ?- ?\\d{1,2}(th)?,? 
\\d{4}\"),\n ('year', r\"\\d{4}\"),\n ('seed', r\"(?<=\\[)\\d+(?=\\])|(?<=\\[ )\\d+(?=\\ ])\"),\n ('round', r\"(1st|2nd|3rd) Round|1/8|1/4|1/2\"),\n ('class', r\"WTA( [A-Za-z0-9]+)*|US Open|\"\n r\"French Open|Australian Open|Wimbledon\"),\n ('orderedname', r\"[A-Z][a-z]+(( |-)[A-Z][a-z]+)*\"\n r\" ([A-Z]+(( |-)[A-Z]+)*)(?= |$)\"),\n ('fullname', r\"(?:^| )[Bb][Yy][Ee](?:$| )|([A-Z]+(( |-)[A-Z]+)*,\\s\"\n r\"[A-Z][a-zA-Z]*(( |-)([A-Z][a-zA-Z]*[a-z]))*)\"),\n #('shortname', r\"[A-Z]\\. ?[A-Z]+(( |-)[A-Z]+)*\"),\n ('shortname', r\"[A-Z]\\. ?[A-Za-z]+(( |-)[A-Za-z]+)*\"),\n ('country', r\"(?:(?!RET)[A-Z]{3}|\\([A-Z]{3}\\))(?= |$)\"),\n ('score',\n r\"([0-7][/-]?[0-7](\\(\\d+\\))?)( [0-7][/-]?[0-7](\\(\\d+\\))?){0,2}\"\n r\" ([Rr]et\\.|[Rr]et'd|[Rr]etired|[Rr]et)\"\n r\"|([0-7][/-]?[0-7](\\(\\d+\\))?)( [0-7][/-]?[0-7](\\(\\d+\\))?){1,2}\"\n r\"|([0-7]/?[0-7](\\(\\d+\\))? ){2}[\\d+]/[\\d+]\"\n r\"|(wo.|[Ww]alkover)\"),\n ('prize', r\"\\$[0-9,]+(?= |$)\"),\n ('number', r\"\\d{1,3}\\.?(?= |$)\"),\n ('city', r\"[A-Z][A-Za-z]*( [A-Z][A-Za-z]+)*,\"\n r\"( [A-Z][A-Z],)? (USA|[A-Z][a-z]*)\"),\n ('status', r\"(^|(?<=\\[|\\(| ))(Q|LL|W|WC)((?=\\]|\\)| )|$)\"),\n ('string', r\"([A-Za-z&,\\']+)( [A-Z&a-z$,]+)*\"),\n )\n \n pattern = re.compile('|'.join([\"(?P<{}>{})\".format(k, v) \n for k, v in patterns]))\n data = { k: [] for k, v in patterns}\n\n short_to_fullnames = {}\n ordered_to_fullnames = {}\n def add_to_fullname_conversion_table(fullname, x, y):\n nm = re.match('(.*), (.)', fullname)\n name = nm.group(2) + \". \" + nm.group(1)\n if name not in short_to_fullnames:\n short_to_fullnames[name] = []\n\n short_to_fullnames[name] += [(fullname, (x,y))]\n\n nm = re.match('(.*), (.*)', fullname)\n name = nm.group(2) + \" \" + nm.group(1)\n ordered_to_fullnames[name] = fullname\n\n\n re_skip = re.compile(r'Seeded +Players')\n # Find scores, names, etc\n y = 0\n skipping_page = False\n\n # collect the data\n width = 0\n lines = text.split('\\n');\n for line in lines:\n if skipping_page:\n if chr(12) in line:\n skipping_page = False\n else:\n continue\n\n if (re_skip.search(line)):\n # skip the seeding/info section, it's useless\n skipping_page = True\n continue;\n\n for m in pattern.finditer(line):\n for group, match in m.groupdict().items():\n if match is not None:\n match = match.strip()\n x1 = m.start(group)\n x2 = m.end(group)\n\n if x2 > width:\n width = x2\n\n data[group] += [(match, ((x1, x2), y))]\n\n if group == 'fullname' and match.upper() != \"BYE\":\n add_to_fullname_conversion_table(match, (x1, x2), y)\n\n y += 1\n\n # hack to catch country codes that got attached to fullnames\n if len(data['country']) > 0:\n cc_re = re.compile(r'^([A-Z]{3}) (.*)')\n # find known country codes\n countries = set(list(zip(*data['country']))[0])\n if len(data['fullname']) > len(data['country']):\n for n, point in data['fullname']:\n m = cc_re.match(n)\n if m and m.group(1) in countries:\n country = m.group(1)\n name = m.group(2)\n idx = data['fullname'].index((n, point))\n del data['fullname'][idx]\n (x1, x2), y = point\n data['fullname'].insert(idx, (name, ((x1 + 4), x2, y)))\n data['country'].append((country, ((x1, x1 + 3), y)))\n add_to_fullname_conversion_table(name)\n if len(data['fullname']) == len(data['country']):\n # we're done\n break\n\n # find any possible country codes\n if len(data['fullname']) > len(data['country']):\n for n, point in data['fullname']:\n m = cc_re.match(n)\n if m:\n country = m.group(1)\n name = m.group(2)\n idx = data['fullname'].index((n, point))\n del data['fullname'][idx]\n 
(x1, x2), y = point\n data['fullname'].insert(idx, (name, ((x1 + 4, x2), y)))\n data['country'].append((country, ((x1, x1 + 3), y)))\n add_to_fullname_conversion_table(name)\n if len(data['fullname']) == len(data['country']):\n # we're done\n break\n\n orderednames = []\n for n, point in data['orderedname']:\n try:\n n = ordered_to_fullnames[n]\n orderednames += [(n, point)]\n except KeyError:\n data['string'] += [(n, point)]\n\n data['orderedname'] = orderednames\n\n def distance(a, b):\n ax1, ax2 = a[0]\n bx1, bx2 = b[0]\n ax = (ax1 + ax2) / 2\n bx = (bx1 + bx2) / 2\n dx = float(ax - bx) / 10\n dy = float(a[1] - b[1])\n\n return math.sqrt(dx * dx + dy * dy)\n\n # assign shortnames to longnames\n # some people share a shortname, so assign to \n # the longname that is closest\n shortnames = []\n for n, point in data['shortname']:\n n = n.upper()\n if n[2] != ' ':\n short = n[0:2] + ' ' + n[2:]\n else:\n short = n\n\n try:\n shorts = short_to_fullnames[short]\n\n short = min(shorts, key=lambda s: distance(s[1], point))\n shortnames += [(short[0], point)]\n except KeyError:\n data['string'] += [(n, point)]\n\n data['shortname'] = shortnames\n\n logging.debug(pprint.pformat(data))\n\n return data, width;", "def processSlideInfo(extractionCtx: ExtractionContext) -> None:\n\n\tlogger = extractionCtx.logger\n\n\t# get a list of mapping and slide info\n\tmappingInfo = _getMappingInfo(extractionCtx)\n\tif not mappingInfo:\n\t\treturn\n\n\t# Process each pair\n\tfor info in mappingInfo:\n\t\tif info.slideInfo.version == 2:\n\t\t\t_V2Rebaser(extractionCtx, info).run()\n\t\telif info.slideInfo.version == 3:\n\t\t\t_V3Rebaser(extractionCtx, info).run()\n\t\telse:\n\t\t\tlogger.error(\"Unknown slide version.\")", "def find_patches_from_slide(slide_path, filter_non_tissue=True):\n\n #sampletotal = pd.DataFrame([])\n #base_truth_dir = Path(BASE_TRUTH_DIR)\n #anno_path = Path(anno_path)\n #slide_contains_tumor = osp.basename(slide_paths[i]).startswith('tumor_')\n print (slide_path)\n\n dimensions = []\n \n with openslide.open_slide(slide_path) as slide:\n dtotal = (slide.dimensions[0] / 224, slide.dimensions[1] / 224)\n thumbnail = slide.get_thumbnail((dtotal[0], dtotal[1]))\n thum = np.array(thumbnail)\n ddtotal = thum.shape\n dimensions.extend(ddtotal)\n hsv_image = cv2.cvtColor(thum, cv2.COLOR_BGR2HSV)\n h, s, v = cv2.split(hsv_image)\n hthresh = threshold_otsu(h)\n sthresh = threshold_otsu(s)\n vthresh = threshold_otsu(v)\n # be min value for v can be changed later\n minhsv = np.array([hthresh, sthresh, 70], np.uint8)\n maxhsv = np.array([180, 255, vthresh], np.uint8)\n thresh = [minhsv, maxhsv]\n #extraction the countor for tissue\n\n rgbbinary = cv2.inRange(hsv_image, thresh[0], thresh[1])\n _, contours, _ = cv2.findContours(rgbbinary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n bboxtcols = ['xmin', 'xmax', 'ymin', 'ymax']\n bboxt = pd.DataFrame(columns=bboxtcols)\n for c in contours:\n (x, y, w, h) = cv2.boundingRect(c)\n bboxt = bboxt.append(pd.Series([x, x+w, y, y+h], index = bboxtcols), ignore_index=True)\n bboxt = pd.DataFrame(bboxt)\n \n xxmin = list(bboxt['xmin'].get_values())\n xxmax = list(bboxt['xmax'].get_values())\n yymin = list(bboxt['ymin'].get_values())\n yymax = list(bboxt['ymax'].get_values())\n\n xxxmin = np.min(xxmin)\n xxxmax = np.max(xxmax)\n yyymin = np.min(yymin)\n yyymax = np.max(yymax)\n\n dcoord = (xxxmin, xxxmax, yyymin, yyymax)\n\n dimensions.extend(dcoord)\n\n # bboxt = math.floor(np.min(xxmin)*256), math.floor(np.max(xxmax)*256), math.floor(np.min(yymin)*256), 
math.floor(np.max(yymax)*256)\n \n samplesnew = pd.DataFrame(pd.DataFrame(np.array(thumbnail.convert('L'))))\n print(samplesnew)\n # very critical: y value is for row, x is for column\n samplesforpred = samplesnew.loc[yyymin:yyymax, xxxmin:xxxmax]\n\n dsample = samplesforpred.shape\n\n dimensions.extend(dsample)\n\n np.save ('dimensions_%s' % (osp.splitext(osp.basename(slide_paths[i]))[0]), dimensions)\n\n print(samplesforpred)\n\n samplesforpredfinal = pd.DataFrame(samplesforpred.stack())\n\n print(samplesforpredfinal)\n\n samplesforpredfinal['tile_loc'] = list(samplesforpredfinal.index)\n\n samplesforpredfinal.reset_index(inplace=True, drop=True)\n\n\n samplesforpredfinal['slide_path'] = slide_paths[i]\n\n\n print(samplesforpredfinal)\n\n\n return samplesforpredfinal", "def __local_sp(soup):\n news = []\n titles = soup.find('section', class_='col-xs-12 maislidas-interno').find_all('h3', class_='fifth')\n\n for title in titles:\n news.append(dict(title=title.string, link=title.parent['href']))\n return news", "def parse(self):\n result = {}\n if self.detail_statu:\n sel = Selector(text=self.driver.page_source)\n\n fact_table = sel.xpath(\n '//div[@class=\"facts-table\"]//text()').extract()\n result['facts'] = [list(i)\n for i in zip(fact_table[:: 2],\n fact_table[1:: 2])]\n\n tax_table = sel.xpath(\n '//div[@class=\"tax-values\"]//text()').extract()\n result['taxs'] = [list(i)\n for i in zip(tax_table[:: 2],\n tax_table[1:: 2])]\n\n listing_detail = sel.xpath(\n '//div[@class=\"amenities-container\"]//text()').extract()\n result['detail'] = listing_detail\n result['page_source'] = self.driver.page_source\n self.detail_statu = False\n else:\n self.log.warning(\n '---- Detail page url out of reach, use .search() first to get the detail page')\n return result", "def get_annotation(file, pos_dict, ex_dict, tag):\n results = {}\n with open(file, 'r', encoding='utf-8') as f:\n par = 0\n par_results = []\n for line in f:\n if line is \"\\n\":\n if par_results:\n if \"paragraph\" + str(par) in results:\n results[\"paragraph\" + str(par)].append(par_results)\n else:\n results[\"paragraph\" + str(par)] = par_results\n par += 1\n par_results = []\n continue\n for q in pos_dict:\n qmatches = re.finditer(q, line, re.I)\n for qmatch in qmatches:\n exclude = 0\n for exItem in ex_dict:\n exMatches = re.finditer(exItem.rstrip('\\n'), line, re.I)\n for exMatch in exMatches:\n if exMatch and qmatch.start(1) is exMatch.start(1):\n exclude = 1\n # Save result to list of results with appropriate tag\n if (qmatch and exclude is 0):\n try:\n #results.append((int(qmatch.group(1)),int(qmatch.group(2)), int(qmatch.group(len(qmatch.groups()))), tag))\n par_results.append({\"sentID\": int(qmatch.group(1)), \"spanStart\":int(qmatch.group(2)), \"spanEnd\":int(qmatch.group(len(qmatch.groups()))), \"tag\": tag})\n except TypeError:\n # TypeErrors are usually raised when one of the capture groups of fields is empty (NoneType)\n # Simply throw a warning message and keep going\n print(\"Warning! 
Something went wrong while matching expression'\" + q + \"' in line '\" + line[0:50] + \"...'\")\n return results", "def return_figures():\n # Add New York Times API Key\n nyt = NYTAPI(\"AsjeHhqDYrePA2GMPpYoY1KAKAdG7P99\")\n\n # Select Year and Month of articles\n data = nyt.archive_metadata(\n date = datetime.datetime(2020, 7, 1)\n )\n\n def data_to_df(data):\n # Initiate list for restructured information\n data_list = []\n\n # Collect Data from API dictionary\n for article in data:\n new_data = [article.get(\"section_name\"),\n article.get(\"news_desk\"),\n article.get(\"pub_date\"),\n article.get(\"headline\").get(\"main\"),\n article.get(\"abstract\"),\n article.get(\"lead_paragraph\"),\n article.get(\"type_of_material\"),\n article.get(\"word_count\")]\n # Append list of information from article to data list\n data_list.append(new_data)\n\n # Convert data list to DataFrame\n df = pd.DataFrame(data_list, columns=[\"section_name\",\"news_desk\", \"pub_date\", \"headline\", \"abstract\", \"lead_paragraph\", \"type_of_material\", \"word_count\"])\n\n return df\n\n df = data_to_df(data)\n\n # first chart plots section distribution\n # as a pie chart\n graph_one = []\n df_one = df.copy()\n\n # filter and sort values for the visualization\n # filtering plots the articles in decreasing order by their values\n labels = df_one.section_name.value_counts().index\n values = df_one.section_name.value_counts().values\n\n graph_one.append(\n go.Pie(\n labels=labels,\n values=values,\n hole=.6,\n textposition=\"inside\"\n )\n )\n\n layout_one = dict(title = 'Distribution of sections of this months New York Times articles')\n\n # second chart plots section distribution\n # as a pie chart\n graph_two = []\n df_two = df.copy()\n\n # filter and sort values for the visualization\n # filtering plots the articles in decreasing order by their values\n labels = df_two.news_desk.value_counts().index\n values = df_two.news_desk.value_counts().values\n\n graph_two.append(\n go.Pie(\n labels=labels,\n values=values,\n hole=.6,\n textposition=\"inside\"\n )\n )\n\n layout_two = dict(title = 'Distribution of news desk of this months articles')\n\n # third chart plots section distribution\n # as a pie chart\n graph_three = []\n df_three = df.copy()\n\n # filter and sort values for the visualization\n # filtering plots the articles in decreasing order by their values\n labels = df_three.type_of_material.value_counts().index\n values = df_three.type_of_material.value_counts().values\n\n graph_three.append(\n go.Pie(\n labels=labels,\n values=values,\n hole=.6,\n textposition=\"inside\"\n )\n )\n\n layout_three = dict(title = 'Distribution for type of material of this months articles')\n\n # fourth chart plots section distribution\n # as a pie chart\n graph_four = []\n\n # Convert publishing date columns to datetime format\n df[\"pub_date\"] = pd.to_datetime(df[\"pub_date\"]).dt.date\n\n df_four = df.copy()\n df_four = df_four.pub_date.value_counts().to_frame().sort_index()\n\n # filter and sort values for the visualization\n # filtering plots the articles in decreasing order by their values\n x_val = df_four.index\n y_val = df_four.values\n\n graph_four.append(\n go.Scatter(\n x=df_four.index,\n y=df_four[\"pub_date\"],\n mode=\"lines\",\n name=\"Articles\"\n )\n )\n\n layout_four = dict(title = 'Number of articles published by days')\n\n # fourth chart plots section distribution\n # as a pie chart\n graph_five = []\n\n # Calculate average number of words for this months articles\n avg_word_count = 
round(df.word_count.mean(),0)\n\n graph_five.append(\n go.Table(\n header=dict(values=['Average Word Count']),\n cells=dict(values=[avg_word_count])\n )\n )\n\n layout_five = dict(title = '')\n\n # append all charts\n figures = []\n figures.append(dict(data=graph_one, layout=layout_one))\n figures.append(dict(data=graph_two, layout=layout_two))\n figures.append(dict(data=graph_three, layout=layout_three))\n figures.append(dict(data=graph_four, layout=layout_four))\n figures.append(dict(data=graph_five, layout=layout_five))\n\n return figures", "def import_landscape_section(self, filename_suffix='lan', ti_offset=0):\n with open('%s/%s.%s' % (self.model_path, self.model_name, filename_suffix)) as f:\n data = f.read()\n _data = re.search(r'\\*THEME.*', data, re.M|re.S).group(0) # strip leading junk\n t_data = re.split(r'\\*THEME.*\\n', _data)[1:] # split into theme-wise chunks\n for ti, t in enumerate(t_data, start=ti_offset):\n self._themes.append({})\n self._theme_basecodes.append([])\n defining_aggregates = False\n for l in [l for l in t.split('\\n') if not re.match('^\\s*(;|{|$)', l)]: \n if re.match('^\\s*\\*AGGREGATE', l): # aggregate theme attribute code\n tac = re.split('\\s+', l.strip())[1].lower()\n self._themes[ti][tac] = []\n defining_aggregates = True\n continue\n if not defining_aggregates: # line defines basic theme attribute code\n tac = re.search('\\S+', l.strip()).group(0).lower()\n self._themes[ti][tac] = tac\n self._theme_basecodes[ti].append(tac)\n else: # line defines aggregate values (parse out multiple values before comment)\n _tacs = [_tac.lower() for _tac in re.split('\\s+', l.strip().partition(';')[0].strip())]\n self._themes[ti][tac].extend(_tacs)\n self.nthemes = len(self._themes)", "def variable_dicts(self):\n \n def get_variable_text(rtf_file):\n \"Returns a list of variable_texts for each variable\"\n st='Pos. 
= '\n return rtf_file.split(st)[1:]\n \n def get_variable_name(variable_text):\n st='Variable = '\n b=variable_text.split(st)[1]\n return b[b.find(' ')+1:b.find('\\t')]\n \n def find_pos(rtf):\n a=rtf\n b=a\n return b[b.find(' ')+1:b.find('\\t')]\n \n def find_variable_label(rtf):\n try:\n a=rtf\n b=a.split('Variable label = ')[1]\n return b[b.find(' ')+1:b.find('\\\\par')]\n except IndexError:\n return None\n \n def find_variable_type(rtf):\n if not 'This variable is ' in rtf: return ''\n a=rtf\n b=a.split('This variable is ')[1]\n i1=b.find(' ')+1\n i2=i1+b[i1:].find('}')\n return b[i1:i2]\n \n def find_SPSS_measurement_level(rtf):\n if not 'the SPSS measurement level is ' in rtf: return ''\n a=rtf\n b=a.split('the SPSS measurement level is ')[1]\n i1=b.find(' ')+1\n i2=i1+b[i1:].find('\\\\par')\n return b[i1:i2]\n \n def find_SPSS_user_missing_values(rtf):\n if not 'SPSS user missing values = ' in rtf: return dict()\n a=rtf\n d=a.split('SPSS user missing values = ')\n if len(d)<2: return None\n e=d[1]\n i1=e.find(' ')+1\n i2=i1+e[i1:].find('\\\\par')\n f=e[i1:i2]\n g=f.split(' ')\n i=' '.join([g[0],g[2],g[4]])\n return i\n \n def find_value_labels(rtf):\n if not 'Value = ' in rtf: return dict()\n a=rtf\n d=a.split('Value = ')[1:]\n z={}\n for e in d:\n value=e[e.find(' ')+1:e.find('\\t')]\n value=float(value)\n f=e.split('Label = ')[1]\n label=f[f.find(' ')+1:f.find('\\\\par')]\n z[value]=label\n #print(z)\n return z\n \n variable_texts=get_variable_text(self.rtf)\n #pprint(variable_texts[0:2])\n \n result=[]\n for variable_text in variable_texts:\n d={'pos':find_pos(variable_text),\n 'variable':get_variable_name(variable_text),\n 'variable_label':find_variable_label(variable_text),\n 'variable_type':find_variable_type(variable_text),\n 'SPSS_measurement_level':find_SPSS_measurement_level(variable_text),\n 'SPSS_user_missing_values':find_SPSS_user_missing_values(variable_text),\n 'value_labels':find_value_labels(variable_text) \n }\n result.append(d)\n \n return result", "def pose_structure(pose, display_residues = []):\n # store the pose's number of residues, example Python syntax\n nres = pose.total_residue()\n\n # 1. obtain the pose's sequence\n sequence = pose.sequence()\n\n # 2. obtain a list of PDB numbering and icode as a single string\n pdb_info = pose.pdb_info()\n PDB_nums = [(str( pdb_info.number(i)) + pdb_info.icode(i)).strip()\n for i in range(1, nres + 1)]\n # 3. obtains a list of the chains organized by residue\n chains = [pdb_info.chain(i) for i in range(1, nres + 1)]\n # 4. extracts a list of the unique chain IDs\n unique_chains = []\n for c in chains:\n if c not in unique_chains:\n unique_chains.append(c)\n\n # start outputting information to screen\n print('\\n' + '='*80)\n print('Loaded from' , pdb_info.name())\n print(nres , 'residues')\n print(len(unique_chains), 'chain(s) ('+ str(unique_chains)[1:-1] + ')')\n print('Sequence:\\n' + sequence)\n\n # this object is contained in PyRosetta v2.0 and above\n # 5. obtain the pose's secondary structure as predicted by PyRosetta's\n # built-in DSSP algorithm\n DSSP = protocols.moves.DsspMover()\n DSSP.apply(pose) # populates the pose's Pose.secstruct\n ss = pose.secstruct()\n print( 'Secondary Structure:\\n' + ss )\n print( '\\t' + str(100. * ss.count('H') / len(ss))[:4] + '% Helical' )\n print( '\\t' + str(100. * ss.count('E') / len(ss))[:4] + '% Sheet' )\n print( '\\t' + str(100. * ss.count('L') / len(ss))[:4] + '% Loop' )\n\n # 6. 
obtain the phi, psi, and omega torsion angles\n phis = [pose.phi(i) for i in range(1, nres + 1)]\n psis = [pose.psi(i) for i in range(1, nres + 1)]\n omegas = [pose.omega(i) for i in range(1, nres + 1)]\n\n # this object is contained in PyRosetta v2.0 and above\n # create a PyMOLMover for exporting structures directly to PyMOL\n pymover = PyMOLMover()\n pymover.apply(pose) # export the structure to PyMOL (optional)\n\n # 7. output information on the requested residues\n # use a simple dictionary to make output nicer\n ss_dict = {'L':'Loop', 'H':'Helix', 'E':'Strand'}\n for i in display_residues:\n print( '='*80 )\n print( 'Pose numbered Residue', i )\n print( 'PDB numbered Residue', PDB_nums[i-1] )\n print( 'Single Letter:', sequence[i-1] )\n print( 'Chain:', chains[i-1] )\n print( 'Secondary Structure:', ss_dict[ss[i-1]] )\n print( 'Phi:', phis[i-1] )\n print( 'Psi:', psis[i-1] )\n print( 'Omega:', omegas[i-1] )\n # extract the chis\n chis = [pose.chi(j + 1, i) for j in range(pose.residue(i).nchi() )]\n for chi_no in range(len(chis)):\n print( 'Chi ' + str(chi_no + 1) + ':', chis[chi_no] )\n print( '='*80 )", "def samsemPlots17thru20(samsem_data, path, dict):\n \n path_res = path\n \n if 'subfolder' in dict:\n subfolder = dict['subfolder']\n path_res = os.path.join(path,subfolder)\n if not os.path.exists(path_res): os.makedirs(path_res)\n \n coldef_type = dict['coldef_type']\n print(\"Starting SAMSEM_RES#17+20: Analyzing data of all images for \" + str(settings.id2ColDefLong[dict['coldef_type']]) + \" simulation methods.\")\n \n if 'plot_types' in dict:\n plot_types = dict['plot_types']\n else:\n plot_types = ['RT_boxplots', 'RT_means', 'ACC_CIs', 'median']\n \n method_ids = sorted(set(samsem_data['sim_id'].values.astype(int)))\n image_ids = sorted(set(samsem_data['image_id'].values.astype(int)))\n #print method_ids\n \n for method_id in method_ids:\n if (method_id != 3) and (method_id != 99):\n whatArr_sim = [['sim_id',operator.eq,method_id],['coldef_type',operator.eq,coldef_type],['observer_coldef_type',operator.eq, coldef_type]]\n else:\n whatArr_sim = [['sim_id',operator.eq,method_id],['observer_coldef_type',operator.eq,coldef_type]]\n samsem_data_sim = organizeArray(samsem_data, whatArr_sim)\n \n boxes = []; labels = []; accuracies = {}; mean_cis = {}; order = {}; i=1\n \n for image_id in image_ids:\n whatArr_image = [['image_id', operator.eq, image_id]]\n samsem_data_image = organizeArray(samsem_data_sim,whatArr_image)\n \n dict.update({'filename':str(settings.id2ColDef[coldef_type])+'-simulation-method-'+str(settings.id2Sim[method_id])+'_images'})\n \n # 3. Get response time data\n alg_values = samsem_data_image[samsem_data_image['is_correct']==True]['resp_time'].values*1000; \n if ('RT_boxplots' in plot_types) or ('median-test' in plot_types):\n boxes.append(alg_values)\n labels.append(image_id) if alg_values.size else labels.append(str(image_id) + ' - No data'); \n \n # 4. Get CI of RT means\n if 'RT_means' in plot_types:\n alg_mean = getCIAverage(alg_values);\n alg_mean.append(labels[i-1])\n mean_cis.update({image_id: alg_mean})\n \n # 5. Get accuracy data\n if 'ACC_CIs' in plot_types:\n alg_acc = getAccuracy(samsem_data_image)\n alg_acc.append(labels[i-1])\n accuracies.update({image_id: alg_acc})\n \n order.update({i:image_id})\n i += 1\n if 'RT_boxplots' in plot_types:\n # 6. Plot response time data\n plotRTGraphs(boxes,labels,path_res,dict,order)\n \n if 'RT_means' in plot_types:\n # 7. 
Plot CI means of RT data\n plotCIAverageGraphs(mean_cis,path_res,dict,order)\n \n if 'ACC_CIs' in plot_types:\n # 8. Plot accuracy data\n plotAccuracyGraphs(accuracies,path_res,dict,order)\n \n if 'median-test' in plot_types:\n # 9. Make median test\n dict.update({'filename': dict['filename']+\"-RT\"})\n makeMedianTest(numpy.array(boxes), path_res, image_ids,dict)", "def break_up_pt_and_passage_comparisons(discordance_by_cohort_dict):\n pt_passage_dict = dict()\n early_late_dict = dict()\n for cohort in discordance_by_cohort_dict:\n discordance_dictionary = discordance_by_cohort_dict[cohort]\n pt_passage_discordance = []\n early_late_discordance = []\n for key in discordance_dictionary:\n if \"PT\" in key:\n pt_passage_discordance += discordance_dictionary[key]\n else:\n early_late_discordance += discordance_dictionary[key]\n pt_passage_dict[cohort] = pt_passage_discordance\n early_late_dict[cohort] = early_late_discordance\n return pt_passage_dict, early_late_dict", "def test_display_presentation(self):\n response = self._speaker_profile(True)\n self.assertContains(response, FIRST_PRESENTATION_TITLE)\n self.assertContains(response, SECOND_PRESENTATION_TITLE)", "def _parse_result(cls, design_folder: str) -> Dict[str, Any]:\n _, folder_name = os.path.split(design_folder)\n raw_folder = os.path.join(design_folder, '{}.raw'.format(folder_name))\n res = SpectreParser.parse(raw_folder)\n return res", "def overlay(self):\n # retrieve header for photometry keywords\n # from current frame only\n hdr_str = self.run('fits header', via='get')\n\n # read it in to a fits header\n phdr = fits.Header()\n hdr = phdr.fromstring(hdr_str, sep='\\n')\n\n try:\n srcposx = hdr['SRCPOSX'] + 1\n srcposy = hdr['SRCPOSY'] + 1\n s1 = 'point({:f} {:f}) # ' \\\n 'point=x ' \\\n 'color=blue tag={{srcpos}} '\\\n 'text=SRCPOS'.format(srcposx, srcposy)\n self.run('regions', s1)\n except (KeyError, ValueError):\n pass\n try:\n stcentx = hdr['STCENTX'] + 1\n stcenty = hdr['STCENTY'] + 1\n photaper = hdr['PHOTAPER']\n photskap = [float(x) for x in hdr['PHOTSKAP'].split(',')]\n s1 = 'point({:f} {:f}) # ' \\\n 'point=x ' \\\n 'color=cyan tag={{srcpos}}'.format(stcentx, stcenty)\n self.run('regions', s1)\n s2 = 'circle({:f} {:f} {:f}) # ' \\\n 'color=cyan tag={{srcpos}}'.format(\n stcentx, stcenty, photaper)\n self.run('regions', s2)\n s3 = 'annulus({:f} {:f} {:f} {:f}) # ' \\\n 'color=cyan tag={{srcpos}} text=STCENT'.format(\n stcentx, stcenty, photskap[0], photskap[1])\n self.run('regions', s3)\n except (KeyError, ValueError):\n pass\n try:\n stcentx = hdr['STCENTX'] + 1\n stcenty = hdr['STCENTY'] + 1\n flux = hdr['STAPFLX']\n sky = hdr['STAPSKY']\n s1 = 'text({:f} {:f}) # color=cyan ' \\\n 'text=\"Flux={:.2f}, Sky={:.2f}\"'.format(\n stcentx, stcenty - 40, flux, sky)\n self.run('regions', s1)\n except (KeyError, ValueError):\n pass\n\n # try overlaying apertures as well\n try:\n self.overlay_aperture(hdr)\n except ValueError: # pragma: no cover\n # may be encountered with extensions with\n # unexpected WCSs\n pass", "def process_sentiment(self, sentiment_data):\n new_utts_dict = {'1':[], '2':[], '3':[], '4':[], '5':[]}\n for l in sentiment_data:\n title = [\"<s>\"] + l[0] + [\"</s>\"]\n context = [\"<s>\"] + l[1] + [\"</s>\"]\n target = [\"<s>\"] + l[2] + [\"</s>\"]\n sentiment = l[3][0]\n new_utts_dict[sentiment].append([title, context, target, sentiment])\n return new_utts_dict", "def CreatePresentation(self, event):\n pass", "def population_text_parser():\n h = 480\n w = 640\n filenames = []\n # get corrispondence ppoints 
for all faces\n all_face_verticies = []\n for i in range(1, 41):\n if i == 2 or i == 3 or i == 4:\n # not in data set\n continue\n # get file name\n file_name = \"data/\"\n if i < 10:\n file_name += \"0\" + str(i)\n else:\n file_name += str(i)\n if i == 8 or i == 22 or i == 30 or i == 35 or i == 12 or i ==14 or i == 15:\n # female faces\n file_name += \"-1f.asf\"\n else:\n file_name += \"-1m.asf\"\n filenames.append(file_name[:-3] + \"bmp\")\n # Parse corrispondence points\n face_vertices = file_parser(file_name)\n all_face_verticies.append(np.array(face_vertices))\n\n # adding my face to the set\n # all_face_verticies.append(get_will_points())\n mean_vertices = np.array(sum(all_face_verticies)) / len(all_face_verticies)\n\n # face = plt.imread(filenames[0])\n # mean_vertices = ginput_to_array(appendCorners_other(face, mean_vertices))\n # Morph each of the faces in the dataset into the average shape.\n morphs = []\n for i in range(len(all_face_verticies)):\n # Read in src img\n print(\"Morphing face \" + str(i) + \" into the average shape.\")\n if i == len(all_face_verticies)-1:\n face = plt.imread(\"will_population.jpeg\")/255\n else:\n face = plt.imread(filenames[i])/255\n # print(face.shape)\n # print(mean_vertices)\n im_src_vertices = all_face_verticies[i]\n\n # Create Dalaunay triangulation for the ith morph set\n print(\"Computing Delaunay triangulation.\")\n # morph_verticies = morphPointSet(tri0_vertices, tri1_vertices, i/45)\n t = Delaunay(mean_vertices)\n trianguations = t.simplices\n\n # Compute Affine Transformation matrices for both transformations (src-->mid; dst-->mid)\n print(\"Computing Affine Transformation.\")\n affine_matrices_0 = computeAffines(trianguations, im_src_vertices, mean_vertices)\n affine_matrices_inv_0 = [linalg.inv(A) for A in affine_matrices_0]\n\n morph = warp(h, w, t, affine_matrices_inv_0, face)\n # if i == len(all_face_verticies) -1:\n # plt.imsave(\"will_in_danish_pop.png\", morph)\n morphs.append(morph)\n\n out = sum(morphs) * (1/len(morphs))\n return out", "def osp2():\n return dict(\n kloc= range(75,125),\n docu = [3,4], ltex = [2,5],\n sced = [2,3,4], Pmat = [4,5],\n Prec = [3,4, 5],\n Resl = [4], Team = [3],\n acap = [4], aexp = [4],\n cplx = [4], data = [4],\n Flex = [3], pcap = [3],\n pcon = [3], pexp = [4],\n pvol = [3], rely = [5],\n ruse = [4], site = [6],\n stor = [3], time = [3],\n tool = [5])", "def test_extract():\n print(\"Executing test_extract:\")\n\n theory_1=[\n (14,),\n (15,),\n (14,),\n (16,)\n ]\n theory_2=[\n (14,),\n (15,),\n (14,),\n (17,)\n ]\n theory_3=[\n (15,),\n (14,)\n ]\n\n mind=minds.new_mind(theories=[theory_1,theory_2,theory_3])\n\n print(\"Mind initial state:\")\n print(minds.mind_string(mind, show_claims=False, show_problems=False))\n\n minds.extract_new_routines(mind,1)\n print(\"Mind after 1 step of extraction:\")\n print(minds.mind_string(mind, show_claims=False, show_problems=False))\n\n minds.extract_new_routines(mind,1)\n print(\"Mind after 2 steps of extraction:\")\n print(minds.mind_string(mind, show_claims=False, show_problems=False))", "def save_annotations(self):\n for fp in self.ris_widget.flipbook_pages:\n if len(fp) == 0:\n # skip empty flipbook pages\n continue\n annotations = getattr(fp, 'annotations', {})\n pose = annotations.get('pose', (None, None))\n if pose is not None:\n center_tck, width_tck = pose\n if center_tck is not None:\n path = pathlib.Path(fp[0].name)\n with path.with_suffix('.pickle').open('wb') as f:\n pickle.dump(dict(pose=pose), f)\n\n # warp and save images from all flipbook 
pages\n for lab_frame in fp:\n lab_frame_image = lab_frame.data\n path = pathlib.Path(lab_frame.name)\n warp = worm_spline.to_worm_frame(lab_frame_image, center_tck, width_tck)\n warp_save_path = path.parent / (path.stem + '-straight.png')\n freeimage.write(warp, warp_save_path)\n\n # If the widths are drawn, then create a mask that allows the user to make an alpha channel later.\n # We create one mask for each flipbook page, in case the images were saved in different places.\n # If we wind up redundantly writing the same mask a few times, so be it.\n if width_tck is not None:\n mask = worm_spline.worm_frame_mask(width_tck, warp.shape)\n mask_save_path = path.parent / (path.stem + '-mask.png')\n freeimage.write(mask, mask_save_path)", "def get_soup_general_data(soup):\n data_dict = {}\n\n name = soup.find(class_='product_title')\n if name:\n data_dict['name_of_game'] = name.h1.text\n\n pub = soup.find('li', class_='summary_detail publisher')\n if pub:\n data_dict['publisher'] = pub.a.text.strip()\n\n rel_date = soup.find('li', class_='summary_detail release_data')\n if rel_date:\n rel_date = rel_date.find('span', class_='data')\n if rel_date:\n data_dict['release_date'] = rel_date.text.strip()\n\n num_p = soup.find(\"li\", class_=\"summary_detail product_players\")\n if num_p:\n data_dict['num_players'] = num_p.find(class_=\"data\").text\n\n genres = soup.find(\"li\", class_='summary_detail product_genre')\n if genres:\n genres = genres.find_all('span', class_='data')\n data_dict['genres'] = [genre.text for genre in genres]\n\n age = soup.find(\"li\", class_=\"summary_detail product_rating\")\n if age:\n data_dict['age_rating'] = age.find('span', class_=\"data\").text\n\n return data_dict", "def _build_study_info(user, study_proc=None, proc_samples=None):\n # Logic check to make sure both needed parts passed\n if study_proc is not None and proc_samples is None:\n raise IncompetentQiitaDeveloperError(\n 'Must pass proc_samples when study_proc given')\n elif proc_samples is not None and study_proc is None:\n raise IncompetentQiitaDeveloperError(\n 'Must pass study_proc when proc_samples given')\n\n # get list of studies for table", "def extract_spacy(self, text: str)->dict:\n ners=None\n try:\n persons=[]\n locations=[]\n orgs=[]\n misc=[]\n docs=[]\n if len(text)>1000000:\n docs=self._splitCount(text,1000000)\n else:\n docs.append(text)\n for doc in docs:\n doc_spacy = self.recognizer(doc)\n for token in doc_spacy:\n if token.ent_type_ == \"PER\":\n persons.append(token.text)\n if token.ent_type_ == \"LOC\":\n locations.append(token.text)\n if token.ent_type_ == \"ORG\":\n orgs.append(token.text)\n if token.ent_type_ == \"MISC\":\n misc.append(token.text)\n ners={\"persons\":list(set(persons)), \"locations\":list(set(locations)),\"orgs\":list(set(orgs)), \"misc\":list(set(misc))}\n except Exception as ex:\n print('Exception while extracting NERs')\n print(str(ex))\n finally:\n return ners", "def study(user_preferences: dict, matcher: NodeMatcher):\r\n studysame = user_preferences[\"importancia_estudios\"]\r\n equal_styles = list(matcher.match(\"User\", imp = studysame))\r\n return equal_styles", "def _get_summary_struct(self):\n model_fields = [\n ('Number of classes', 'num_classes'),\n ('Number of feature columns', 'num_features'),\n ('Input image shape', 'input_image_shape'),\n ]\n training_fields = [\n ('Number of examples', 'num_examples'),\n (\"Training loss\", 'training_loss'),\n (\"Training time (sec)\", 'training_time'),\n ]\n\n section_titles = ['Schema', 'Training summary']\n 
return([model_fields, training_fields], section_titles)", "def extract_relevant(self):\n item_extraction = self.data\n my_dict = {'tweeted_time': item_extraction['created_at'],\n 'tweet_id': item_extraction['id'],\n # If the time comes when the below becomes more significant, it will be no trouble at all to make an\n # additional column for it, but delimiting it with a ` creates less clutter in the Database\n 'in_reply_to':\n \"NAME/\" + str(item_extraction['in_reply_to_screen_name']) + \"`\" +\n \"STATUSID/\" + str(item_extraction['in_reply_to_status_id_str']) + \"`\" +\n \"USERID/\" + str(item_extraction['in_reply_to_user_id_str']),\n 'lang': item_extraction['lang'],\n 'place': item_extraction['place'], 'source': item_extraction['source']}\n if item_extraction['place'] is not None:\n my_dict['place'] = item_extraction['place']['full_name']\n if 'retweeted_status' in item_extraction.keys():\n my_dict['original_author_id'] = item_extraction['retweeted_status']['user']['id']\n my_dict['original_author_handle'] = item_extraction['retweeted_status']['user']['screen_name']\n tester = item_extraction['retweeted_status']['text']\n cleaned = ' '.join(re.sub(\"(RT : )|(@[\\S]+)|(&\\S+)|(http\\S+)\", \" \", tester).split())\n removed_others = \" \".join(re.sub(\"(#\\S+)\", ' ', cleaned).split())\n final_text = ''.join(list(filter(lambda x: x.isalpha() or x is ' ', removed_others)))\n # This final text will make it a lot easier to run NLP\n final_text = final_text.strip().replace(' ', ' ').replace(' ', ' ')\n my_dict['plain_text'] = final_text.lower()\n my_dict['tweet'] = cleaned\n else:\n my_dict['original_author_id'] = item_extraction['user']['id']\n my_dict['original_author_handle'] = item_extraction['user']['screen_name']\n cleaned = ' '.join(re.sub(\"(@[\\S]+)|(&\\S+)|(http\\S+)\", \" \", item_extraction['text']).split())\n removed_others = \" \".join(re.sub(\"(#\\S+)\", ' ', cleaned).split())\n final_text = ''.join(list(filter(lambda x: x.isalpha() or x is ' ', removed_others)))\n final_text = final_text.strip().replace(' ', ' ').replace(' ', ' ')\n my_dict['plain_text'] = final_text.lower()\n my_dict['tweet'] = cleaned\n return my_dict", "def playerStandings(t_name):\n t_id = getTournamentID(t_name, False)\n if t_id == -1:\n return []\n conn, cur = connect()\n cur.execute(\"SELECT create_summary();\")\n conn.commit()\n query = \"SELECT P_ID, P_NAME, WIN, MATCH FROM SUMMARY WHERE T_ID = %s\"\n param = (t_id, )\n cur.execute(query, param)\n ps = [(int(row[0]), str(row[1]), int(row[2]), int(row[3]))\n for row in cur.fetchall()]\n return ps", "def make_hst_spec_previews(input_file, flux_scale_factor=\n FLUX_SCALE_FACTOR_DEFAULT, fluxerr_scale_factor=\n FLUXERR_SCALE_FACTOR_DEFAULT, n_consecutive=\n N_CONSECUTIVE_DEFAULT, output_path=\n OUTPUT_PATH_DEFAULT, output_type=OUTPUT_TYPE_DEFAULT,\n dpi_val=DPI_VAL_DEFAULT, debug=DEBUG_DEFAULT,\n full_ylabels=FULL_YLABELS_DEFAULT, optimize=\n not NOOPTIMIZE_DEFAULT, verbose=VERBOSE_DEFAULT):\n\n # Print file name, if verbose is turned on.\n if verbose:\n print(\"Input file: \" + input_file)\n\n # Derive output file names from input file name.\n output_files = []\n for out_type in output_type:\n if out_type != \"screen\":\n if out_type != \"fits\":\n output_file = (path.join(output_path, \"\") +\n path.basename(input_file).split(\".fits\")[0] +\n \".\" + out_type)\n else:\n output_file = (path.join(output_path, \"\") +\n path.basename(input_file).split(\".fits\")[0] +\n \"_prev.\" + out_type)\n else:\n output_file = None\n\n 
output_files.append(output_file)\n\n # Print name of output file.\n if verbose:\n print(\"Output file names are:\")\n for ofile in output_files:\n if ofile is not None:\n if ofile[-4:] == '.png':\n print(\" Output file: \" + ofile)\n print(\" Output file: \" + ofile.strip('\\.png') +\n '_thumb.png')\n else:\n print(\" Output file: \" + ofile)\n else:\n print(\" Plotting to screen.\")\n\n # Read in the FITS file to determine which instrument it comes from.\n # Print the name of the instrument found in the header if verbose is turned\n # on.\n this_instrument = get_instrument_name(input_file)\n if verbose:\n print(\"Instrument: \" + this_instrument)\n\n # Read in the FITS files and create plots using the local package\n # appropriate for the instrument used in the input file.\n if this_instrument == \"COS\":\n # Get wavelengths, fluxes, flux uncertainties.\n cos_spectrum = specutils_cos.readspec(input_file)\n\n # Get a list of segment names sorted such that the bluest segment is\n # first.\n cos_segment_names = specutils_cos.get_segment_names(cos_spectrum)\n\n # Trim the wavelengths < 900 Angstroms for FUVB segment if optical\n # element used is G140L.\n if (\"FUVB\" in cos_spectrum.segments and cos_spectrum.optical_element ==\n \"G140L\"):\n specutils_cos.extract_subspec(cos_spectrum, \"FUVB\", min_wl=900.)\n\n # Create a stitched spectrum for use when making thumb-size plots.\n stitched_spectrum = specutils.stitch_components(cos_spectrum,\n n_consecutive,\n flux_scale_factor,\n fluxerr_scale_factor,\n segment_names=\n cos_segment_names)\n\n # Calculate plot metrics for the each segment.\n segment_plot_metrics = [\n specutils.calc_plot_metrics(\"cos\",\n cos_spectrum.segments[x].wavelengths,\n cos_spectrum.segments[x].fluxes,\n cos_spectrum.segments[x].fluxerrs,\n cos_spectrum.segments[x].dqs,\n n_consecutive, flux_scale_factor,\n fluxerr_scale_factor)\n for x in cos_segment_names]\n\n # Make \"large-size\" plot.\n for out_type, out_file in zip(output_type, output_files):\n if out_type != \"fits\":\n specutils_cos.plotspec(cos_spectrum, out_type, out_file,\n flux_scale_factor,\n fluxerr_scale_factor,\n segment_plot_metrics,\n dpi_val=dpi_val, output_size=1024,\n debug=debug, full_ylabels=full_ylabels,\n title_addendum=stitched_spectrum[\"title\"],\n optimize=optimize)\n else:\n # Write the spectrum that would be plotted to a binary FITS\n # table.\n specutils_cos.make_fits(cos_spectrum, out_file,\n segment_plot_metrics, input_file)\n\n if not debug and output_type != [\"fits\"]:\n # Calculate plot metrics for the stitched spectrum.\n stitched_plot_metrics = [\n specutils.calc_plot_metrics(\"cos\", stitched_spectrum[\"wls\"],\n stitched_spectrum[\"fls\"],\n stitched_spectrum[\"flerrs\"],\n stitched_spectrum[\"dqs\"],\n n_consecutive, flux_scale_factor,\n fluxerr_scale_factor)]\n\n # Make \"thumbnail-size\" plot, if requested.\n for out_type, out_file in zip(output_type, output_files):\n if out_type != 'fits':\n specutils_cos.plotspec(cos_spectrum, out_type,\n out_file, flux_scale_factor,\n fluxerr_scale_factor,\n stitched_plot_metrics,\n dpi_val=dpi_val, output_size=128,\n stitched_spectrum=stitched_spectrum,\n optimize=optimize)\n\n elif this_instrument == \"STIS\":\n # Get wavelengths, fluxes, flux uncertainties.\n stis_spectrum = specutils_stis.readspec(input_file)\n\n # Get the indices to plot. 
If the number of associations is <=3 then\n # we will plot all of them, but if >3 then only the first, middle, and\n # last association will be plotted, so it's not necessary to stitch or\n # calculate plot metrics for any of the others.\n indices_to_plot = specutils_stis.get_association_indices(\n stis_spectrum.associations)\n\n # Create a stitched spectrum for each association, used when making\n # thumb-size plots. The stitched spectrum joins the orders together (if\n # there is more than one). The length of the returned list is equal to\n # the number of associations that will be plotted.\n stitched_spectra = [\n specutils.stitch_components(x, n_consecutive, flux_scale_factor,\n fluxerr_scale_factor)\n for x in numpy.asarray(stis_spectrum.associations)[indices_to_plot]]\n\n # Calculate plot metrics for the each association that will be\n # plotted. The length of the returned list is equal to the number of\n # associations that will be plotted.\n association_plot_metrics = [\n specutils.calc_plot_metrics(\"stis\", x[\"wls\"], x[\"fls\"], x[\"flerrs\"],\n x[\"dqs\"], n_consecutive,\n flux_scale_factor, fluxerr_scale_factor)\n for x in stitched_spectra]\n\n # Make \"large-size\" plot.\n for out_type, out_file in zip(output_type, output_files):\n if out_type != 'fits':\n specutils_stis.plotspec(stis_spectrum, indices_to_plot,\n stitched_spectra, out_type, out_file,\n flux_scale_factor, fluxerr_scale_factor,\n association_plot_metrics,\n dpi_val=dpi_val, output_size=1024,\n debug=debug, full_ylabels=full_ylabels,\n optimize=optimize)\n\n # Make \"thumbnail-size\" plot, if requested. Notice that in this\n # case we always plot just the first association, by passing only\n # `stitched_spectra[0]`.\n if not debug:\n specutils_stis.plotspec(stis_spectrum, indices_to_plot,\n [stitched_spectra[0]], out_type,\n out_file, flux_scale_factor,\n fluxerr_scale_factor,\n association_plot_metrics,\n dpi_val=dpi_val,\n output_size=128, optimize=optimize)\n\n else:\n raise HSTSpecPrevError(\"'INSTRUME' keyword not understood: \" +\n this_instrument)", "def get_general(soup):\n \n general_info = {}\n general_info.update(get_route_name(soup))\n general_info.update(get_box_data(soup))\n general_info.update(get_description(soup))\n general_info.update(get_hierarchy(soup))\n general_info.update(get_first_img_source(soup))\n\n return general_info", "def get_sentiment_lexicon(datafile):\n user_dict = {}\n item_dict = {}\n feature_dict ={}\n aspect_index = 0\n\n for tupel in datafile.iterrows():\n line = tupel[0]\n user_id = datafile[\"user_id\"][line]\n item_id = datafile[\"business_id\"][line]\n list_len = len(datafile['absa'][line])\n if user_id not in user_dict:\n user_dict[user_id] = []\n if item_id not in item_dict:\n item_dict[item_id] = []\n for i in range(0, list_len):\n feature = datafile['absa'][line][i]['aspect']\n fetaure_confidence = datafile['absa'][line][i]['aspect_confidence']\n polarity = datafile['absa'][line][i]['polarity']\n polarity_confidence = datafile['absa'][line][i]['polarity_confidence']\n if feature not in feature_dict:\n feature_dict[feature] = aspect_index\n aspect_index = aspect_index+1\n user_dict[user_id].append([feature, fetaure_confidence, polarity, polarity_confidence])\n item_dict[item_id].append([feature, fetaure_confidence, polarity, polarity_confidence])\n return [feature_dict, user_dict, item_dict]", "def parse(u):\n rec = {}\n\n try:\n r = requests.get(u, headers=headers)\n\n if r.status_code == 200:\n html = r.text\n soup = BeautifulSoup(html, 'lxml')\n overview_section 
= soup.select('.Raw-s14xcvr1-0 gXqFYO')\n full_name_section = soup.select('.sc-iwsKbI kjxnCg')\n years_of_practice_section = soup.select('.DataField__Data-c3wc7f-1 gLHSHx')\n language_section = soup.select('.DataField__Data-c3wc7f-1 gLHSHx')\n office_location_section = soup.select('.Paragraph-fqygwe-0 cojhks')\n hospital_affiliation_section = soup.select('.Paragraph-fqygwe-0 fwayNy')\n specialties = soup.select('.DataField__Data-c3wc7f-1 gLHSHx')\n education_and_medical_training_section = soup.select('.EducationAndExperience__Item-xn5fll-0 bzYYRk')\n certification_and_licensure_section = soup.select('.Paragraph-fqygwe-0 bQPwuv')\n\n if overview_section:\n overview = overview_section[0].text.replace('\"', '')\n if full_name_section:\n full_name = full_name_section[0].text\n if years_of_practice_section:\n years_of_practice = years_of_practice_section[0].text.strip().replace('\"', '')\n if language_section:\n language = language_section[0].text.strip().replace('\"', '')\n if office_location_section:\n office_location = office_location_section[0].text\n if hospital_affiliation_section:\n hospital_affiliation = hospital_affiliation_section[0].text.strip().replace('\"', '')\n if specialties_section:\n specialties = specialties_section[0].text.replace('\"', '')\n if education_and_medical_training_section:\n education_and_medical_training = education_and_medical_training_section[0].text\n if certification_and_licensure_section:\n certification_and_licensure = certification_and_licensure_section[0].text\n\n\n rec = {'overview': overview, 'full_name': full_name, 'years_of_practice': years_of_practice, 'language': language,\n 'office_location': office_location, 'hospital_affiliation': hospital_affiliation, 'specialties':specialties,\n 'education_and_medical_training': education_and_medical_training,\n 'certification_and_licensure': certification_and_licensure}\n except Exception as ex:\n print('Exception while parsing')\n print(str(ex))\n finally:\n return json.dumps(rec)", "def processing_function(raw):\r\n\r\n # Sort stewarded & unstewarded depts\r\n STEWARDED_DEPTS = set(stewards.keys()) & kt.ALL_DEPTS\r\n UNSTEWARDED_DEPTS = kt.ALL_DEPTS - STEWARDED_DEPTS\r\n\r\n ##############################\r\n # Filter data in all the ways\r\n ##############################\r\n actives = kt.filterdata(raw, kt.selectors.allactives)\r\n nc = kt.filterdata(actives, kt.selectors.northcampus)\r\n\r\n # International Students\r\n itnl = kt.filterdata(actives, kt.selectors.itnl)\r\n permres = kt.filterdata(actives, kt.selectors.permres)\r\n\r\n # Stewarded / Unstewarded\r\n stewarded= kt.filterdata(\r\n actives,\r\n lambda person: kt.selectors.bydept(person,STEWARDED_DEPTS)\r\n )\r\n unstewarded= kt.filterdata(\r\n actives,\r\n lambda person: kt.selectors.bydept(person,UNSTEWARDED_DEPTS)\r\n )\r\n\r\n # Hire Date\r\n newhires = kt.filterdata(\r\n actives, \r\n lambda person: kt.selectors.hiredafter(person,NEW_HIRE_DATE)\r\n )\r\n\r\n oldhires = kt.filterdata(\r\n actives, \r\n lambda person: kt.selectors.hiredbefore(person,NEW_HIRE_DATE)\r\n )\r\n\r\n nohiredate = kt.filterdata(actives, kt.selectors.nohiredate)\r\n\r\n # Degree Program\r\n phd = kt.filterdata(\r\n actives, \r\n lambda person: kt.selectors.bydegree(person,['PhD'])\r\n )\r\n masters = kt.filterdata(\r\n actives, \r\n lambda person: kt.selectors.bydegree(person, kt.MASTERS)\r\n )\r\n\r\n ###############\r\n # Count things\r\n ###############\r\n\r\n # Unit sizes\r\n bargaining_unit_size = len(actives)\r\n overall_members = 
kt.count_duespayers(actives)\r\n\r\n # Number of actives currently stewarded\r\n total_stewarded = len(stewarded)\r\n total_unstewarded = len(unstewarded)\r\n stewarded_members = kt.count_duespayers(stewarded)\r\n unstewarded_members = kt.count_duespayers(unstewarded)\r\n\r\n # International students\r\n total_intl = len(itnl)\r\n total_permres = len(permres)\r\n intl_members = kt.count_duespayers(itnl)\r\n permres_members = kt.count_duespayers(permres)\r\n\r\n # New Hires\r\n total_newhires = len(newhires)\r\n total_oldhires = len(oldhires)\r\n total_nohiredate= len(nohiredate)\r\n newhire_members = kt.count_duespayers(newhires)\r\n oldhire_members = kt.count_duespayers(oldhires)\r\n\r\n\r\n # Degree Program\r\n total_phd = len(phd)\r\n total_masters = len(masters)\r\n phd_members = kt.count_duespayers(phd)\r\n masters_members = kt.count_duespayers(masters)\r\n \r\n\r\n ######################################\r\n # Derived Results\r\n ######################################\r\n labels = []\r\n results= []\r\n\r\n labels += ['Current Bargaining Unit Size']\r\n results+= [bargaining_unit_size]\r\n\r\n labels += ['Relative Number of GSIs with >1 Steward (%)']\r\n results+= [(100.0*total_stewarded)/bargaining_unit_size]\r\n\r\n labels += ['Relative Number of International Students (%)']\r\n results+= [(100.0*total_intl)/bargaining_unit_size]\r\n\r\n # labels += ['Relative Number of NC Permanent Resident Students (%)']\r\n # results+= [(100.0*total_permres)/bargaining_unit_size]\r\n\r\n labels += ['']\r\n results+= ['']\r\n\r\n labels += ['Overall GEO Membership (%)']\r\n results+= [(100.0*overall_members)/bargaining_unit_size]\r\n\r\n labels += ['Membership Among Stewarded Depts (%)']\r\n results+= [(100.0*stewarded_members)/total_stewarded]\r\n\r\n labels += ['Membership Among Unstewarded Depts (%)']\r\n results+= [(100.0*unstewarded_members)/total_unstewarded]\r\n\r\n labels += ['']\r\n results+= ['']\r\n\r\n labels += ['Relative # of International Students (%)']\r\n results+= [(100.0*total_intl)/bargaining_unit_size]\r\n\r\n labels += ['Membership Among International Students (%)']\r\n results+= [(100.0*intl_members)/total_intl]\r\n\r\n # labels += ['Membership Among Permanent Residents (%)']\r\n # results+= [(100.0*permres_members)/total_permres]\r\n\r\n labels += ['']\r\n results+= ['']\r\n\r\n labels += ['Relative # of New Hires (%)']\r\n results+= [(100.0*total_newhires)/bargaining_unit_size]\r\n\r\n labels += ['Membership Among New Hires (%)']\r\n results+= [(100.0*newhire_members)/total_newhires]\r\n\r\n labels += ['Membership Among Old Hires (%)']\r\n results+= [(100.0*oldhire_members)/total_oldhires]\r\n\r\n labels += ['Number of People w/o Known Hire Dates']\r\n results+= [total_nohiredate]\r\n\r\n labels += ['']\r\n results+= ['']\r\n\r\n labels += ['Relative # of Masters Students (%)']\r\n results+= [(100.0*total_masters)/bargaining_unit_size]\r\n\r\n labels += ['Membership Among Masters Students (%)']\r\n results+= [(100.0*masters_members)/total_masters]\r\n\r\n labels += ['Membership Among PhD Students (%)']\r\n results+= [(100.0*phd_members)/total_phd]\r\n\r\n labels += ['']\r\n results+= ['']\r\n\r\n\r\n\r\n # Display summary results\r\n print('\\n')\r\n display_results(labels,results)\r\n\r\n print('Unstewarded Departments:')\r\n for d in UNSTEWARDED_DEPTS: print(d)\r\n\r\n print('\\n')\r\n\r\n\r\n\r\n # Print summary results to csv\r\n kt.writecsv_summary(zip(labels,results), OUT_FILE)\r\n\r\n\r\n\r\n # dump all local variables to file\r\n # v = locals()\r\n\r\n\r\n\r\n 
return None" ]
[ "0.57579595", "0.5754335", "0.55619776", "0.545294", "0.5446325", "0.51942104", "0.51507777", "0.51323617", "0.5105886", "0.5052227", "0.5037823", "0.49726292", "0.4881835", "0.4851375", "0.48495308", "0.48304343", "0.48082933", "0.47642702", "0.4755273", "0.47469255", "0.47447446", "0.47133496", "0.46433732", "0.46357328", "0.46329084", "0.4603979", "0.45964986", "0.45959252", "0.45818478", "0.45591724", "0.4555059", "0.45512167", "0.45434", "0.4537788", "0.4531281", "0.4528349", "0.45185888", "0.45160326", "0.44937423", "0.4486711", "0.44865462", "0.44800955", "0.44783604", "0.44681746", "0.44622734", "0.4454868", "0.44529885", "0.44459757", "0.44457835", "0.4441328", "0.4440596", "0.4435347", "0.44337818", "0.44314295", "0.44213167", "0.4412668", "0.44091672", "0.44081798", "0.4404994", "0.44041163", "0.44038698", "0.4401886", "0.4401223", "0.43871608", "0.4387082", "0.4378438", "0.4372773", "0.4371993", "0.4370425", "0.43682024", "0.43658337", "0.43596742", "0.4353401", "0.4349448", "0.43488747", "0.43488744", "0.43460852", "0.43416244", "0.43415275", "0.4339209", "0.43301272", "0.43263757", "0.43241414", "0.43238935", "0.43218496", "0.43142238", "0.43141845", "0.43033308", "0.42981333", "0.42948568", "0.4292998", "0.42911696", "0.4283256", "0.42824557", "0.42808002", "0.42764026", "0.42735225", "0.42691842", "0.42672002", "0.42659274" ]
0.74647856
0
Return size of the dataset
def __len__(self): return self.get_num_sequence()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dataset_size(self):\n return self.dataset.size", "def __len__(self):\n return self._dataset.size(dirs=self._dirs)", "def dataset_size(self):\n if not self._dataset_size:\n # pylint: disable=attribute-defined-outside-init\n self._dataset_size = count_file_lines(\n self._hparams.source_dataset.files)\n return self._dataset_size", "def __len__(self):\n return self.dataset.shape[0]", "def size(self):\r\n return len(self._train_datas)", "def size(self, index):\n return self.base_dataset.size(index)", "def __len__(self):\n\t\treturn min(len(self.dataset), self.opt.max_dataset_size)", "def size(self) -> int:\r\n return self.da.length()", "def size(self) -> int:\n size = self.da.length()\n return size", "def data_size(self) -> int:\n return len(self.__labels)", "def size(self):\n return self.data.size", "def size(self) -> int:\n return self.da.length()", "def size(self) -> int:\n return self.da.length()", "def size(self) -> int:\n return self.da.length()", "def size(self) -> int:\n return self.da.length()", "def __len__(self):\n return len(self.dataset)", "def size(self):\n return len(self.data)", "def size(self):\n return len(self.data)", "def size(self):\n return len(self.data)", "def size(self):\n return len(self.data)", "def size(self):\n return len(self.data)", "def _get_dataset_size(loader):\n if isinstance(loader, (tuple, list)):\n return len(loader[0].dataset)\n else:\n return len(loader.dataset)", "def get_size(self):\n return self._data_size", "def __len__(self):\n if not self.opt.union:\n return min(len(self.dataset), self.opt.max_dataset_size)\n else:\n return len(self.batch_sampler)", "def size(self):\n return len(self._data)", "def size(self):\n ret = 0\n for ii in self.__data:\n ret += int(ii.get_size())\n return ret", "def count(self):\r\n return self.data_array.size", "def data_count(self):\n return(len(self.data))", "def __len__(self) -> int:\n import h5py\n\n with h5py.File(\n os.path.join(self.root, self.data_dir, self.img_file_name), \"r\"\n ) as f:\n num_datapoints: int = f[self.split][\"pv_log\"].shape[0]\n\n return num_datapoints", "def __len__(self):\n return self.data.num_samples", "def __len__(self):\n return int(np.floor(len(self.dataset_df) / self.batch_size))", "def size(self):\n\t\t# Better to have this as a method rather than property, as self._dataframe may change\n\t\treturn self._dataframe.shape[0]", "def __len__(self):\n ret = self.data.shape[0]\n return ret", "def get_size(self):\n return (\n sys.getsizeof(self.children) +\n sys.getsizeof(self.parent) +\n sys.getsizeof(self.dataset_id) +\n sys.getsizeof(self.k) +\n self.filter.get_size()\n )", "def __len__(self):\r\n if self.is_superset:\r\n length = 0\r\n for ds in self.data:\r\n length += len(ds)\r\n return length\r\n else:\r\n return len(self.data)", "def __get_dataset_size(input_):\n in_type = __get_input_type(input_)\n b_unit = 1024.0 * 1024.0\n if in_type == \"numpy_array\":\n size_in_MB = input_.nbytes / b_unit\n elif in_type == \"hdf\":\n size_in_MB = os.path.getsize(input_) / b_unit\n else:\n list_file = losa.find_file(input_ + \"/*.tif*\")\n if list_file:\n size_1_file = np.asarray(Image.open(list_file[0])).nbytes / b_unit\n else:\n size_1_file = 0.0\n size_in_MB = len(list_file) * size_1_file\n return size_in_MB", "def getDataSetCount(self):\n\t\treturn int(self.numberOfImages / self.slicesPerTimepoint)", "def __len__(self):\n if self.mode.lower() == 'train':\n return len(self.train_data)\n elif self.mode.lower() == 'val':\n return len(self.val_data)\n elif self.mode.lower() == 'test':\n 
return len(self.test_data)\n else:\n raise RuntimeError(\"Unexpected dataset mode. \"\n \"Supported modes are: train, val and test\")", "def __len__(self):\n return len(self.dataset) * self.samples_per_pair", "def dataSize(self) -> int:\n return self._dataSize", "def get_train_data_size(self):\n return len(self.pipeline.data['train'])", "def get_nbytes(dset):\n if 'nbytes' in dset.attrs:\n # look if the dataset has an attribute nbytes\n return dset.attrs['nbytes']\n elif hasattr(dset, 'value'):\n # else extract nbytes from the underlying array\n return dset.size * numpy.zeros(1, dset.dtype).nbytes", "def get_data_size(self):\n if self.doc_ftrs is not None:\n data = self.doc_ftrs\n elif self.query_ftrs:\n data = self.query_ftrs\n elif self.usr_ftrs:\n data = self.usr_ftrs\n else:\n raise ValueError('Cannot infer data size.')\n data_shape = tf.shape(data)\n return data_shape[0], data_shape[1]", "def size(self):\n return self.dtype.itemsize", "def __len__(self):\n if self.mode.lower() == 'train':\n return len(self.train_data)\n if self.mode.lower() == 'val':\n return len(self.val_data)\n if self.mode.lower() == 'test':\n return len(self.test_data)\n\n raise RuntimeError(\"Unexpected dataset mode. \"\n \"Supported modes are: train, val and test\")", "def get_total_item_size(dataset):\n total_items = 0\n for element in dataset:\n total_items += 1\n return total_items", "def training_dataset_size(self):\n\n if not self.cs_learning and hasattr(FLAGS, 'orig_size'):\n return FLAGS.orig_size\n\n traindata_size_dir = os.path.join(self.cache_path, 'ds_sizes')\n ensure_dir(traindata_size_dir)\n if not hasattr(FLAGS, 'train_split'):\n setattr(FLAGS, 'train_split', 'train')\n\n size_cache_file = os.path.join(traindata_size_dir, '{}_{}'.format(FLAGS.dataset.lower(), FLAGS.train_split))\n\n if os.path.exists(size_cache_file):\n with open(size_cache_file) as f:\n ds_size = int(f.readline().strip())\n else:\n ds = load_ds() # Loads the dataset.\n [data_X, _, _] = ds.load()\n ds_size = len(data_X)\n with open(size_cache_file, 'w') as f:\n f.write(str(ds_size))\n\n return ds_size", "def __len__(self):\n if self._train:\n return len(self._train_data)\n return len(self._test_data)", "def __len__(self):\n if self._train:\n return len(self._train_data)\n return len(self._test_data)", "def size(self) -> Tuple[groupable, pdarray]:\n return self.count()", "def size(self):\n\t\treturn self.dims", "def ndarray_size(self) -> int:\n pass", "def nbytes(self):\n\n return self.data.type.datasize", "def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def size(self):\n return self.N", "def __len__(self):\n return len(self.rimgdataset)", "def __len__(self):\r\n return len(self.train_data)", "def size(self):\n size = 0\n size += self.data.size * sys.getsizeof(self.data)\n return size / 1024.0 / 1024.0 / 1024.0", "def size(self):\r\n return self.info().size", "def graph_data_size(self) -> int:\n return int(self.graph_tuple_stats.graph_data_size or 0)", "def size(self) -> int:", "def size() -> int:\n ...", "def len (self):\n\t\treturn len (self.data)", "def dataDimensions(data):\n logging.info('Number of rows of data: %s' % len(data))\n logging.info('Number of columns 
of data: %s' % len(data[1]))", "def __len__(self) -> int:\n if self.preload:\n return len(self.data_ram)\n else:\n return len(self.data)", "def size(self):\n return len(self.records)", "def size(self):\r\n return self.__length", "def get_valid_data_size(self):\n return len(self.pipeline.data['test'])", "def nbytes(self) -> int:\n\n return self.data.nbytes + self.shape.nbytes", "def return_size(df):\n return round(sys.getsizeof(df) / 1e9, 2)", "def size(self):\n return self.__length", "def size(self) -> int:\n num_columns = len(self._internal.data_spark_columns)\n if num_columns == 0:\n return 0\n else:\n return len(self) * num_columns # type: ignore[arg-type]", "def dataset_length(data_loader):\n sample = next(iter(data_loader))\n batch_size = None\n\n if isinstance(sample, dict):\n try:\n if isinstance(sample[\"label\"], torch.Tensor):\n batch_size = sample[\"label\"].shape[0]\n else:\n # in case of sequence of inputs use first input\n batch_size = sample[\"label\"][0].shape[0]\n except:\n KeyError(\"Expects key to be 'label'.\")\n else:\n if isinstance(sample[1], torch.Tensor):\n batch_size = sample[1].shape[0]\n else:\n # in case of sequence of inputs use first input\n batch_size = sample[1][0].shape[0]\n return len(data_loader) * batch_size", "def size(self):\n pass", "def size(self):\n pass", "def size(self):\n pass", "def length(self, data: Sequence[Sequence[torch.Tensor]]) -> int:\n return self.n_batch", "def size(self):\r\n return self.size.data", "def size(self):\n return self._N", "def __len__(self):\n _, timesteps, height, width = self.data.shape\n height //= self.size\n width //= self.size\n\n if self.subset == 'train':\n out = self.length\n elif self.subset == 'all':\n out = height * width\n else:\n out = (height // 2) * (width // 2)\n\n if not self.time:\n out *= timesteps\n\n return out", "def size(self) -> int:\n return self.stat().size", "def get_size(self):\n # return the size along the index dimension\n size = 0\n if self._data is not None:\n size = shape(self._data)[self.index_dimension]\n\n return size", "def size(self):\n return self._length", "def shape(self):\n return self.dataset.shape", "def size(self) -> int:\n raise NotImplementedError", "def size(self):\n\t\treturn self._size", "def __len__(self):\n # type: () -> int\n return len(self.data)", "def size(self):\n raise NotImplementedError", "def size(self):\n return self.__size", "def GetDataSetSize(ds_type, name_len, num_elements, element_multipler):\n\n # Number of bytes in the data type\n datatype_size = 4\n if ds_type == 50: # Byte Datatype\n datatype_size = 1\n elif ds_type == 20: # Int Datatype\n datatype_size = 4\n elif ds_type == 10: # Float Datatype\n datatype_size = 4\n\n return ((num_elements * element_multipler) * datatype_size) + Ensemble.GetBaseDataSize(name_len)", "def get_size(self):\n return len(self.table)", "def __len__(self):\n return self.sample_df.shape[0]", "def __len__(self):\n return self.sample_df.shape[0]", "def size(self) -> int:\n return self.length", "def __len__(self): \r\n length = len(self.data) - 2* self.skip_window\r\n #print ('length', length)\r\n return length\r\n #raise NotImplementedError('Implement the __len__ method of the dataset')\r", "def __len__(self) -> int:\n return len(self.data)", "def __len__(self) -> int:\n return len(self.data)", "def __len__(self) -> int:\n return len(self.data)", "def length(self):\n return len(self.data)", "def length(self):\n return len(self.data)", "def length(self):\n return len(self.data)" ]
[ "0.8979698", "0.8526949", "0.82585645", "0.81426746", "0.8124066", "0.81118333", "0.8078935", "0.80428666", "0.8037988", "0.8028507", "0.8013156", "0.80074346", "0.80074346", "0.80074346", "0.80074346", "0.7973197", "0.7925562", "0.7925562", "0.7925562", "0.7925562", "0.7925562", "0.79165906", "0.7871422", "0.7866867", "0.78506833", "0.78419423", "0.7783012", "0.77493614", "0.7732343", "0.76998043", "0.76814306", "0.7636382", "0.7626852", "0.7621517", "0.7604528", "0.7592284", "0.7581702", "0.75778383", "0.75713325", "0.75685847", "0.75637156", "0.753051", "0.7523375", "0.75054306", "0.748043", "0.74688375", "0.74596196", "0.74427867", "0.74427867", "0.74415475", "0.74174124", "0.7413286", "0.74091834", "0.74024034", "0.7386499", "0.7375304", "0.73683214", "0.735782", "0.7354785", "0.7352165", "0.73517495", "0.73486227", "0.7346073", "0.73204917", "0.7312012", "0.73072386", "0.730197", "0.73018533", "0.72806895", "0.7279244", "0.72775817", "0.7276669", "0.7272834", "0.72649026", "0.72649026", "0.72649026", "0.7259475", "0.7243757", "0.7239879", "0.7235242", "0.7230251", "0.7227965", "0.7226957", "0.72254926", "0.7218193", "0.72177404", "0.7217473", "0.72159976", "0.72117305", "0.72080153", "0.7205657", "0.72025245", "0.72025245", "0.72000295", "0.7198731", "0.71976596", "0.71976596", "0.71976596", "0.7196381", "0.7196381", "0.7196381" ]
0.0
-1
Number of studies in a dataset.
def get_num_sequence(self): return len(self.study_list)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_num_datasets(self, data):\n dsets = set()\n for items in data:\n dsetid = items[3]\n dsets.add(dsetid)\n return len(dsets)", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def num_students(self,dd=\"\"):\n\t\tif dd==\"\":\n\t\t\tdd=datadrop.objects.all().filter(cohort=self.cohort)\\\n\t\t\t\t.order_by('-date')[0]\n\t\telif isinstance(dd,str):\n\t\t\tdd=datadrop.objects.get(name=dd,cohort=self.cohort)\n\t\treturn len(grade.objects.filter(subject=self).distinct()\\\n\t\t\t.values_list('upn'))", "def getDataSetCount(self):\n\t\treturn int(self.numberOfImages / self.slicesPerTimepoint)", "def studNumber(self):\n return len(self.students)", "def num_test_samples(self):\n if self._num_test_samples is None:\n for key, value in self._test_data.items():\n self._num_test_samples[key] = len(value[0])\n return self._num_test_samples", "def get_n_trials(self, study_id: int, state: Optional[TrialState] = None) -> int:\n raise NotImplementedError", "def count_samples(self):\n return sum(SEQ_LENGTHS)", "def number_of_variables(dataset, name_of_variable):\r\n first_row = dataset[0].keys()\r\n num = 0\r\n for variable in first_row:\r\n if name_of_variable in variable:\r\n num += 1 \r\n return num", "def test_set_count(self) -> int:\n return pulumi.get(self, \"test_set_count\")", "def _number_of_samples(self):\n return len(self._raw_data.samples)", "def _num_samples(x: npt.ArrayLike) -> int:\n if not hasattr(x, \"__len__\") and not hasattr(x, \"shape\"):\n if hasattr(x, \"__array__\"):\n x = np.asarray(x)\n else:\n raise TypeError(\"Expected sequence or array-like, got %s\" % type(x))\n if hasattr(x, \"shape\"):\n if len(x.shape) == 0:\n raise TypeError(\"Singleton array %r cannot be considered\" \" a valid collection.\" % x)\n # Check that shape is returning an integer or default to len\n # Dask dataframes may not return numeric shape[0] value\n if isinstance(x.shape[0], numbers.Integral):\n return x.shape[0]\n else:\n return len(x)\n else:\n return len(x)", "def get_number_of_unique_students(self):\n unique_students = set()\n for row in self.responses:\n unique_students.add(row.student)\n return len(unique_students)", "def n(self):\n return len(self.data.sites)", "def count_scientists(project):\n return project.experiment_set.all().distinct('user').count()", "def countSites(self):\n self.ni = len(self.sites)\n return self.ni", "def getSampleCount(self,study_id):\n try:\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n count=0\n results=con.cursor().callproc('get_sample_count', [study_id,\\\n count])\n \n return results[1]\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def training_set_count(self) -> int:\n return pulumi.get(self, \"training_set_count\")", "def getNrStations(self):\n return len(self.stationData)", "def count(self):\n return self.data_container.count", "def count_data(self):\n try:\n ndata = len(self.x)\n logger.info(\"Number of data points: {0}\".format(ndata))\n except AttributeError:\n logger.error(\"Data object has not been defined\")\n ndata = 0\n return ndata", "def count(self,value = 1):\n n = 0\n for s in self.sample:\n if s == value:\n n += 1\n return n", "def getNbStations(self) :\n return len(self._stations)", "def num_train_samples(self):\n if self._num_training_samples is None:\n for key, value in self._training_data.items():\n self._num_training_samples[key] = len(value[0])\n return self._num_training_samples", "def num_sampled(self):\n return 
self._historical_data.num_sampled", "def getSampleCount(self):\r\n return len(self._data)", "def get_num_train_samples(self):\n raise NotImplementedError", "def num_samples(self):\n raise NotImplementedError()", "def num_classes(self,dd=\"\"):\n\t\tif dd==\"\":\n\t\t\tdd=datadrop.objects.all().filter(cohort=self.cohort)\\\n\t\t\t\t.order_by('-date')[0]\n\t\telif isinstance(dd,str):\n\t\t\tdd=datadrop.objects.get(name=dd,cohort=self.cohort)\n\t\treturn self.classgroup_set.all().count()", "def get_number_files(dataset):\n HOME = os.environ['HOME']\n # cmds = ['das_client.py', '--query', 'summary dataset=%s' % dataset, '--format=json',\n # '--key=%s/.globus/userkey.pem' % HOME, '--cert=%s/.globus/usercert.pem' % HOME]\n cmds = ['das_client.py', '--query', 'summary dataset=%s' % dataset, '--format=json']\n output = subprocess.check_output(cmds, stderr=subprocess.STDOUT)\n summary_dict = json.loads(output)\n return int(summary_dict['data'][0]['summary'][0]['nfiles'])", "def count_indications(self) -> int:\n return self._count_model(Indication)", "def well_count(self):\n return(len(self.wells))", "def get_number_of_stools(self):\n return len(self._stools)", "def getNumberOfTraces(self) -> int:\n\n if not self.debug:\n self.myFieldFox.write(\"CALC:PAR:COUN?\")\n ret = self.myFieldFox.read()\n else:\n ret = 4\n return ret", "def number_of_sites(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"number_of_sites\")", "def number_of_sites(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"number_of_sites\")", "def get_count(cls):\n total = 0\n for counter in SimpleCounterShard.objects.all():\n total += counter.count\n return total", "def get_number_samples(self):\n return self.samples.shape[0]", "def count(self):\n\n raise NotImplementedError", "def get_num_samples(self):\n return self._num_samples", "def data_count(self):\n return(len(self.data))", "def get_datasets_count(request):\n organization_id = request.GET.get('organization_id', '')\n datasets_count = Organization.objects.get(\n pk=organization_id).import_records.all().distinct().count()\n\n return {'status': 'success', 'datasets_count': datasets_count}", "def get_num_samples(self) -> int:\n # must be implemented in subclass\n raise NotImplementedError", "def num_samples(self):\n return self._ll_tree_sequence.get_num_samples()", "def __len__(self):\n\n if self.is_finite_set:\n size = 0\n for set in self.sets:\n size += len(set)\n return size\n else:\n raise ValueError(\"'%s' is not a finite set.\" % self)", "def __len__(self):\n return self._dataset.size(dirs=self._dirs)", "def count():", "def datacounts(self):\n return self._properties[\"datacounts\"]", "def count_standard_residues(self):\n n = 0\n for na in self.iter_standard_residues():\n n += 1\n return n", "def count(self):\r\n return self.data_array.size", "def test_count(self):\n return len(self.tests) + sum(suite.test_count for suite in self.suites)", "def getSampleCount(self):\r\n return len(self._biom_table.SampleIds)", "def count_measurements(database: Database) -> int:\n return int(database.measurements.count_documents(filter={}))", "def get_number_of_cheeses(self):\n number = 0\n for i in range(len(self._stools)):\n number += len(self._stools[i])\n return number", "def getNumEvents(dbsApi, dset):\n summary = getDsetSummary(dbsApi, dset)\n # it means the dataset was not produced\n if summary[0]['num_file'] == 0:\n return -1\n return summary[0]['num_event']", "def __len__(self):\n return self.data.num_samples", "def get_location_count_from_studies(cls, studies):\n\n 
activations = db.session.query(cls.location_id, func.count(cls.pmid\n )).filter(cls.pmid.in_(studies), cls.location_id < 81925\n ).group_by(cls.pmid).all()\n\n return activations", "def data_count(self):\r\n\r\n shp = self.df.shape\r\n row_count = shp[0]\r\n return row_count", "def GetTrainSampleCount(self) :\r\n\t\ttry :\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['SampleSetCount'],(0,))\r\n\t\t\tCurSampleCount = self.DB_Cursor.fetchone()[0]\r\n\t\texcept Exception as detail:\r\n\t\t\tlogging.error(\"Failed to get count of training samples in database: %s\"%detail)\r\n\t\treturn CurSampleCount", "def num_tracked_samples(self, u=None):\n u = self.virtual_root if u is None else u\n return self._ll_tree.get_num_tracked_samples(u)", "def N_genes_in_dataset(self):\n return len(self.all_genes_in_dataset)", "def __len__(self) -> int:\n import h5py\n\n with h5py.File(\n os.path.join(self.root, self.data_dir, self.img_file_name), \"r\"\n ) as f:\n num_datapoints: int = f[self.split][\"pv_log\"].shape[0]\n\n return num_datapoints", "def varCount(self, aKind):\n return self.counts[aKind]", "def num_streams(self):\n self._num_streams = self.lib.iperf_get_test_num_streams(self._test)\n return self._num_streams", "def num_training_examples(self):", "def get_num_classes(dataset: str):\n if dataset == \"imagenet\" or dataset == \"kitti\":\n return 1000\n elif dataset == \"cifar10\" or dataset == \"mnist\" or dataset == \"fashion_mnist\":\n return 10", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def studies(self):\n return self._study_queryset", "def count_sonata(self):\n return self.run_query(\"count( /mediawiki/page[starts-with (title, 'Sonata') ] )\")", "def total_exs(dataset):\n total = 0\n for article in dataset['data']:\n for para in article['paragraphs']:\n total += len(para['qas'])\n return total", "def count(self):\n # TODO not implemented yet\n return 0", "def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def dataCount(self, collectionName):\n count = collectionName.find().count()\n return count", "def size(self):\r\n return len(self._train_datas)", "def get_num_cams(self, data):\n cams = set()\n for items in data:\n camid = items[2]\n cams.add(camid)\n return len(cams)", "def num_samples(self, u=None):\n u = self.virtual_root if u is None else u\n return self._ll_tree.get_num_samples(u)", "def num_wells(self):\n return self.info_wells['well'].nunique()", "def count(self):\n return self._reduce_for_stat_function(F.count, only_numeric=False)", "def __len__(self):\n if self._train:\n return len(self._train_data)\n return len(self._test_data)", "def __len__(self):\n if self._train:\n return len(self._train_data)\n return len(self._test_data)", "def file_count(self) -> int:\n if self.dataset is None:\n raise ValueError('No known dataset found!')\n return self._max_file_count", "def numPostings(years):\n\tcount = []\n\tfor year in years:\n\t\tfilename = \"SmartEnergy\" +str(year) +\".xlsx\"\n\t\tDB = pd.read_excel(filename, sheet_name = 'Filters')\n\t\tcount.append(DB.iloc[10][1])\n\treturn count", "def num_injectors(self):\n injectors = 
self.info_wells.groupby('well_type').get_group('inj')\n return injectors['well'].nunique()", "def n_facets(self):\n return self.n_inequalities()", "def count_data_items(fileids, train=True):\n sizes = 28000 if train else 22500\n return len(fileids) * sizes", "def getNumStatDataFiles(self):\n return self.nStatDataFiles", "def getSegmentCount(self) -> int:\n ...", "def size(self, index):\n return self.base_dataset.size(index)", "def num_examples(self):\r\n raise NotImplementedError", "def getNrSamples(self): \r\n return self.numSamples", "def __len__(self):\n if self.mode.lower() == 'train':\n return len(self.train_data)\n elif self.mode.lower() == 'val':\n return len(self.val_data)\n elif self.mode.lower() == 'test':\n return len(self.test_data)\n else:\n raise RuntimeError(\"Unexpected dataset mode. \"\n \"Supported modes are: train, val and test\")", "def count(self, value: object) -> int:\n count = 0\n for _ in range(self.da.length()):\n if self.da[_] == value:\n count += 1\n return count", "def generic_record_count(data_df: Optional[DataFrame]) -> int:\n return len(data_df)", "def count_statements(self):\n query = read_query('content exploration/count_statements')\n response = self._submit_query(query)\n return response[0]['count']['value']", "def count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"count\")", "def _count_parties(data_set): #DEMOCRATS, THEN REPUBLICANS\r\n reps = 0\r\n dems = 0\r\n for data_point in data_set:\r\n if data_point.dat_party == \"R\": reps+=1\r\n if data_point.dat_party == \"D\": dems+=1\r\n\r\n return (dems, reps)", "def n_replicates(self):\n return self.data.n_replicates.values" ]
[ "0.6843366", "0.67893463", "0.6728229", "0.64195675", "0.62901825", "0.6255575", "0.6206059", "0.6124002", "0.61171126", "0.6110871", "0.6106557", "0.6096237", "0.607423", "0.60537356", "0.6051849", "0.60323846", "0.6030147", "0.60260516", "0.5990028", "0.59870434", "0.5966542", "0.59301263", "0.5929231", "0.59264326", "0.5924072", "0.59232503", "0.59170884", "0.5909757", "0.59087133", "0.58707654", "0.58639145", "0.5838329", "0.5830604", "0.5825748", "0.58236796", "0.58236796", "0.581947", "0.5813168", "0.5812991", "0.581176", "0.5810716", "0.579176", "0.5789328", "0.57783586", "0.5777259", "0.57734346", "0.5768192", "0.57652175", "0.5763228", "0.5762", "0.5760115", "0.5749804", "0.5736761", "0.57253957", "0.57211304", "0.57150424", "0.56988484", "0.5678725", "0.5677924", "0.56739366", "0.5666858", "0.56617445", "0.56469136", "0.56468403", "0.5643477", "0.56390226", "0.5639004", "0.5639004", "0.5639004", "0.5639004", "0.5638628", "0.5637878", "0.5631459", "0.56283975", "0.5628013", "0.56242687", "0.562272", "0.5621303", "0.56212354", "0.56204736", "0.56096065", "0.5594964", "0.5594964", "0.55898833", "0.5582909", "0.5580671", "0.55763626", "0.5574547", "0.55739397", "0.5566914", "0.55475193", "0.55459744", "0.5543035", "0.55397576", "0.55382884", "0.5537173", "0.5536857", "0.55340225", "0.5530099", "0.552877" ]
0.55716085
89
Name of the dataset.
def get_name(self): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dataset_name(self):\n return self.dataset.name", "def dataset_name(self):\n return self._dataset_name", "def get_dataset_name(self):\n raise NotImplementedError", "def get_dataset_name(self):\n return self.dataset_name", "def __get_dataset_name(self):\n d = gdal.Open(self.fname)\n # Get band metadata\n b = d.GetRasterBand(1)\n md = b.GetMetadata()\n\n if 'data_var' in md:\n return md['data_var']\n else:\n fnames = d.GetFileList()\n if len(fnames) > 2:\n d = gdal.Open(fnames[1])\n # Get band metadata\n b = d.GetRasterBand(1)\n md = b.GetMetadata()\n if 'data_var' in md:\n return md['data_var']\n else:\n return 'data'\n else:\n return 'data'", "def _dataset_name(self):\n return f'Libri{self.task}Mix'", "def construct_dataset_name(self, *args):\n raise NotImplementedError", "def get_archive_name(self) -> str:\n # TODO: Support for user-defined or metadata-based (e.g. title) name\n return \"dataset\"", "def get_dataset_name():\n return os.getenv(\"AICROWD_DATASET_NAME\", \"cars3d\")", "def name(self):\n return 'data_extraction_for_' + '_'.join(self.names).lower()", "def dataset_id(self) -> str:\n return self._dataset_id", "def dataset_id(self) -> str:\n return pulumi.get(self, \"dataset_id\")", "def dataset_id(self) -> str:\n return pulumi.get(self, \"dataset_id\")", "def dataset_id(self) -> str:\n return pulumi.get(self, \"dataset_id\")", "def get_data_name(self, idx):\n name = None\n if type(idx) is int:\n n = self.data_count()\n assert 0 <= idx <= n - 1, \"Bad data index\"\n name = self.data[idx].name\n return(name)", "def dataset_names(self) -> List[str]:\n return list(self._datasets.keys())", "def data_name(self):\n return \"Bispectrum\"", "def get_datasetID(self):\n\t\treturn self.dsDoc['about']['datasetID']", "def data_filename(self) -> str: # type: ignore[return-value]\n return os.path.abspath(self.name) # type: ignore", "def name(self) -> str:\n return self.data['name']", "def dataName(self, role):\n return None", "def name(self):\n return self._data.get('name')", "def name(self):\n return self._data.get('name')", "def name(self):\n pass", "def name(self) -> str:\n pass", "def name(self) -> str:\n pass", "def name(self) -> str:\n pass", "def name(self) -> str:\n pass", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self) -> str:\n return self._id_data.get(\"name\", \"\")", "def unique_dataset_name(prefix: str = \"selenium-dataset\"):\n return f'{prefix}-{uuid.uuid4().hex[:8]}'", "def name(self):\n raise NotImplementedError # pragma: no cover", "def name(self):\r\n return self._data['name']", "def getCoaddDatasetName(self):\n warpType = self.config.warpType\n suffix = \"\" if warpType == \"direct\" else warpType[0].upper() + warpType[1:]\n return self.config.coaddName + \"Coadd\" + suffix", "def dataset_by_name(name):\n return _datasets[name.lower()]", "def get_datasetID(self):\n\t\treturn self.prDoc['inputs']['data'][0]['datasetID']", "def Name(self, default=None):\n return self.data.get('name', default)", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return 
self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self) -> str:\n raise NotImplementedError", "def set_filename(self, name):\n self.ds_filename = name", "def data_center_name(self) -> str:\n return pulumi.get(self, \"data_center_name\")", "def name(self):\r\n pass", "def get_dataset(name):\n if name == 'cityscapes':\n return Cityscapes", "def name(self):\n return self.NAME", "def name(self):\n raise NotImplementedError()", "def name(self):\n raise NotImplementedError()", "def get_dataset_names(self, include = ['*'], exclude = []):\n \n raise NotImplementedError('get_dataset_names')", "def name (self):\n return self._name", "def get_filename(self):\n return self.ds_filename", "def name(self):\n return self._filename", "def name(self):\n\t\treturn self._name", "def name(self):\n\t\treturn self._name", "def name(self):\r\n return self.data[\"name\"]", "def name(self) -> str:\n return self.__name", "def name(self) -> str:\n return self.__name", "def name(self) -> str:\n return self.__name", "def name(self) -> str:\n return self.__name", "def name(self) -> str:\n return self.__name", "def name(self) -> str:\n return self.__name", "def name(self) -> str:\n return self.__name", "def dataset_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dataset_id\")", "def name(self) -> str:\n raise NotImplementedError()", "def get_name(self):\n pass", "def get_name(self):\n pass", "def get_dataset(self):\n return", "def name(self, the_name):\n if (len(the_name) < TempDataset.MIN_LEN\n or len(the_name) > TempDataset.MAX_LEN):\n raise ValueError\n self._name = the_name", "def name(self):\n return None", "def name(self):\n return self._name", "def name(self):\n return self._name", "def name(self):\n return self._name", "def name(self):\n return self._name", "def name(self):\n return self._name", "def name(self):\n return self._name", "def name(self):\n return self._name", "def name(self):\n return self._name", "def name(self):\n return self._name", "def name(self):\n return self._name", "def name(self):\n return self._name", "def name(self):\n return self._name", "def name(self):\n return self._name", "def name(self):\n return self._name", "def name(self):\n return self._name", "def name(self):\n return self._name" ]
[ "0.919469", "0.8976905", "0.87083614", "0.8686752", "0.7850114", "0.7471211", "0.7451666", "0.74262846", "0.7378664", "0.73779565", "0.7199527", "0.70992815", "0.70992815", "0.70992815", "0.6962095", "0.6886322", "0.6757635", "0.6693697", "0.667641", "0.6673114", "0.6660524", "0.6659455", "0.6659455", "0.66143054", "0.6558511", "0.6558511", "0.6558511", "0.6558511", "0.6557192", "0.6557192", "0.6557192", "0.6557192", "0.6557192", "0.6557192", "0.6557192", "0.6557192", "0.6557192", "0.6557192", "0.65350753", "0.65318155", "0.6513573", "0.65109646", "0.6504513", "0.6493975", "0.64806944", "0.6479034", "0.6477732", "0.6477732", "0.6477732", "0.6477732", "0.6477732", "0.6477732", "0.6477732", "0.6477732", "0.6477732", "0.6477732", "0.6469988", "0.6446719", "0.64436626", "0.64417636", "0.64365053", "0.64120686", "0.64061016", "0.64061016", "0.6403698", "0.6403677", "0.6396787", "0.6395468", "0.6389454", "0.6389454", "0.6386928", "0.6381647", "0.6381647", "0.6381647", "0.6381647", "0.6381647", "0.6381647", "0.6381647", "0.6377967", "0.63692284", "0.6368052", "0.6368052", "0.6364419", "0.63530654", "0.6351876", "0.63485074", "0.63485074", "0.63485074", "0.63485074", "0.63485074", "0.63485074", "0.63485074", "0.63485074", "0.63485074", "0.63485074", "0.63485074", "0.63485074", "0.63485074", "0.63485074", "0.63485074", "0.63485074" ]
0.0
-1
Not to be used! Check get_frames() instead.
def __getitem__(self, item): return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def frames():\n raise RuntimeError('Must be implemented by subclasses.')", "def process_frames(self, data):\n pass", "def process_frames(self, data):\n pass", "def process_frames(self, data):\n pass", "def process_frames(self, data):\n pass", "def process_frames(self, data):\n pass", "def dispatch_frame(self, frame):", "def frame(self):", "def onFrameUpdated(self):\n pass", "def __init__(self, frames):\n self._frames = frames", "def __init__(self, frames):\n self._frames = frames", "def __init__(self, frames):\n self._frames = frames", "def __init__(self, frames):\n self._frames = frames", "def get_frame(self):\n\t\tframe = None\n\t\twhile not frame:", "def process_frame(self, frame):\n\t\treturn frame", "def _get_frame(self, key):\n pass", "def _getframe(depth=None): # real signature unknown; restored from __doc__\n pass", "def get_frame(self, ind):\n pass", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def process_frame():\n return \"OK\"", "def onMessageFrameEnd(self):", "def onMessageFrameBegin(self, length):", "def _current_frames(): # real signature unknown; restored from __doc__\n return {}", "def hasCurrentFrame(self):\n if self.currentFrame == []:\n return False\n return True", "def parse_frames(self):\r\n done = False\r\n self._ip = 13 + self.ct_len\r\n while not done:\r\n code = self.next_byte()\r\n if not code:\r\n raise ValueError(\"Unexcepted end of file\")\r\n if code == b\"\\x2C\":\r\n self.parse_frame()\r\n elif code == b\"\\x21\":\r\n code = self.next_byte()\r\n if code == b\"\\xF9\":\r\n self.g_ext.append(self.parse_gce())\r\n elif code == b\"\\xFF\":\r\n self.next_byte()\r\n app = self.next_bytes(11)\r\n if app == b\"NETSCAPE2.0\":\r\n self.parse_ne()\r\n else:\r\n self.skip()\r\n elif code == b\"\\xFE\":\r\n self.comments.append(self.parse_ce())\r\n else:\r\n self.next_bytes(13)\r\n self.skip()\r\n elif code == b\"\\x3B\":\r\n done = True", "def __init__(self, frame):\n super().__init__(frame)\n self.frames = None\n self.delay = None", "def init_all_frames(self) -> bool:\n raise NotImplementedError", "def inspect_frame(self, frame):\n while frame:\n self.inspect_single_frame(frame)\n frame = frame.f_back", "def frames(self) -> Optional[Tuple[int, ...]]:\n return self._frames", "def available_frames(self):\n if self._pipeline:\n #return [getattr(frame[0], \"name\", frame[0]) for frame in self._pipeline]\n return [step.frame if isinstance(step.frame, str) else step.frame.name for step in self._pipeline ]\n else:\n return None", "def frame_available(self):\n return type(self._frame) != type(None)", "def change_frame(self, frame):\r\n pass", "def _request_frame(self):\n self._send_command('GET_FRAME')", "def frames(self):\n return self._frames", "def testAnimationBFrames(self):\n try:\n bFramesValue = int(self.bFrames)\n except ValueError:\n self.assertNotEqual(\n self.bFrames,\n self.config.bFrames\n )\n self.assertEqual(\n None,\n self.config.bFrames\n )\n else:\n self.assertEqual(\n bFramesValue,\n self.config.bFrames\n )", "def frames(self) -> Set[int]:\n return self._frames", "def get_frame(self):\n return self.frames.get()", "def isGoodFrame(self, frame):\n if 
max(frame)<0.1:\n return False\n return True", "def __init__(self, frames=[], loop = 0):\n\t\t\n\t\tif isinstance(frames, (list, tuple)):\n\t\t\tself.frames = frames\n\t\telse:\n\t\t\traise TypeError\n\t\t\t\n\t\tif not loop:\n\t\t\tself.loop = 0\n\t\telse:\n\t\t\tself.loop = 1\n\t\t\t\n\t\tself.present_frame = None", "def count_frames():\n frames = sentence.sem.frames.find_all('frame', {'name' : NEGATION_FRAME_NAME})\n frame_count = []\n for f_r in frames:\n frame_count.append(f_r)\n return len(frame_count)", "def _test_for_playback_frame_errors(self):\n steps = self.program.steps\n for s in steps:\n for index, pb_frame in enumerate(s.playback_frames):\n self.assertEqual(pb_frame.error, 0,\n f\"Step {s.name} frame number {index} has a simulation error: {pb_frame.error_string}\")", "def _process(self, frame, **kwargs):\n raise NotImplementedError()", "def __init__(self, frame):\n self.frame = frame", "def EventFrame (self):\n pass", "def get(self):\n return self.frames", "def check_queues(self) -> int:\r\n\r\n nframes = 0\r\n\r\n for queue in self.receive_queues:\r\n if not queue.empty():\r\n nframes += 1\r\n frame, img_bytes = queue.get_nowait()\r\n\r\n if frame < self.next_frame:\r\n raise ValueError('received frame we already processed! '\r\n + f'got {frame}, at {self.next_frame}')\r\n if frame in self.ooo_frames:\r\n raise ValueError(f'received duplicate frame: {frame}')\r\n\r\n self.ooo_frames[frame] = img_bytes\r\n if len(self.ooo_frames) > self.max_ooo_frames:\r\n raise ValueError('exceeded maximum frame cache (now have '\r\n + f'{len(self.ooo_frames)} frames waiting)')\r\n\r\n return nframes", "def captured_frames(self):\n return self._captured_frames", "def CHECK_transition_frames(self):\n tr_frames = []\n for i, frame in enumerate(self.y):\n if not np.all(frame == frame[0]):\n tr_frames.append(frame)\n\n print('there are ', len(tr_frames), ' frames containing a transition')\n return tr_frames", "def frames(self):\n return list(self._frames)", "def get_frame(self, frame):\n return self.frames[frame]", "def num_frames(self):\n return len(self.video)", "def getFrames():\n\t\tfor cam in Camera.CAMERAS: cam.getFrame()", "def get_frame_list(self):\r\n\r\n logger.debug('Executing frame extraction')\r\n\r\n frames_loaded = False\r\n\r\n # Try to load YAML file with frame list\r\n if os.path.exists(self.frames_file_path):\r\n\r\n print 'Loading YAML file with frame list'\r\n logger.debug('Loading YAML file with frame list')\r\n\r\n f_list = utils.load_YAML_file(self.frames_file_path)\r\n\r\n if f_list:\r\n self.frame_list = f_list\r\n\r\n print 'YAML file with frame_list loaded'\r\n logger.debug('YAML file with frame_list loaded')\r\n\r\n frames_loaded = True\r\n\r\n if not frames_loaded:\r\n\r\n print '\\n\\n### Frame extraction ###\\n'\r\n logger.debug('\\n\\n### Frame extraction ###\\n')\r\n\r\n # Save processing time\r\n start_time = cv2.getTickCount()\r\n\r\n if not (os.path.exists(self.frames_path)):\r\n os.makedirs(self.frames_path)\r\n\r\n # Counter for all frames\r\n frame_counter = 0\r\n\r\n # Value of frame_counter for last analyzed frame\r\n last_anal_frame = 0\r\n\r\n # Open video file\r\n capture = cv2.VideoCapture(self.resource_path)\r\n\r\n self.frame_list = []\r\n\r\n # Save parameters for this video\r\n param_dict = {}\r\n\r\n if capture is None or not capture.isOpened():\r\n\r\n error = 'Error in opening video file'\r\n\r\n print error\r\n logger.debug(error)\r\n\r\n return\r\n\r\n else:\r\n\r\n video_fps = capture.get(cv2.cv.CV_CAP_PROP_FPS)\r\n\r\n 
param_dict[c.VIDEO_FPS_KEY] = video_fps\r\n\r\n # Original number of frames\r\n tot_frames = capture.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)\r\n\r\n param_dict[c.VIDEO_TOT_FRAMES_KEY] = tot_frames\r\n\r\n self.fps = video_fps\r\n\r\n self.video_frames = float(tot_frames)\r\n\r\n # Saved frames\r\n saved_frames = 0\r\n\r\n while True:\r\n\r\n # Read frame\r\n ret, frame = capture.read()\r\n\r\n # If no frame is read, abort\r\n if not ret:\r\n break\r\n\r\n used_fps = c.USED_FPS\r\n use_or_fps = c.USE_ORIGINAL_FPS\r\n use_or_res = c.USE_ORIGINAL_RES\r\n used_res_scale_factor = c.USED_RES_SCALE_FACTOR\r\n\r\n if self.params is not None:\r\n\r\n if c.USED_FPS_KEY in self.params:\r\n used_fps = self.params[c.USED_FPS_KEY]\r\n\r\n if c.USE_ORIGINAL_FPS_KEY in self.params:\r\n use_or_fps = self.params[c.USE_ORIGINAL_FPS_KEY]\r\n\r\n if c.USE_ORIGINAL_RES_KEY in self.params:\r\n use_or_res = self.params[c.USE_ORIGINAL_RES_KEY]\r\n\r\n if c.USED_RES_SCALE_FACTOR_KEY in self.params:\r\n used_res_scale_factor = self.params[\r\n c.USED_RES_SCALE_FACTOR_KEY]\r\n\r\n # Next frame to be analyzed\r\n next_frame = last_anal_frame + (video_fps / used_fps) - 1\r\n\r\n if use_or_fps or (frame_counter > next_frame):\r\n\r\n # Frame position in video in milliseconds\r\n elapsed_ms = capture.get(cv2.cv.CV_CAP_PROP_POS_MSEC)\r\n\r\n # print 'elapsed video s =', elapsed_video_s\r\n\r\n fr_name = '%07d.png' % frame_counter\r\n\r\n frame_path = os.path.join(self.frames_path, fr_name)\r\n\r\n # Resize frame\r\n if not use_or_res:\r\n fx = used_res_scale_factor\r\n\r\n fy = used_res_scale_factor\r\n\r\n interp = cv2.INTER_AREA\r\n\r\n frame = cv2.resize(src=frame, dsize=(0, 0),\r\n fx=fx, fy=fy,\r\n interpolation=interp)\r\n\r\n cv2.imwrite(frame_path, frame,\r\n [cv.CV_IMWRITE_PNG_COMPRESSION, 0])\r\n\r\n frame_dict = {c.SAVED_FRAME_NAME_KEY: fr_name,\r\n c.ELAPSED_VIDEO_TIME_KEY: int(elapsed_ms)}\r\n\r\n self.frame_list.append(frame_dict)\r\n\r\n last_anal_frame = frame_counter\r\n\r\n saved_frames += 1\r\n\r\n frame_counter += 1\r\n\r\n self.progress = 100 * (frame_counter / self.video_frames)\r\n\r\n print('progress: ' + str(self.progress) + ' % \\r'),\r\n\r\n del capture\r\n\r\n self.saved_frames = float(saved_frames)\r\n\r\n param_dict[c.VIDEO_SAVED_FRAMES_KEY] = self.saved_frames\r\n\r\n # Save frame list in YAML file\r\n utils.save_YAML_file(self.frames_file_path, self.frame_list)\r\n\r\n # Save video parameters in YAML file\r\n\r\n utils.save_YAML_file(self.params_file_path, param_dict)\r\n\r\n # Save processing time\r\n time_in_clocks = cv2.getTickCount() - start_time\r\n time_in_seconds = time_in_clocks / cv2.getTickFrequency()\r\n\r\n print 'Time for frame extraction:', str(time_in_seconds), 's\\n'\r\n logger.debug(\r\n 'Time for frame extraction:', str(time_in_seconds), 's\\n')\r\n\r\n self.anal_times[c.FRAME_EXTRACTION_TIME_KEY] = time_in_seconds\r\n\r\n utils.save_YAML_file(self.analysis_file_path, self.anal_times)", "def half_frame(self) -> None:\n pass", "def half_frame(self) -> None:\n pass", "def frame_idx(self) -> int:\n pass", "def dummy_videoframe_handler(frame, userdata=None):\n sys.stdout.write('Got frame %d\\r' % userdata.count())\n sys.stdout.flush()\n userdata.increment()", "def run_frame(self, ti, img):\n pass", "def get_frame(self):\n return self.frame", "def store_frames(self):\n self.stored_frames = (self.active_call, self.active_frame)", "def get_still(self):\n _, frame = self.client.read()\n return frame", "def _next_frame(self):\n ret, self.frame = self.capture.read()\n if not ret:\n 
self.logger.warning('Failed to read frame')\n if self.show_video:\n cv2.imshow('frame', self.frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n exit(0)\n return ret", "def isframe(object):\r\n return isinstance(object, types.FrameType)", "def __init__(self):\n Frame.__init__(self, spec.FRAME_HEARTBEAT, 0)", "def on_delivered(self, frame):\n pass", "def exitFrame(self):\n #check whether any grabbed frame is retrievable\n #The getter may retrieve and cacha the frame.\n if self.frame is None:\n self._enteredFrame = False\n return\n #update the FPS estimate and related variables.\n if self._framesElapsed == 0:\n self._startTime = time.time()\n else:\n timeElapsed = time.time() - self._startTime\n self._fpsEstimate = self._frameElapsed / timeElapsed", "def _select_frames(self, frames):\n converted_frames = list()\n # Ignore some frame at begin and end.\n for i in np.linspace(0, self.video_size, self.frame_num + 2)[1:self.frame_num + 1]:\n img = frames[int(i)]\n img = img.resize((224, 224), Image.BILINEAR)\n frame_data = np.array(img)\n converted_frames.append(frame_data)\n return converted_frames", "def get_frames(self):\n\n log(\"Getting frames for {} at {}\".format(self._location, self._t0))\n fn_get = lambda time_str: self.get_wximg(time_str)\n pool0 = multiprocessing.dummy.Pool(self._frames)\n raw = pool0.map(fn_get, self.get_time_strs())\n wximages = [x for x in raw if x is not None]\n if not wximages:\n return None\n pool1 = multiprocessing.dummy.Pool(len(wximages))\n background = self.get_background()\n if background is None:\n return None\n fn_composite = lambda x: self._pilimg.alpha_composite(background, x)\n composites = pool1.map(fn_composite, wximages)\n legend = self.get_legend()\n if legend is None:\n return None\n loop_frames = pool1.map(lambda _: legend.copy(), composites)\n fn_paste = lambda x: x[0].paste(x[1], (0, 0))\n pool1.map(fn_paste, zip(loop_frames, composites))\n return loop_frames", "def get_frame(self, i: int):\r\n try:\r\n return self.frames[i]\r\n except IndexError:\r\n return None", "def stream_frames(video_capture):", "def getouterframes(frame, context=1):\r\n framelist = []\r\n while frame:\r\n framelist.append((frame,) + getframeinfo(frame, context))\r\n frame = frame.f_back\r\n return framelist", "def get_frames(self):\n if not self.video:\n return []\n # We cannot validate shape on construction as that happens inside graph\n # mode as we construct from a tf.data.Dataset, so we validate here.\n self.video[0].validate_shape_and_dtype()\n return self.video", "def dropped_frames(self):\n # type: () -> int\n return self._dropped_frames", "def _read_frames(self):\n cap = self._read_file()\n\n frame_list = []\n ret_list = []\n\n while True:\n ret, frame = cap.read()\n if ret:\n frame_list.append(np.array(frame))\n ret_list.append(ret)\n else:\n break\n if self.mode==\"np\":\n frame_list = np.array(frame_list)\n return frame_list", "def captureNextFrame(self):\r\n mainls = []\r\n\r\n\r\n ret, readFrame = self.capture.read()\r\n\r\n if (ret == True):\r\n self.currentFrame = cv2.cvtColor(readFrame, cv2.COLOR_BGR2RGB)\r\n self.faceDetection(self.currentFrame)\r\n self.currentFrame = self.bbFrame", "def get_frames(self,std_id, frame_ids, anno=None):\n raise", "def resent_frames(self):\n try:\n for k,f in self.frames.items():\n if time.time() - f['time'] > 0.500:\n self.log.warning(\"resend frame %d:%s\" % (k, f['msg']))\n self.__send_frame(k, f['msg'])\n except RuntimeError:\n pass # dictionary changed size during iteration", "def get_max_frames(self):\n return 8", "def 
switch_frames(self, new_frameName):\n\n if new_frameName in self.__frame_names:\n\n #Give rise to a new frame\n\n if new_frameName == \"image_frame\":\n image_frame= ImageFrame.Image_Frame(self.master,\n self.width,\n self.height)\n image_frame.place(x = 0, y = 0)\n \n\n elif new_frameName == \"audio_frame\":\n audio_frame = AudioFrame.Audio_Frame(self.master,\n self.width,\n self.height)\n audio_frame.place(x = 0, y = 0)\n\n elif new_frameName == \"doc_frame\":\n not_yet = notReadyYetFrame.Not_Ready_Yet_Frame(self.master,\n self.width,\n self.height)\n not_yet.place(x = 0, y = 0)\n\n else:\n not_yet = notReadyYetFrame.Not_Ready_Yet_Frame(self.master,\n self.width,\n self.height)\n not_yet.place(x = 0, y = 0)\n\n #Destroy the current frame\n self.place_forget()\n self.destroy()", "def frames(self):\n if self.integration is None:\n return None\n return self.integration.frames", "def is_full_frame(self):\n return self['application'] == 'ap3_250_fullframe' or self['application'] == 'ap9_250_fullframe_mindead'", "def remaining_frames(self):\n return self.sound.nframes - self.current_frame", "def prepareFrameCache(self, frame, cacheType): # real signature unknown; restored from __doc__\n pass", "def extract_frames():\n vc = cv2.VideoCapture(INPUT_FILE)\n c=1\n\n if vc.isOpened():\n rval , frame = vc.read()\n else:\n rval, frame = False, False\n\n while rval:\n # cv2.imwrite((MODIFIED_FRAMES_DIR + 'img' + str(c) + '.jpg'),frame)\n cv2.imwrite((MODIFIED_FRAMES_DIR + str(c) + '.jpg'),frame)\n c = c + 1\n cv2.waitKey(1)\n rval, frame = vc.read()\n vc.release()\n print(\"All frames extracted successfully...\")", "def get_frame(self, frame: int) -> BaseImage:\n return self.sequence[frame]", "def get_all_frames(self) -> List[str]:\n frames = self.allFramesAsString().split(\"\\n\")\n frames.remove(\"\")\n return frames", "def validate_timecode_input(self):\n frame = self.file_buffer.get_image(self.frame_offset)\n try:\n test = frame.shape\n except Exception as e:\n print(e)\n return False\n else:\n return True\n finally:\n test = None\n frame = None", "def validate_frame(frame):\n if frame not in TIME_FRAMES:\n raise ValueError(\"Time frame must be one of {0}, not '{1}'\"\n .format('|'.join(TIME_FRAMES), frame))", "def _postprocess_frames(self, frames):\n num_frames = frames.shape[0]\n if num_frames > 0:\n first_frame = self._frames[0]\n pos_start = self.get_frame_root_pos(first_frame)\n\n for f in range(num_frames):\n curr_frame = frames[f]\n\n root_pos = self.get_frame_root_pos(curr_frame)\n root_pos[0] -= pos_start[0]\n root_pos[1] -= pos_start[1]\n\n root_rot = self.get_frame_root_rot(curr_frame)\n root_rot = pose3d.QuaternionNormalize(root_rot)\n root_rot = motion_util.standardize_quaternion(root_rot)\n\n self.set_frame_root_pos(root_pos, curr_frame)\n self.set_frame_root_rot(root_rot, curr_frame)\n\n return", "def frame_forward(self):\n if self.playMode == FFMPEG:\n self.ffmpegTimerOut()", "def _collectFrames(self):\n self._sources = sources = self._resolveFramePaths(self._info['sources'])\n self.logger.debug('Sources: %r', sources)\n\n frameDict = {'byFrame': {}, 'byAxes': {}, 'axesAllowed': True}\n numChecked = 0\n\n self._associatedImages = {}\n self._sourcePaths = {}\n self._channels = self._info.get('channels') or []\n\n absLargeImagePath = os.path.abspath(self._largeImagePath)\n computedWidth = computedHeight = 0\n self.tileWidth = self._info.get('tileWidth')\n self.tileHeight = self._info.get('tileHeight')\n self._nativeMagnification = {\n 'mm_x': self._info.get('scale', {}).get('mm_x') or 
None,\n 'mm_y': self._info.get('scale', {}).get('mm_y') or None,\n 'magnification': self._info.get('scale', {}).get('magnification') or None,\n }\n # Walk through the sources, opening at least the first two, and\n # construct a frame list. Each frame is a list of sources that affect\n # it along with the frame number from that source.\n lastSource = None\n for sourceIdx, source in enumerate(sources):\n path = source['path']\n if os.path.abspath(path) == absLargeImagePath:\n msg = 'Multi source specification is self-referential'\n raise TileSourceError(msg)\n similar = False\n if (lastSource and source['path'] == lastSource['path'] and\n source.get('params') == lastSource.get('params')):\n similar = True\n if not similar and (numChecked < 2 or not self._info.get('uniformSources')):\n # need kwargs of frame, style?\n ts = self._openSource(source)\n self.tileWidth = self.tileWidth or ts.tileWidth\n self.tileHeight = self.tileHeight or ts.tileHeight\n if not numChecked:\n tsMag = ts.getNativeMagnification()\n for key in self._nativeMagnification:\n self._nativeMagnification[key] = (\n self._nativeMagnification[key] or tsMag.get(key))\n numChecked += 1\n tsMeta = ts.getMetadata()\n if 'bands' in tsMeta:\n if not hasattr(self, '_bands'):\n self._bands = {}\n self._bands.update(tsMeta['bands'])\n lastSource = source\n bbox = self._sourceBoundingBox(source, tsMeta['sizeX'], tsMeta['sizeY'])\n computedWidth = max(computedWidth, int(math.ceil(bbox['right'])))\n computedHeight = max(computedHeight, int(math.ceil(bbox['bottom'])))\n # Record this path\n if path not in self._sourcePaths:\n self._sourcePaths[path] = {\n 'frames': set(),\n 'sourcenum': set(),\n }\n # collect associated images\n for basekey in ts.getAssociatedImagesList():\n key = basekey\n keyidx = 0\n while key in self._associatedImages:\n keyidx += 1\n key = '%s-%d' % (basekey, keyidx)\n self._associatedImages[key] = {\n 'sourcenum': sourceIdx,\n 'key': key,\n }\n source['metadata'] = tsMeta\n source['bbox'] = bbox\n self._sourcePaths[path]['sourcenum'].add(sourceIdx)\n # process metadata to determine what frames are used, etc.\n self._addSourceToFrames(tsMeta, source, sourceIdx, frameDict)\n # Check frameDict and create frame record\n self._frames = self._frameDictToFrames(frameDict)\n self.tileWidth = min(max(self.tileWidth, self._minTileSize), self._maxTileSize)\n self.tileHeight = min(max(self.tileHeight, self._minTileSize), self._maxTileSize)\n self.sizeX = self._info.get('width') or computedWidth\n self.sizeY = self._info.get('height') or computedHeight\n self.levels = int(max(1, math.ceil(math.log(\n max(self.sizeX / self.tileWidth, self.sizeY / self.tileHeight)) / math.log(2)) + 1))", "def get_frame_clock(self): # real signature unknown; restored from __doc__\n pass", "def _validate_frames(frames: Sequence[int]) -> None:\n if not frames:\n raise ValueError('`frames` cannot be empty.')\n\n non_positive_frame_numbers = tuple(\n frame_number for frame_number in frames if frame_number < 1)\n if non_positive_frame_numbers:\n raise ValueError('Frame numbers must be positive. Found violations: '\n f'{non_positive_frame_numbers!r}')\n\n # Python uses Timsort which is `O(n)` in the best case, i.e., the overhead\n # is negligible assuming most inputs meet this specification. If the\n # specification is violated, `n` is small in case of DICOMs (few hundreds\n # in the worst case?). 
Here, the simplicity of the implementation outweighs\n # the (roughly constant time) overhead.\n if tuple(sorted(frames)) != tuple(frames):\n raise ValueError('Frame numbers must be in ascending order. Actual '\n f'order: {frames!r}')", "def clean_frames(self):\n for fn in os.listdir(self.frame_directory):\n if fn.endswith(\".png\") and fn in self.frame_fns:\n os.remove(fn)", "def process_frame(self) -> bool:\r\n if self.next_frame not in self.ooo_frames:\r\n return False\r\n\r\n img_bytes = self.ooo_frames.pop(self.next_frame)\r\n\r\n for kb_start in range(0, len(img_bytes), self.block_size):\r\n self.ffmpeg_proc.stdin.write(\r\n img_bytes[kb_start:kb_start + self.block_size])\r\n\r\n self.next_frame += 1\r\n return True", "def getinnerframes(tb, context=1):\r\n framelist = []\r\n while tb:\r\n framelist.append((tb.tb_frame,) + getframeinfo(tb, context))\r\n tb = tb.tb_next\r\n return framelist" ]
[ "0.7640561", "0.69378716", "0.69378716", "0.69378716", "0.69378716", "0.69378716", "0.69195557", "0.67941016", "0.67867607", "0.6755174", "0.6755174", "0.6755174", "0.6755174", "0.6722964", "0.6581867", "0.6483949", "0.6414792", "0.6398254", "0.63598907", "0.63598907", "0.63598907", "0.63598907", "0.63598907", "0.63598907", "0.6322751", "0.6307869", "0.6305777", "0.6301942", "0.6261233", "0.6187161", "0.6180312", "0.61780745", "0.6155257", "0.6151303", "0.61455387", "0.6130367", "0.6128891", "0.61150837", "0.6107453", "0.60829455", "0.60518783", "0.6021739", "0.6018323", "0.59724", "0.59704894", "0.5941176", "0.59248114", "0.5919355", "0.5907409", "0.5904859", "0.59027016", "0.58991843", "0.58969307", "0.5894568", "0.5883093", "0.58597237", "0.5810989", "0.5802099", "0.579829", "0.579829", "0.5767862", "0.5761386", "0.5749332", "0.57490814", "0.5748556", "0.5739464", "0.57277495", "0.57262045", "0.5718457", "0.570355", "0.5691906", "0.56888324", "0.5684109", "0.5669693", "0.5645044", "0.56390214", "0.563588", "0.5632126", "0.5632016", "0.5627276", "0.562713", "0.5624126", "0.56237334", "0.5618805", "0.5597357", "0.55844337", "0.5560488", "0.5560268", "0.5558785", "0.555625", "0.55481374", "0.5544944", "0.55415845", "0.55345964", "0.55275005", "0.5521124", "0.54948336", "0.5484428", "0.5476553", "0.54700446", "0.5463382" ]
0.0
-1
Return information about a particular sequence.
def get_study_info(self,std_id): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSongTextInfo():\n sids = []\n documents = []\n sFile = open('../txt/two__Lastfm_song_Docs.txt')\n lines = sFile.readlines()\n index = 0\n for line in lines:\n line.strip('\\n')\n line.strip('\\r\\n')\n items = line.split('>>')\n sid = int(items[0])\n text = items[1]\n documents.append(text)\n sids.append(sid)\n sFile.close()\n print 'len = ',len(sids)\n print 'len = ',len(documents)\n return sids,documents", "def info(self):\n self.update_info()\n print('Number of electrodes: ' + str(self.n_elecs))\n print('Recording time in seconds: ' + str(self.dur))\n print('Sample Rate in Hz: '+ str(self.sample_rate))\n print('Number of sessions: ' + str(self.n_sessions))\n print('Date created: ' + str(self.date_created))\n print('Meta data: ' + str(self.meta))", "def getSentenceInfo(sentence):\n\tpass", "def details(self):\n \n sparql_results = self.query (\"\"\"\n select distinct * where {\n\n BIND (<%s> as ?rc)\n \n ?rc olac:speaker ?participant .\n ?participant austalk:id ?pid .\n ?participant austalk:recording_site ?site .\n ?site rdfs:label ?sitelabel .\n \n ?rc austalk:prototype ?component .\n ?component austalk:shortname ?shortname .\n ?rc dc:isPartOf ?rs .\n ?rs austalk:prototype ?session .\n ?session austalk:id ?sessionid .\n \n ?component austalk:name ?name . \n\\\n optional { ?rc austalk:audiorating ?arating .}\n optional { ?rc austalk:videorating ?vrating .}\n optional { ?rc austalk:comment ?comment .}\n }\"\"\" % (self.identifier, ))\n \n # we expect one binding\n bindings = sparql_results[\"results\"][\"bindings\"]\n if len(bindings) == 1:\n bindings = bindings[0]\n self.participantId = bindings['pid']['value']\n self.prototype = bindings['component']['value']\n self.name = bindings['name']['value']\n self.componentId = bindings['shortname']['value']\n self.site = bindings['sitelabel']['value']\n self.sessionId = bindings['sessionid']['value']\n if bindings.has_key('arating'):\n self.audiorating = bindings['arating']['value']", "def extract_information(preprocessed_sentences):\n parsed = list(map(lambda sentence: nlp(sentence), preprocessed_sentences))\n\n quantities = list(filter(lambda sentence: eh.sentence_has_type(sentence, 'QUANTITY'), parsed))\n dates = list(filter(lambda sentence: eh.sentence_has_type(sentence, 'DATE'), parsed))\n\n hurricane_name = eh.extract_frequent_regex_match(parsed, '[Hh]urricane ([A-Z][a-z]+)').most_common(1)[0][0]\n hurricane_category = eh.extract_frequent_regex_match(parsed, '[Cc]ategory ([0-9]+)').most_common(1)[0][0]\n\n tropical_storm_name = eh.extract_frequent_regex_match(parsed, '[Tt]ropical [Ss]torm ([A-Z][a-z]+)').most_common(1)[0][0]\n formation_date, middle_month = extract_storm_timeline(dates, hurricane_name)\n\n preperation_info = extract_preparation_information(parsed)\n prep_gpes = preperation_info[0].most_common(3)\n\n restore_info = extract_restoration_information(parsed)\n\n landfall_info = extract_landfall_information(parsed)\n\n wind_info = extract_wind_information(quantities)\n rain_info = extract_rain_information(quantities)\n size_info = extract_size_information(parsed)\n\n # formation_info = extract_formation_info(parsed)\n death_info = extract_death_damages_info(parsed)\n\n print(constants.HURRICANE_SENTENCE.format(hurricane_name, middle_month, hurricane_category))\n print(constants.LANDFALL_SENTENCE.format(hurricane_name, landfall_info[2], landfall_info[3], landfall_info[0], landfall_info[1]))\n print(constants.WIND_SENTENCE.format(wind_info[0], wind_info[1], wind_info[2]))\n 
print(constants.RAIN_SENTENCE.format(hurricane_name, rain_info[1], rain_info[0], rain_info[2]))\n print(constants.FORMATION_SENTENCE.format(formation_date, tropical_storm_name))\n print(constants.PREPARATION_SENTENCE.format(prep_gpes[0][0], prep_gpes[1][0], prep_gpes[2][0], preperation_info[1].\n most_common(1)[0][0]))\n print(constants.SIZE_SENTENCE.format(size_info[0], size_info[1]))", "def info(self) -> list[int]:", "def get_phrase(self):\n counter = 30 # give us a full 2 seconds of time to start\n\n with closing(self.Mic()) as mic:\n log.info('Recording phrase.')\n while True:\n frames = mic.next()\n\n score, has_disturbance = self.scorer.add(frames)\n\n if counter < 15 and has_disturbance:\n log.info('Recording more in phrase.')\n counter = 15\n else:\n counter -= 1\n\n if counter >= 1:\n yield frames", "def info(self):\n return self.__dict__[self.sid]", "def get_freq_details(diagnostics_dir, verbose=False):\n metafile_science = find_metadata_file(diagnostics_dir, 'mslist-scienceData*txt', verbose=False)\n if not metafile_science:\n return None, None, None\n\n with open(metafile_science, 'r') as mslist_file:\n lines = mslist_file.readlines()\n\n in_spw_block = False\n for line in lines:\n if in_spw_block:\n parts = line.split()\n chan_width = float(parts[10])*1000. # convert kHz to Hz\n cfreq = parts[12] #MHz\n nchan = parts[7]\n break\n else:\n in_spw_block = line.find('Frame') >= 0\n\n return chan_width, cfreq, nchan", "def read_ams(self):\n current = time.time()\n try:\n if current - self.start > 3:\n self.track_artist = not self.track_artist\n self.start = time.time()\n if self.track_artist:\n data = self.ams.artist\n if not self.track_artist:\n data = self.ams.title\n except (RuntimeError, UnicodeError):\n data = None\n\n if data:\n data = data[:16] + (data[16:] and '..')\n\n return data", "def infotodict(seqinfo):\n \n \"\"\"\n MCF Pilot Protocol acquired on Friday April 13th\n \n >>> hdc_look.py -s mfc001 -ss 1\n series_id sequence_name series_description dim1 dim2 dim3 dim4 TR TE is_derived is_motion_corrected\n 0 1-localizer *fl2d1 localizer 192 192 3 1 0.020 5.00 False False\n 1 2-pre_Neutral1_A>>P Resting 4X4X4 *epfid2d1_64 pre_Neutral1_A>>P Resting 4X4X4 64 64 35 148 2.000 25.00 False False\n 2 3-pre_topup_A>>P *epse2d1_64 pre_topup_A>>P 64 64 140 1 2.400 38.00 False False\n 3 4-pre_topup_P>>A *epse2d1_64 pre_topup_P>>A 64 64 140 1 2.400 38.00 False False\n 4 5-Field_mapping 4X4X4 A>>P *fm2d2r Field_mapping 4X4X4 A>>P 64 64 35 1 0.488 4.92 False False\n 5 6-Field_mapping 4X4X4 A>>P *fm2d2r Field_mapping 4X4X4 A>>P 64 64 35 1 0.488 7.38 False False\n 6 7-pre+heat1_A>>P 4X4X4 *epfid2d1_64 pre+heat1_A>>P 4X4X4 64 64 35 148 2.000 25.00 False False\n 7 8-pre_Neutral2_A>>P Resting 4X4X4 *epfid2d1_64 pre_Neutral2_A>>P Resting 4X4X4 64 64 35 148 2.000 25.00 False False\n 8 9-pre+heat2_A>>P 4X4X4 *epfid2d1_64 pre+heat2_A>>P 4X4X4 64 64 35 148 2.000 25.00 False False\n 9 10-MPRAGE_GRAPPA2 *tfl3d1_16ns MPRAGE_GRAPPA2 256 240 192 1 2.300 2.98 False False\n 10 11-post_Neutral3_A>>P Resting 4X4X4 *epfid2d1_64 post_Neutral3_A>>P Resting 4X4X4 64 64 35 148 2.000 25.00 False False\n 11 12-post+heat3_A>>P 4X4X4 *epfid2d1_64 post+heat3_A>>P 4X4X4 64 64 35 148 2.000 25.00 False False\n 12 13-post_Neutral4_A>>P Resting 4X4X4 *epfid2d1_64 post_Neutral4_A>>P Resting 4X4X4 64 64 35 148 2.000 25.00 False False\n 13 14-post+heat4_A>>P 4X4X4 *epfid2d1_64 post+heat4_A>>P 4X4X4 64 64 35 148 2.000 25.00 False False\n 14 15-post_topup_A>>P *epse2d1_64 post_topup_A>>P 64 64 140 1 2.400 38.00 False 
False\n 15 16-post_topup_P>>A *epse2d1_64 post_topup_P>>A 64 64 140 1 2.400 38.00 False False\n \n \"\"\"\n\n bids_prefix = 'sub-{subject}/{session}/'\n\n pre_neutral1_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preNeutral1_acq-epi_rec-fmap_bold.{item:01d}')\n pre_heat1_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preHeat1_acq-epi_rec-fmap_bold.{item:01d}')\n pre_heat2_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preHeat2_acq-epi_rec-fmap_bold.{item:01d}')\n pre_neutral2_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preNeutral2_acq-epi_rec-fmap_bold.{item:01d}')\n\n pre_neutral1_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preNeutral1_acq-epi_rec-topup_bold.{item:01d}')\n pre_heat1_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preHeat1_acq-epi_rec-topup_bold.{item:01d}')\n pre_heat2_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preHeat2_acq-epi_rec-topup_bold.{item:01d}')\n pre_neutral2_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preNeutral2_acq-epi_rec-topup_bold.{item:01d}')\n\n pre_topup_ap = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-preEpi_dir-ap_epi.{item:01d}')\n pre_topup_pa = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-preEpi_dir-pa_epi.{item:01d}')\n\n # The item was commented out for Phase Difference field maps. Conversion did not work correctly. I removed the item number to try to\n # isolate the problem.\n\n pre_fmap_magnitude1 = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-pre_magnitude1.{item:01d}')\n pre_fmap_phasediff = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-pre_phasediff.{item:01d}')\n\n t1w = create_key(bids_prefix + 'anat/sub-{subject}_{session}_T1w')\n\n post_neutral3_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postNeutral3_acq-epi_rec-fmap_bold.{item:01d}')\n post_heat3_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postHeat3_acq-epi_rec-fmap_bold.{item:01d}')\n post_heat4_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postHeat4_acq-epi_rec-fmap_bold.{item:01d}')\n post_neutral4_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postNeutral4_acq-epi_rec-fmap_bold.{item:01d}')\n\n post_neutral3_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postNeutral3_acq-epi_rec-topup_bold.{item:01d}')\n post_heat3_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postHeat3_acq-epi_rec-topup_bold.{item:01d}')\n post_heat4_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postHeat4_acq-epi_rec-topup_bold.{item:01d}')\n post_neutral4_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postNeutral4_acq-epi_rec-topup_bold.{item:01d}')\n\n post_topup_ap = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-postEpi_dir-ap_epi.{item:01d}')\n post_topup_pa = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-postEpi_dir-pa_epi.{item:01d}')\n\n # Create an empty dictionary called info for each key\n\n info = {pre_neutral1_ap_fmap: [],\n pre_heat1_ap_fmap: [],\n pre_heat2_ap_fmap: [],\n pre_neutral2_ap_fmap: [],\n\n pre_neutral1_ap_topup: [],\n pre_heat1_ap_topup: [],\n pre_heat2_ap_topup: [],\n pre_neutral2_ap_topup: [],\n\n pre_topup_ap: [],\n pre_topup_pa: [],\n\n pre_fmap_magnitude1: [],\n pre_fmap_phasediff: [],\n\n t1w: [],\n\n 
post_neutral3_ap_fmap: [],\n post_heat3_ap_fmap: [],\n post_heat4_ap_fmap: [],\n post_neutral4_ap_fmap: [],\n\n post_neutral3_ap_topup: [],\n post_heat3_ap_topup: [],\n post_heat4_ap_topup: [],\n post_neutral4_ap_topup: [],\n\n post_topup_ap: [],\n post_topup_pa: [],\n\n }\n\n # Loop over each sequence. Use if statements to determine which sequences should be linked to which key\n\n for idx, s in enumerate(seqinfo):\n\n if 'pre_Neutral1' in s.series_id:\n info[pre_neutral1_ap_fmap].append([s.series_id])\n info[pre_neutral1_ap_topup].append([s.series_id])\n\n if 'pre+heat1' in s.series_id:\n info[pre_heat1_ap_fmap].append([s.series_id])\n info[pre_heat1_ap_topup].append([s.series_id])\n\n if 'pre+heat2' in s.series_id:\n info[pre_heat2_ap_fmap].append([s.series_id])\n info[pre_heat2_ap_topup].append([s.series_id])\n\n if 'pre_Neutral2' in s.series_id:\n info[pre_neutral2_ap_fmap].append([s.series_id])\n info[pre_neutral2_ap_topup].append([s.series_id])\n\n if 'pre_topup_A>>P' in s.series_id:\n info[pre_topup_ap].append([s.series_id])\n\n if 'pre_topup_P>>A' in s.series_id:\n info[pre_topup_pa].append([s.series_id])\n\n if (('Field_mapping 4X4X4 A>>P' in s.series_id) and\n (s.TE == 4.92)):\n info[pre_fmap_magnitude1].append([s.series_id])\n \n if (('Field_mapping 4X4X4 A>>P' in s.series_id) and\n (s.TE == 7.38)):\n info[pre_fmap_phasediff].append([s.series_id])\n\n if 'MPRAGE_GRAPPA2' in s.series_id:\n info[t1w].append([s.series_id])\n\n if 'post_Neutral3' in s.series_id:\n info[post_neutral3_ap_fmap].append([s.series_id])\n info[post_neutral3_ap_topup].append([s.series_id])\n\n if 'post+heat3' in s.series_id:\n info[post_heat3_ap_fmap].append([s.series_id])\n info[post_heat3_ap_topup].append([s.series_id])\n\n if 'post+heat4' in s.series_id:\n info[post_heat4_ap_fmap].append([s.series_id])\n info[post_heat4_ap_topup].append([s.series_id])\n\n if 'post_Neutral4' in s.series_id:\n info[post_neutral4_ap_fmap].append([s.series_id])\n info[post_neutral4_ap_topup].append([s.series_id])\n\n if 'post_topup_A>>P' in s.series_id:\n info[post_topup_ap].append([s.series_id])\n\n if 'post_topup_P>>A' in s.series_id:\n info[post_topup_pa].append([s.series_id])\n\n return info", "def audiences(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"audiences\")", "async def get_information():\n return {\n \"message\": f\"You are the Genome Researcher. \"\n f\"You are meddling with Coronavirus Sars-Cov-2 RNA... \"\n f\"Try to change the RNA at your disposal to uncover as many medical breakthroughs as possible. \"\n f\"use GET /sample to see the original RNA strand \"\n f\"use COPY /sample to create exact duplicate of original to perform experiments. \"\n f\"Try to change the RNA at your disposal to uncover as many medical breakthroughs as possible. \"\n f\"Good luck researcher. \"\n f\"Our souls fates' depend on you! 
\"\n }", "def frequency():\n\n return make_simple_tsv_get_response(FREQ_FILE, 'frequency')", "def getLSASpace():\n sids,documents = getSongTextInfo()\n texts = [[word for word in document.lower().split()] for document in documents]\n dictionary = corpora.Dictionary(texts)\n corpus = [dictionary.doc2bow(text) for text in texts]\n tfidf = models.TfidfModel(corpus)\n corpus_tfidf = tfidf[corpus]\n lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=30)\n corpus_lsi = lsi[corpus_tfidf]\n songMap = {}\n index = 0\n for doc in corpus_lsi:\n sid = sids[index]\n rMap = {}\n for item in doc:\n wid = item[0]\n count = item[1]\n rMap[wid] = count\n songMap[sid] = rMap\n index += 1\n return songMap", "def getInfo():", "def dnasequence(self):\n return parseSingleFasta(open(self.dna_file).readlines())[1]", "def info():\n # -------- Task 1 -------------------------\n # Please complete the following information\n\n return {\"agent name\": \"?\", # COMPLETE HERE\n \"student name\": [\"?\"], # COMPLETE HERE\n \"student number\": [\"?\"]} # COMPLETE HERE", "def thesaurus(self, message):\n read_pointer = open('Thesaurus.txt')\n\n for line in read_pointer:\n split_line = line.split(':', 1)\n if split_line[0] == message:\n return split_line[1]", "def subject_info(intent, extra_info=[]):\n\n text = intent['inputTranscript'].lower()\n utterances = AS.load_file('sample_utterances.txt')\n\n # add \"book\" and \"books\" to every utterance\n for line in list(utterances):\n utterances.insert(0, line + \" book\")\n utterances.insert(0, line + \" books\")\n\n # tells how many characters needs to be dropped before the subject starts\n to_drop = 0\n\n for line in utterances:\n if text.startswith(line):\n to_drop = len(line)\n break\n\n # drops the characters and makes a list from the strings that are left\n text = text[to_drop:].strip()\n text_list = text.split(' ', len(text))\n\n subject_list = []\n keywords = [\"books\", \"book\", \"by\", \"published\", \"written\"]\n keyword = \"\"\n\n # Find out when the book name ends\n for word in text_list:\n if word not in keywords:\n subject_list.append(word)\n else:\n break\n\n subject = \" \".join(subject_list)\n\n # Get all the keywords in the middle, so they can be\n # all be dropped at once, eg written by, books by\n text_list = text_list[len(subject_list):]\n if text_list:\n word = text_list[0]\n while word in keywords:\n keyword += word + \" \"\n text_list = text_list[1:]\n if text_list:\n word = text_list[0]\n else:\n break\n\n # search for an author from the rest of the characters\n author_text = text[len(keyword):].strip()\n author = AS.search(author_text, False)\n if author is \"\":\n author = None\n\n # There might be old info in the extra_info (author), so \n # we need to clear it\n extra_info.clear()\n\n # add the author to extra info so it can be used in the Finna API call\n if author:\n extra_info += [\"author:\\\"\" + author + \"\\\"\"]\n elif intent['sessionAttributes'].get('author'):\n extra_info += [\n \"author:\\\"\" + intent['sessionAttributes']['author'] + \"\\\"\"\n ]\n\n # The Finna API call\n request = lookfor(term=subject, filter=extra_info)['json']\n\n return parse_subject(request, subject, {'author': author})", "def getSequencesFromSample(self, study_id, sample_id):\n try:\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_sequences_for_fasta', [study_id, sample_id, results])\n seqs = {}\n for row in results:\n seqs[row[0]] = row[1]\n return seqs \n except Exception, e:\n 
print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def detail(self):\n url = '/question/%d' % self.id\n d = req.get(url)\n return parser.detail(d)", "def get_metadata(data):\n genres = list(data[\"genre\"])\n print(\"genres:\", len(set(genres)), set(genres))\n return genres", "def get_song(self): \n\n song = self.tracks.sample(n=1).to_dict('index')\n return list(song.values())[0]", "def show():\n\n quality_list = []\n\n conn = sqlite3.connect(\"person_database.bd\")\n c = conn.cursor()\n\n c.execute(\"SELECT *, oid FROM person_info\")\n records = c.fetchall()\n\n conn.commit()\n conn.close()\n\n for record in records:\n quality_list.append(str(record[2]) + \" \" + str(record[0]))\n\n return quality_list", "def report_seq(self, seq_map):\n sents = []\n for i in range(0, len(seq_map)):\n\n if seq_map[i]['paragraph']:\n # text += \"\\n \"\n quote_start = '\"'\n else:\n quote_start = \"\"\n if i > len(seq_map) - 2 or seq_map[i + 1]['paragraph']:\n quote_end = '\"'\n else:\n quote_end = \" \"\n if len(seq_map[i]['speech_act']) > 0:\n speech_act = seq_map[i]['speech_act'] + \",\"\n else:\n speech_act = seq_map[i]['speech_act']\n tokens = [utils.START_TOKEN]\n tokens.append(seq_map[i]['speaker_str'])\n tokens.append(speech_act)\n tokens.append(quote_start)\n tokens.extend(seq_map[i]['speech'][1:-1])\n tokens.append(quote_end)\n tokens.append(utils.END_TOKEN)\n sents.append(tokens)\n return sents", "def get_repeat_info(self, seq_descr: str, repeat_id: int) -> Optional[Tuple[str, Union[str, int]]]:\n seq_name: str = Sequencer.get_name(seq_descr)\n seq: Optional['Sequencer'] = self.get_seq_by_name(seq_name)\n if seq is None:\n return None\n return seq.get_repeat_info(repeat_id)", "def get(self):\n return orthanc.study(self.orthanc_id)", "def get_salience(self):\n return self.salience", "def print_songs(self):\n\t\tfor i,s in enumerate(self._songs):\n\t\t\tprint('{0}. 
{1}'.format(i, s.print_info()))", "def display_seqres_seq():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n with open(filepath, 'r') as file:\n for line in file:\n if line[:6] == 'SEQRES':\n line_split = line.split()[4:]\n print(line_split)\n return file", "def get_info(self):\n self.exists = self.check_subscr()\n return self.attrs", "def get_identifier(self):\n return 'Sequence SMNIST'", "def get_seq(self): # -> list[Unknown]:\n ...", "def getQiimeSffSamples(self, study_id,seq_run_id):\n try:\n con = self.getSFFDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('get_qiime_sff_samples', \\\n [study_id,seq_run_id,results])\n return results\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), str(e))\n return False", "def scrutiny(raw_data=\"\"):\n # Per molecule in DW's listing file, extract count of stereo centres,\n # cistrans double bonds, and assigned diff_inchi label.\n survey = []\n with open(raw_data, mode=\"r\") as source:\n for line in source:\n line = str(line).strip()\n data = line.split(\"\\t\")\n\n stereo = data[0]\n cistrans = data[1]\n diff_inchi = data[9]\n\n retain = str(f\"{stereo} {cistrans} {diff_inchi}\")\n survey.append(retain)\n\n # remove the header line:\n del survey[0]\n\n # for a frequency count, build a dictionary:\n counting = {}\n for instance in survey:\n counting.setdefault(instance, 0)\n counting[instance] = counting[instance] + 1\n\n # convert the dictionary into a list which may be sorted:\n listing = []\n for key, value in counting.items():\n retain = str(\n f\"stereo, E/Z, label diff_inchi:\\t{key}\\tfrequency:\\t{value}\")\n listing.append(retain)\n listing.sort()\n\n # the eventual report:\n with open(\"frequency_list.txt\", mode=\"w\") as newfile:\n for element in listing:\n print(element)\n newfile.write(f\"{element}\\n\")\n\n print(\"\\nSee file 'frequency_list.txt' for a permanent record.\")", "def get_details(disease):\n\treturn d_desc_map[disease]", "def _get_sample_information(sample: domain.Sample) -> Dict[str, str]: # pragma: no cover\n # TODO: This ideally should be replaced with a constant, rather than a hard-coded string.\n return {\"SampleID\": sample.guid}", "def getResidueInformation(self, resIDs=None, atomIDs=None):\n if resIDs is None:\n resIDs = set ()\n else:\n resIDs = set (resIDs)\n\n if atomIDs is not None:\n for i in atomIDs:\n resIDs.add (self._atomInfo[i][\"residue\"])\n return self.getResidueInformation (resIDs=resIDs)\n\n resIDs = list (resIDs)\n resIDs.sort ()\n str=''\n for res in resIDs:\n str=str+self._residueInfo[res][\"name\"]\n print self._shortenResidue (str)\n str = ''\n return dict ((resID, self._residueInfo[resID]) for resID in resIDs)", "def instruments(self):\r\n return self.get_field('instrument')", "def info(self):\n txt = \"\"\"Lick Index {s.name}\n wavelength units: {s.wavelength_unit}\n Index Band: {s.band}\n Blue continuum band: {s.blue}\n Red continuum band: {s.red}\n Measurement unit: {s.index_unit}\"\"\".format(s=self)\n print(txt)", "def info(self):\n txt = \"\"\"Lick Index {s.name}\n wavelength units: {s.wavelength_unit}\n Index Band: {s.band}\n Blue continuum band: {s.blue}\n Red continuum band: {s.red}\n Measurement unit: {s.index_unit}\"\"\".format(s=self)\n print(txt)", "def get_technical_details(self):\n\n url = \"https://www.imdb.com/title/%s/reference\" % (self.film_id)\n return Scraper(url).scrape_technical_data()", "def get_song_info(self, 
song_id):\n return self.__get('song', song_id)", "def infotoids(seqsinfo, outdir):\n allids = [x.patient_id for x in seqsinfo]\n # TODO: check all patient_ids are the same\n s = allids[0]\n\n return({'subject': \"sub-\" + IDLOOKUP.get(s, 'UNKNOWN'),\n 'locator': None, 'session': None})", "def cds_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n seqid = defline[1:].split(' ')[0]\n if seqid not in seqs:\n seqs[seqid] = seq\n\n accession = ''\n cdslen = 0\n for entry in gff3:\n if '\\tCDS\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n accession = re.search(r'accession=([^;\\n]+)', fields[8]).group(1)\n cdslen += int(fields[4]) - int(fields[3]) + 1\n elif entry.startswith('###'):\n if accession:\n cdsseq = seqs[accession]\n if len(cdsseq) != cdslen:\n message = 'CDS for \"%s\": length mismatch' % accession\n message += ' (gff3=%d, fa=%d)' % (cdslen, len(cdsseq))\n message += '; most likely a duplicated accession'\n message += ', discarding'\n print(message, file=sys.stderr)\n else:\n gccontent = gc_content(cdsseq)\n gcskew = gc_skew(cdsseq)\n ncontent = n_content(cdsseq)\n values = '%s %d %.3f %.3f %.3f' % (\n accession, cdslen, gccontent, gcskew, ncontent)\n yield values.split(' ')\n accession = ''\n cdslen = 0", "def show_info(self):\n # attr[0] attr[1]\n attrs = [(self.TYP.value, 'nam'),\n ('Skill', 'skl')]\n # voeg ook alle stats en skills in deze lijst toe.\n for stat in Minimals:\n attrs.append((stat.value, stat.name))\n attrs.append(('Spell Battery', 'cur_bat'))\n for stat in StatType:\n attrs.append((stat.value, stat.name))\n for skill in SkillType:\n attrs.append((skill.value, skill.name))\n\n # nu alle mogelijkheden geladen zijn, ga dan aan de slag met diegene die van toepassing zijn\n attr_list = []\n\n import enum\n for attr in attrs:\n value_of_attr = self.get_value_of(attr[1])\n # uitzondering, 'wht' altijd gewoon weergeven\n if attr[0] == StatType.wht.value:\n # deze uitzondering geldt niet voor weapons en shields.\n if not isinstance(self.get_value_of('skl'), enum.Enum): # niet wanneer 'skl' een waarde heeft\n attr_list.append((attr[0], str(value_of_attr)))\n elif value_of_attr:\n if isinstance(value_of_attr, enum.Enum): # uitzondering alleen voor 'skl'\n value_of_attr = value_of_attr.value\n elif attr[0] == StatType.hit.value: # uitzondering alleen voor 'hit'\n value_of_attr = str(value_of_attr)+\"%\"\n attr_list.append((attr[0], str(value_of_attr)))\n\n return attr_list", "def info(self):\r\n\r\n return self.sim_info", "def show_recs(self):\n if len(self.storage.records) == 0:\n return \"Records not found!\"\n else:\n string_of_records = \"\"\n for record in self.storage.records:\n string_of_records += record.to_string()\n return string_of_records", "def getFrequencyDict(sequence):\n # freqs: dictionary (element_type -> int)\n \n for x in sequence:\n hand[x] = hand.get(x,0) + 1\n updatehand(hand, word)\n print hand\n print \"freq function\"\n #return hand", "def get_patient(drs):\n for line in drs:\n if line.strip().startswith('sem'):\n datalist = line.split(':')\n for word in datalist: \n if word.count('patient') > 0:\n variable = word[6:7]\n for word in datalist:\n if word.startswith('pred({0}'.format(variable)):\n return word.split(',')[1]", "def get_experiment_speaker_info(db_root):\n seen_speakers = ['VCTK-speaker-p225-female',\n 'VCTK-speaker-p226-male',\n 'VCTK-speaker-p227-male',\n 'VCTK-speaker-p228-female',\n 'VCTK-speaker-p229-female',\n 'VCTK-speaker-p230-female',\n 
'VCTK-speaker-p231-female',\n 'VCTK-speaker-p232-male',\n 'VCTK-speaker-p233-female',\n 'VCTK-speaker-p234-female',\n 'VCTK-speaker-p236-female',\n 'VCTK-speaker-p237-male',\n 'VCTK-speaker-p238-female',\n 'VCTK-speaker-p239-female',\n 'VCTK-speaker-p240-female',\n 'VCTK-speaker-p241-male',\n 'VCTK-speaker-p243-male',\n 'VCTK-speaker-p244-female',\n 'VCTK-speaker-p245-male',\n 'VCTK-speaker-p246-male',\n 'VCTK-speaker-p247-male',\n 'VCTK-speaker-p248-female',\n 'VCTK-speaker-p249-female',\n 'VCTK-speaker-p250-female',\n 'VCTK-speaker-p251-male',\n 'VCTK-speaker-p252-male',\n 'VCTK-speaker-p253-female',\n 'VCTK-speaker-p254-male',\n 'VCTK-speaker-p255-male',\n 'VCTK-speaker-p256-male',\n 'VCTK-speaker-p257-female',\n 'VCTK-speaker-p258-male',\n 'VCTK-speaker-p259-male',\n 'VCTK-speaker-p260-male',\n 'VCTK-speaker-p261-female',\n 'VCTK-speaker-p262-female',\n 'VCTK-speaker-p263-male',\n 'VCTK-speaker-p264-female',\n 'VCTK-speaker-p265-female',\n 'VCTK-speaker-p266-female',\n 'VCTK-speaker-p267-female',\n 'VCTK-speaker-p268-female',\n 'VCTK-speaker-p269-female',\n 'VCTK-speaker-p270-male',\n 'VCTK-speaker-p271-male',\n 'VCTK-speaker-p272-male',\n 'VCTK-speaker-p273-male',\n 'VCTK-speaker-p274-male',\n 'VCTK-speaker-p275-male',\n 'VCTK-speaker-p276-female',\n 'VCTK-speaker-p277-female',\n 'VCTK-speaker-p278-male',\n 'VCTK-speaker-p279-male',\n 'VCTK-speaker-p280-female',\n 'VCTK-speaker-p281-male',\n 'VCTK-speaker-p282-female',\n 'VCTK-speaker-p283-female',\n 'VCTK-speaker-p284-male',\n 'VCTK-speaker-p285-male',\n 'VCTK-speaker-p286-male',\n 'VCTK-speaker-p287-male',\n 'VCTK-speaker-p288-female',\n 'VCTK-speaker-p292-male',\n 'VCTK-speaker-p293-female',\n 'VCTK-speaker-p294-female',\n 'VCTK-speaker-p295-female',\n 'VCTK-speaker-p297-female',\n 'VCTK-speaker-p298-male',\n 'VCTK-speaker-p299-female',\n 'VCTK-speaker-p300-female',\n 'VCTK-speaker-p301-female',\n 'VCTK-speaker-p302-male',\n 'VCTK-speaker-p303-female',\n 'VCTK-speaker-p304-male',\n 'VCTK-speaker-p305-female',\n 'VCTK-speaker-p306-female',\n 'VCTK-speaker-p307-female',\n 'VCTK-speaker-p308-female',\n 'VCTK-speaker-p310-female',\n 'VCTK-speaker-p311-male',\n 'VCTK-speaker-p312-female',\n 'VCTK-speaker-p313-female',\n 'VCTK-speaker-p314-female',\n 'VCTK-speaker-p316-male',\n 'VCTK-speaker-p317-female',\n 'VCTK-speaker-p318-female',\n 'VCTK-speaker-p323-female',\n 'VCTK-speaker-p326-male',\n 'VCTK-speaker-p329-female',\n 'VCTK-speaker-p330-female',\n 'VCTK-speaker-p333-female',\n 'VCTK-speaker-p334-male',\n 'VCTK-speaker-p335-female',\n 'VCTK-speaker-p336-female',\n 'VCTK-speaker-p339-female',\n 'VCTK-speaker-p340-female',\n 'VCTK-speaker-p341-female',\n 'VCTK-speaker-p343-female',\n 'VCTK-speaker-p345-male',\n 'VCTK-speaker-p347-male',\n 'VCTK-speaker-p351-female',\n 'VCTK-speaker-p360-male',\n 'VCTK-speaker-p361-female',\n 'VCTK-speaker-p362-female',\n 'VCTK-speaker-p363-male',\n 'VCTK-speaker-p364-male',\n 'VCTK-speaker-p374-male',\n 'VCTK-speaker-p376-male']\n\n # speaker index list for training and validation\n n_speaker = len(seen_speakers)\n\n # take all speakers in train and validation!!!\n train_speakers = seen_speakers\n valid_speakers = seen_speakers\n print('number of VCTK speakers = %d' % n_speaker)\n\n sp2id = {sp: i for i, sp in enumerate(seen_speakers)}\n id2sp = {i: sp for i, sp in enumerate(seen_speakers)}\n\n return seen_speakers, sp2id, id2sp", "def _get_info(self, id, score=None):\n try:\n info_query = f\"\"\"\n SELECT m.primary_title, m.start_year, r.average_rating, r.num_votes\n FROM movies m\n JOIN ratings r ON 
m.movie_id = r.movie_id\n WHERE m.movie_id = '{id}'\"\"\"\n self.cursor_dog.execute(info_query)\n except Exception as e:\n return tuple([f\"Movie title unknown. ID:{id}\", None, None, None, None, None, None, id])\n\n t = self.cursor_dog.fetchone()\n if t:\n title = tuple([t[0], t[1], f\"https://www.imdb.com/title/tt{id}/\",\n f\"https://www.letterboxd.com/imdb/tt{id}/\", t[2], t[3], score, id])\n return title\n else:\n return tuple([f\"Movie title not retrieved. ID:{id}\", None, None, None, None, None, None, id])", "def info(self):\n past_shows = self.get_shows(Show.start_time <= datetime.now())\n upcoming_shows = self.get_shows(Show.start_time > datetime.now())\n\n return {\n 'id': self.id,\n 'name': self.name,\n 'genres': self.genres,\n 'address': self.address,\n 'city': self.city,\n 'state': self.state,\n 'phone': self.phone,\n 'website': self.website,\n 'facebook_link': self.facebook_link,\n 'seeking_talent': self.seeking_talent,\n 'seeking_description': self.seeking_description,\n 'image_link': self.image_link,\n 'past_shows': past_shows,\n 'upcoming_shows': upcoming_shows,\n 'past_shows_count': len(past_shows),\n 'upcoming_shows_count': len(upcoming_shows)\n }", "def get_raw_information(self):\n try:\n info = self.student_attendance_record.get_period_info(\n self.start_date, self.day_periods)\n return (self.student_name, self.student_gender, info)\n except AttributeError:\n raise AttributeError, \\\n \"Failed to get student attendance record for: %s\" \\\n %unicode(self.student)", "def getVSMSpace():\n sids,documents = getSongTextInfo()\n texts = [[word for word in document.lower().split()] for document in documents]\n dictionary = corpora.Dictionary(texts)\n corpus = [dictionary.doc2bow(text) for text in texts]\n songMap = {}\n index = 0\n for doc in corpus:\n sid = sids[index]\n rMap = {}\n for item in doc:\n wid = item[0]\n count = item[1]\n rMap[wid] = count\n songMap[sid] = rMap\n index += 1\n return songMap", "def print_freqs(self):\n words = list(self.freqs)[0:10]\n print()\n for word in words:\n print(word[0].rjust(15) + \" | \" + str(word[1]).ljust(3) + \" \" + (word[1] * \"*\"))", "def get_sequence(self):\n\n with open(self.input_file, 'r') as input_file:\n\n tree = ET.parse(input_file)\n root = tree.getroot()\n\n #print('Root:', root)\n\n # TODO: Expand to handle multiple parts\n part_list_idx = -1\n part_idx = -1\n\n # Find <part-list> and <part> element indexes\n for i, child in enumerate(root):\n if child.tag == 'part-list':\n part_list_idx = i\n elif child.tag == 'part':\n # Choose 1st part only to generate sequence\n part_idx = i if part_idx == -1 else part_idx\n\n # Check for bad MusicXML\n if part_list_idx == -1 or part_idx == -1:\n print('MusicXML file:', self.input_file,' missing <part-list> or <part>')\n return ['']\n sys.exit(0)\n\n # Get number of staves in the MusicXML\n num_staves = 1\n for e in root[part_idx][0][0]:\n if e.tag == 'staff-layout':\n num_staves = int(e.attrib['number'])\n staves = ['' for x in range(num_staves)]\n\n # Read each measure\n r_iter = iter(root[part_idx])\n for i, measure in enumerate(r_iter):\n\n # Gets the symbol sequence of each staff in measure\n measure_staves, skip = self.read_measure(measure, num_staves, i)\n\n for j in range(num_staves):\n staves[j] += measure_staves[j]\n\n for j in range(skip-1):\n next(r_iter)\n\n return staves", "def samples():\n f = open(config['samples'], \"r\")\n samp=[]\n for line in f:\n samp.append(line.strip().split()[0])\n return samp", "def instruments():\n instr_dict = {}\n #\n 
instr_dict['LRISr'] = 2**0\n instr_dict['LRISb'] = 2**1\n instr_dict['Kastb'] = 2**2\n instr_dict['shane_kast_red'] = 2**3\n instr_dict['shane_kast_red_ret'] = 2**3\n instr_dict['DEIMOS'] = 2**4\n instr_dict['NIRSPEC'] = 2**5\n instr_dict['GMOS'] = 2**6\n instr_dict['DBSP'] = 2**7\n #\n return instr_dict", "def sense(self):\n # Get all PIMAP data from the queue.\n pimap_data = []\n while not self.pimap_data_queue.empty():\n pimap_data.append(self.pimap_data_queue.get())\n\n # Sort the PIMAP data by timestamp. The PIMAP data can be out of order because we are\n # using multiple processes to sense it.\n pimap_data.sort(key=lambda x: float(pu.get_timestamp(x)))\n\n timestamps = (list(map(lambda x: float(pu.get_timestamp(x)), pimap_data)))\n self.latencies.extend(time.time() - np.array(timestamps))\n # Track the amount of sensed PIMAP data.\n self.sensed_data += len(pimap_data)\n\n # If system_samples is True and a system_sample was not created within the last\n # system_samples period, create a system_sample.\n pimap_system_samples = []\n if (self.system_samples and\n (time.time() - self.system_samples_updated > self.system_samples_period)):\n sample_type = \"system_samples\"\n if self.app != \"\":\n sample_type += \"_\" + self.app\n # Identify PIMAP Sense using the host and port.\n patient_id = \"sense\"\n device_id = (self.host, self.port)\n sensed_data_per_s = self.sensed_data/(time.time() - self.system_samples_updated)\n sample = {\"throughput\":sensed_data_per_s}\n if len(self.latencies) > 0:\n sample[\"latency\"] = np.mean(self.latencies)\n system_sample = pu.create_pimap_sample(sample_type, patient_id, device_id, sample)\n pimap_system_samples.append(system_sample)\n\n # Reset system_samples variables.\n self.system_samples_updated = time.time()\n self.sensed_data = 0\n self.latencies = []\n\n return pimap_data + pimap_system_samples", "def seqfreqs(seqs):\n #if \"seqfreqs\" in options.debug:\n # print(\"There are {} seqs\".format(len(seqs)))\n x = []\n #this block calculates the frequencies of each sequence\n for i in range(len(seqs)):\n this_x = 0\n for j in range(len(seqs)):\n if str(seqs[i]) == str(seqs[j]):\n #if \"seqfreqs\" in options.debug:\n # print(\"{} == {}\".format(i, j))\n this_x += 1\n x.append(this_x/len(seqs))\n #print(\"done with these seqfreqs\\n\")\n #if \"seqfreqs\" in options.debug:\n # print(\"the frequencies are {}\".format(x))\n return x", "def display_genre(self):\n \n # Get first 3 genres and join to a string.\n return ', '.join([ genre.name for genre in self.genre.all()[:3] ])", "def info(self):\n past_shows = self.get_shows(Show.start_time <= datetime.now())\n upcoming_shows = self.get_shows(Show.start_time > datetime.now())\n\n return {\n 'id': self.id,\n 'name': self.name,\n 'genres': self.genres,\n 'city': self.city,\n 'state': self.state,\n 'phone': self.phone,\n 'website': self.website,\n 'facebook_link': self.facebook_link,\n 'seeking_venue': self.seeking_venue,\n 'seeking_description': self.seeking_description,\n 'image_link': self.image_link,\n 'past_shows': past_shows,\n 'upcoming_shows': upcoming_shows,\n 'past_shows_count': len(past_shows),\n 'upcoming_shows_count': len(upcoming_shows)\n }", "def info(self):", "def info(self):", "def sr(self):\n return ''.join([str(seg) for seg in self.segments])", "def sosid(self):\r\n return self.word2idx.get(SOS, 0)", "def gather_sentences(self):\n sentences = Sentence.objects.all()\n return sentences", "def get_residue_info(self):\n return", "def s_metadata(self):\n index = self.var_index(s=True)\n return 
self.var_metadata(index)", "def __str__(self):\n spkr_appeared = set([])\n for path in self._filepaths:\n sid = path.split('/')[-2][1:]\n assert sid in self._spkr_table, f\"{sid} not a valid speaker!\"\n spkr_appeared.add(sid)\n phoncts = {p: 0 for p in PHONETABLE}\n mindur = {p: 100 for p in PHONETABLE}\n maxdur = {p: 0 for p in PHONETABLE}\n for path in self._filepaths:\n sr = audioinfo(os.path.join(self.root, path)).samplerate\n path = os.path.join(self.root, os.path.splitext(path)[0]+'.PHN')\n for (ts, te), pp in phnread(path):\n assert pp in phoncts, f\"[{pp}] not in phone dict!\"\n phoncts[pp] += 1\n dur = (te - ts) / sr * 1000\n if mindur[pp] > dur:\n mindur[pp] = dur\n if maxdur[pp] < dur:\n maxdur[pp] = dur\n totcts = sum(v for p, v in phoncts.items())\n report = \"\"\"\n +++++ Summary for [{}] partition [{}] +++++\n Total [{}] valid files to be processed.\n Total [{}/{}] speakers appear in this set.\n [Phoneme]: [counts], [percentage], [min-max duration (ms)]\\n{}\n \"\"\".format(self.__class__.__name__,\n self.partition if hasattr(self, 'partition') else None,\n len(self._filepaths), len(spkr_appeared),\n len(self._spkr_table),\n \"\\n\".join(\n f\"\\t\\t[{p:>4}]: [{c:>4}], [{c*100/totcts:2.2f}%], [{mindur[p]:.1f}-{maxdur[p]:.0f}]\"\n for p, c in phoncts.items()))\n\n return report", "def Show_Sequences( self ):\r\n self.system.Change_Seq( \"Sequence\" )", "def get_frequency(self):\r\n return self._api.get_frequency()", "def info(doc):\n\tinfo = {}\n\tinfo['sentences'] = [str(sent) for sent in doc.sents]\n\t#sentences : [sent1, sent2, ...]\n\tinfo['tokens'] = [str(token) for token in doc]\n\t#all tokens in info['tokens']\n\ttoken_vals = {}\n\tfor token in info['tokens']:\n\t\tcurrent_word = token\n\t\ti = 0\n\t\tcurrent_sent = info['sentences'][i]\n\t\tfor i in range(len(info['sentences'])): #for each sentence\n\t\t\tval = current_sent.count(str(current_word))\n\t\t\t#value is the number of times the current word is in the current sent\n\t\t\ttoken_vals[str(token)] = val\n\t\t\t#append to dictionary\n\tinfo['token_vals'] = token_vals\n\t#given a word and a sentence, val is how many times it appears in that sentence\n\treturn info", "def frequencies(self):\n return self._frequencies", "def get_list_frequencies(self):\r\n _debug('simq03b_api.get_list_frequencies')\r\n \r\n s = self.query('SOUR:LIST:FREQ?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_frequencies(): non-float in list ', n, x)\r\n n += 1\r\n return a", "def read(self):\n # open the .SPE file\n with open(self._input_file_path, 'rb') as f:\n lines = f.readlines()\n # Create an empty dictionary for the metadata\n metadata_dictionary = {}\n\n # Search through the file for the needed metadata\n metadata_dictionary['date_acquired'] = re.search(b'date=\"(.*?)\"', lines[1])[1].decode('ANSI') \n metadata_dictionary['width'] = int(re.search(b'width=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['height'] = int(re.search(b'height=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['size'] = metadata_dictionary['width']*metadata_dictionary['height']\n metadata_dictionary['exposure_time'] = int(re.search(b'<ExposureTime type=\"Double\">(.*?)</ExposureTime>', lines[1])[1])\n metadata_dictionary['excitation_wavelength'] = float(re.search(b'laserLine=\"(.*?)\"',lines[1])[1])\n metadata_dictionary['center_wavelength'] = float(re.search(b'<CenterWavelength type=\"Double\">(.*?)</CenterWavelength>',lines[1])[1])\n 
metadata_dictionary['orientation'] = re.search(b'orientation=\"(.*?)\"',lines[1])[1].decode('ANSI')\n\n # Get the wavelength and intensity\n wavelength_string = re.search(b'<Wavelength xml:space=\"preserve\">(.*?)</Wavelength>',lines[1])[1].decode('utf-8')\n wavelength = np.array(wavelength_string.split(','), dtype=np.float64)\n\n f.seek(4100)\n intensity = np.fromfile(f,dtype=np.float32,count=metadata_dictionary['size'])\n\n raman_shift_wavenumbers = 1e7*(1/metadata_dictionary['excitation_wavelength'] - 1/wavelength)\n\n f.close()\n \n # create the sidpy dataset\n data_set = Dataset.from_array(intensity, name='Raman Spectra')\n\n data_set.data_type = 'spectrum'\n data_set.units = 'counts'\n data_set.quantity = 'Intensity'\n\n # set dimensions\n data_set.set_dimension(0, Dimension(raman_shift_wavenumbers, name='Raman Shift',\n units = 'cm-1',\n quantity='Raman shift',\n dimension_type='spectral'))\n data_set.set_dimension(1, Dimension(intensity, name='Intensity',\n units = 'counts',\n quantity='intensity',\n dimension_type='spectral')) \n\n data_set.metadata = metadata_dictionary\n\n return data_set", "def get_spices(self, key):\n spices = []\n if key == 'disease':\n spices.append('(disease|symptom|sign)')\n elif key == 'symptom':\n spices.append('(signs|symptoms)')\n elif key == 'treatment':\n spices.append('(treatment|medicine|operation)')\n return spices", "def info_scrambled(out: Export = Export(\"cwb.encoded_scrambled/data/.info\"),\n sentences: AnnotationCommonData = AnnotationCommonData(\"misc.<sentence>_count\"),\n firstdate: AnnotationCommonData = AnnotationCommonData(\"cwb.datefirst\"),\n lastdate: AnnotationCommonData = AnnotationCommonData(\"cwb.datelast\"),\n resolution: AnnotationCommonData = AnnotationCommonData(\"dateformat.resolution\"),\n protected: bool = Config(\"korp.protected\")):\n create_info_file(sentences, firstdate, lastdate, resolution, protected, out)", "def __str__(self):\n return self._seq", "def get_records(self, _count, random=False):\n\n abtracts=[]\n titles=[]\n\n max_id = [max[0] for max in self.patents.execute(\"SELECT MAX(Id) FROM Patents;\")][0]\n order = \" ORDER BY id ASC \"\n\n if (random == True):\n ids = \"\"\n rands = [randint(1, max_id) for x in range(0, _count)]\n for i, _id in enumerate(rands):\n ids += \"\" + str(_id) + \"\"\n if i != _count - 1:\n ids += \",\"\n\n order = \" AND id IN (\" + ids + \")\"\n\n query = \"SELECT * FROM Patents WHERE (Description!='')\" + \\\n order + \" LIMIT \" + str(_count)\n # print query\n\n for row in self.patents.execute(query):\n # print row[3]\n # remove numbers and some unwantable characters\n text = ''.join(\n [i for i in row[2] if not i.isdigit() and i not in stopwords])\n\n abtracts.append(str(text))\n titles.append(str(row[3]))\n\n return ids, abtracts, titles", "def get_passage(sample_name):\n #look for passage information pattern in sample_name\n regex_results = re.match(\"([A-Z0-9a-z_-]+).(P[T0-9]+)\", sample_name)\n #the passage information is the second element of the results\n passage = regex_results.groups()[1]\n return passage", "def get_sequence_names(self):\r\n return Wiki().sequences_for_article_url(self.url).keys()", "def get_type_of_studies(self) -> str:\n semestr = {\n 1: 'pierwszy',\n 2: 'drugi',\n 3: 'trzeci',\n 4: 'czwarty',\n 5: 'piąty',\n 6: 'szósty',\n 7: 'siódmy',\n 8: 'ósmy',\n 9: 'dziewiąty',\n 10: 'dziesiąty',\n 0: 'niezdefiniowany'}[\n self.semestr]\n return '%s, semestr %s' % (self.program, semestr)", "def at_frequency(self):\n result = str(self.seq).count(\"A\") + 
str(self.seq).count(\"T\")\n return result", "def get_info(self):\n return \"Malayalam Stemmer(Experimental)\"", "def display_genre(self):\n\n\t\treturn ', '.join(genre.name for genre in self.genre.all()[:3])", "def create_seq_record(self, s):\n gene_code = s['gene_code']\n length = self.gene_codes_metadata[gene_code]['length']\n sequence = s['sequences']\n length_difference = length - len(sequence)\n\n sequence += '?' * length_difference\n return sequence", "def get_sdesc(self):\n return self._sdesc", "def instruments(self) -> dict:\n return self._instruments", "def getGenres(movieInfo):\n if \"genres\" in movieInfo:\n return [ _format(genre[\"name\"]) for genre in movieInfo[\"genres\"] ]\n else:\n raise AttributeError(\"%s instance has no attribute genre\" % movieInfo)", "def sequence_items(self):\r\n seq_css = 'ol#sequence-list>li>a>p'\r\n return self.q(css=seq_css).map(self._clean_seq_titles).results", "def get_sentence(self):", "def display_genre(self):\n return ', '.join(genre.name for genre in self.genre.all()[:3])", "def GetFrequency(self):\n ...", "def __str__(self):\n print('=' * 20, \"Subject Information\", '=' * 20)\n print(\"Subject Name: {}\".format(self.name))\n print(\"Pulse Data Length for general questions\")\n print(self.pulse_length[0:20])\n print(\"Number of general Questions: {}\".format(\n len(self.pulse_data[0])))\n print(\"Pulse Data Length for video 1\")\n print(\"Number of questions for video 1: {}\".format(\n len(self.pulse_data[1])))\n print(self.pulse_length[20:40])\n print(\"Pulse Data Length for video 2\")\n print(\"Number of questions for video 2: {}\".format(\n len(self.pulse_data[0])))\n print(self.pulse_length[40:60])\n print('Label Data')\n print(self.label_data)\n print('Label Data shape: {}'.format(self.label_data.shape))\n\n return ''", "def _get_information(self):\n pass", "def human_readable_info(self) -> str:\n next_session = unix_str(self._stat.next_session)\n last_session = unix_str(self._stat.last_session)\n return \"\"\"\n Next Session: {}\n Last Session: {}\n Repetitions: {}\n Health: {}\n ------------------------\n Past Quality (last 20):\n ------------------------\n {}\n \"\"\".format(\n next_session,\n last_session,\n self._stat.actual_repetitions,\n self._health(),\n self._past_quality_graph(),\n )", "def display_genre(self):\n return ', '.join([ genre.name for genre in self.genre.all()[:3] ])" ]
[ "0.5863178", "0.5489143", "0.54585916", "0.5428158", "0.536509", "0.53436375", "0.53430116", "0.53390235", "0.532284", "0.529443", "0.52938986", "0.5290507", "0.52477586", "0.5244924", "0.5206089", "0.5205835", "0.5202415", "0.51880074", "0.51695544", "0.513454", "0.51211977", "0.5120762", "0.51196593", "0.5117652", "0.511687", "0.511681", "0.51096773", "0.5102836", "0.5066955", "0.5058819", "0.5051457", "0.50453436", "0.50288993", "0.50253433", "0.50217795", "0.50151855", "0.5004227", "0.49967584", "0.4988832", "0.4979121", "0.49738884", "0.49738884", "0.49697825", "0.49692628", "0.49677506", "0.49650586", "0.49612984", "0.49560234", "0.49414974", "0.49320355", "0.49163145", "0.49154377", "0.4911237", "0.490847", "0.4897634", "0.48922706", "0.4885011", "0.48709437", "0.48699763", "0.48649937", "0.48622316", "0.48510954", "0.4846766", "0.48450065", "0.48428595", "0.48428595", "0.48378295", "0.48375216", "0.4837156", "0.48370525", "0.48285666", "0.4823623", "0.48192805", "0.48169255", "0.48150566", "0.481291", "0.4809518", "0.48072904", "0.48063147", "0.48050195", "0.48023525", "0.47992772", "0.4793497", "0.47871128", "0.477798", "0.47773302", "0.47744292", "0.47738916", "0.47737592", "0.4773427", "0.47665843", "0.4765616", "0.47631076", "0.47628227", "0.47606197", "0.47594115", "0.47575003", "0.47555542", "0.47538933", "0.47532484" ]
0.55018765
1
Get a set of frames from a particular study.
def get_frames(self,std_id, frame_ids, anno=None): raise
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_frames_for_sample(sample):\n path = os.path.join('data', sample[0])\n filename = sample[1]\n images = sorted(glob.glob(os.path.join(path, filename + '*jpg')))\n return images", "def getFrames():\n\t\tfor cam in Camera.CAMERAS: cam.getFrame()", "def getQiimeSffSamples(self, study_id,seq_run_id):\n try:\n con = self.getSFFDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('get_qiime_sff_samples', \\\n [study_id,seq_run_id,results])\n return results\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), str(e))\n return False", "def get(self):\n return self.frames", "def get_frames(data: Union[sc.DataArray, sc.Dataset], **kwargs) -> sc.Dataset:\n\n # if data is not None:\n # return frames_peakfinding(data=data,\n # instrument=instrument,\n # plot=plot,\n # **kwargs)\n # else:\n\n return frames_analytical(data=data, **kwargs)", "def get_frames(comkey):\n logger.info('Topic Group: {}'.format(browser.title))\n\n # Fetch frames one at a time until no more found\n more_pages = True\n while more_pages:\n base_name = \"{}.html\".format(frame.frame_id())\n frame_name = os.path.join(FRAMESDIR, base_name)\n logger.info('Creating {}'.format(frame_name))\n frame.write_page(frame_name)\n more_pages = frame.next_page()\n\n return None", "def get_frames(self):\n\n log(\"Getting frames for {} at {}\".format(self._location, self._t0))\n fn_get = lambda time_str: self.get_wximg(time_str)\n pool0 = multiprocessing.dummy.Pool(self._frames)\n raw = pool0.map(fn_get, self.get_time_strs())\n wximages = [x for x in raw if x is not None]\n if not wximages:\n return None\n pool1 = multiprocessing.dummy.Pool(len(wximages))\n background = self.get_background()\n if background is None:\n return None\n fn_composite = lambda x: self._pilimg.alpha_composite(background, x)\n composites = pool1.map(fn_composite, wximages)\n legend = self.get_legend()\n if legend is None:\n return None\n loop_frames = pool1.map(lambda _: legend.copy(), composites)\n fn_paste = lambda x: x[0].paste(x[1], (0, 0))\n pool1.map(fn_paste, zip(loop_frames, composites))\n return loop_frames", "def getSFFFiles(self, study_id):\n try:\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n items = []\n con.cursor().callproc('qiime_assets.get_sff_files', [study_id, results])\n for row in results:\n items.append(row[0])\n return items\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def getSampleIDsFromStudy(self, study_id):\n try:\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_sample_ids_from_study', [study_id, results])\n metadata_fields = []\n for row in results:\n metadata_fields.append(row[0])\n return metadata_fields\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def get_all_frames(self):\n #pdb.set_trace()\n frame_size=(512,512)\n frame_data = []\n variant_type = (pythoncom.VT_BYREF | pythoncom.VT_ARRAY | pythoncom.VT_UI2)\n for i in xrange(self.defaults['EXP_SEQUENTS']):\n frame_data.append(win32com.client.VARIANT(variant_type, numpy.empty(frame_size)))\n frame_data[i]=self.appdoc.GetFrame(i+1,frame_data[i])\n \n return numpy.array(frame_data, dtype=numpy.uint16)", "def get_frames_rep_hdf5(file_hdf5, time_hdf5, filename, time_begin_s, time_end_s):\n get_features = Features_Accessor(time_hdf5, file_hdf5).get_features_from_raw\n return get_features(dict({'file': [filename], 'onset': 
[time_begin_s], 'offset': [time_end_s]}))[1]", "def experiment_frames(request,id):\n\texp = Experiment.objects.get(id=id)\n\tmovies_glob = PROJECT_DIR + exp.frames_url() + '*.swf'\n\tmovies = glob.glob(movies_glob)\n\truns = map(os.path.basename,movies)\n\truns.sort()\n\treturn render_to_response('experiments/experiment_frames.html',\n\t\t\t\t\t\t\t{'exp':exp,'runs':runs},\n\t\t\t\t\t\t\tcontext_instance=RequestContext(request))", "def get_frames_by_filename(self, filename, data_type):\n # First, find the sample row.\n sample = None\n\n for row in self.data:\n if row[1] == filename:\n sample = row\n break\n if sample is None:\n raise ValueError(\"Couldn't find sample: %s\" % filename)\n\n # Get the sequence from disk.\n sequence = self.get_extracted_sequence(data_type, sample)\n if sequence is None:\n raise ValueError(\"Can't find sequence. Did you generate them?\")\n \n return sequence", "def getFrames(job, **options):\n criteria = search.FrameSearch.criteriaFromOptions(**options)\n framesSeq = Cuebot.getStub('frame').GetFrames(\n job_pb2.FrameGetFramesRequest(job=job, r=criteria), timeout=Cuebot.Timeout).frames\n return [Frame(f) for f in framesSeq.frames]", "def getSampleList(self, study_id):\n try:\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_sample_list', [study_id, results])\n sample_list = {}\n for sample_name, sample_id in results:\n sample_list[sample_id] = sample_name\n return sample_list\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def get_frame():\n\tall_frames = {}\n\tkeys = get_utt()\n\tfor i, matrix_id in enumerate(mfcc_h5.read(keys)):\t\n\t\tmatrix = np.asarray(matrix_id)\t\n\t\tall_frames[keys[i]] = matrix \n\t\n\treturn all_frames", "def get_frames_with_manifests() -> Generator[dict, dict, list[FrameWithManifest]]:\n response = yield {\"method\": \"ApplicationCache.getFramesWithManifests\", \"params\": {}}\n return [FrameWithManifest.from_json(f) for f in response[\"frameIds\"]]", "def extract_frames():\n vc = cv2.VideoCapture(INPUT_FILE)\n c=1\n\n if vc.isOpened():\n rval , frame = vc.read()\n else:\n rval, frame = False, False\n\n while rval:\n # cv2.imwrite((MODIFIED_FRAMES_DIR + 'img' + str(c) + '.jpg'),frame)\n cv2.imwrite((MODIFIED_FRAMES_DIR + str(c) + '.jpg'),frame)\n c = c + 1\n cv2.waitKey(1)\n rval, frame = vc.read()\n vc.release()\n print(\"All frames extracted successfully...\")", "def _subsample_frames(self, video_clip_frames):\n subsampled_frames = []\n current_ix = 0\n step_size = len(video_clip_frames) / float(config.RGB_N_FRAMES)\n for _ in range(config.RGB_N_FRAMES):\n frame = video_clip_frames[int(current_ix)]\n subsampled_frames.append(frame)\n current_ix += step_size\n\n return np.array(subsampled_frames)", "def _select_frames(self, frames):\n converted_frames = list()\n # Ignore some frame at begin and end.\n for i in np.linspace(0, self.video_size, self.frame_num + 2)[1:self.frame_num + 1]:\n img = frames[int(i)]\n img = img.resize((224, 224), Image.BILINEAR)\n frame_data = np.array(img)\n converted_frames.append(frame_data)\n return converted_frames", "def get_experiment_frames(experiments, datadir=None):\n import pandas as pd\n\n exp_frames = dict()\n\n if not datadir:\n datadir = os.getcwd()\n\n print 'reading profiles in %s' % datadir\n\n for exp in experiments:\n print \" - %s\" % exp\n exp_frames[exp] = list()\n\n for sid, label in experiments[exp]:\n print \" - %s\" % sid\n \n import glob\n for prof in glob.glob 
(\"%s/%s-pilot.*.prof\" % (datadir, sid)):\n print \" - %s\" % prof\n frame = pd.read_csv(prof)\n exp_frames[exp].append ([frame, label])\n \n return exp_frames", "def frames(self):\n return list(self._frames)", "def get_lidc_dataframes(data_path: str, num_sectors: int):\n sorted_studies = sorted(glob(os.path.join(data_path, \"*\")))\n frames = []\n for study in sorted_studies:\n df_temp = pd.read_csv(\n os.path.join(study, \"annotations.txt\"), sep=\": \", header=None, dtype=str\n )\n df_temp.columns = [\"File\", \"Label\"]\n df_temp[\"File\"] = df_temp[\"File\"].apply(lambda e: f\"{study}/{e}.png\")\n frames.append(df_temp)\n\n return [balance_frame(frame) for frame in slice_sector(frames, num_sectors)]", "def _read_frames(self):\n cap = self._read_file()\n\n frame_list = []\n ret_list = []\n\n while True:\n ret, frame = cap.read()\n if ret:\n frame_list.append(np.array(frame))\n ret_list.append(ret)\n else:\n break\n if self.mode==\"np\":\n frame_list = np.array(frame_list)\n return frame_list", "def condition_frames(run_evs, skip=0):\n frames_list = []\n for ev in run_evs: # loop through runs\n if not bool(ev): # empty EV file (e.g. when there were no error trials or no no-response trials)\n frames_list.append(np.array([]))\n else:\n # Determine when trial starts, rounded down\n start = np.floor(ev[\"onset\"] / TR).astype(int)\n\n # Use trial duration to determine how many frames to include for trial\n duration = np.ceil(ev[\"duration\"] / TR).astype(int)\n\n # Take the range of frames that correspond to this specific trial\n if type(start) == np.ndarray: # many trials\n frames = [s + np.arange(skip, d) for s, d in zip(start, duration)] # loop through different onsets for each trial\n elif type(start) == float or type(start) == np.int64:\n # contains only one onset: it is either the full block (with many trials inside,\n # but just one onset value) or just a single trial\n frames = [start + np.arange(skip, duration)]\n\n frames_list.append(np.concatenate(frames))\n\n return frames_list", "def get_images_in_frame(self, frame: int) -> np.ndarray:\n\n # Check if it's the first time the frames are requested and the videos are not unrolled\n if not os.path.exists(self.videos_dir):\n self.unroll_videos()\n\n # print(\"Searching for images of frame \", frame, \"...\")\n # Create the string of the name of the frame that we are going to search for in all camera folders\n frame_name = \"frame\" + ''.zfill(9)\n frame_string = str(frame*2-1) if self.half_resolution else str(frame)\n number_of_chars = len(frame_string)\n frame_name = frame_name[:-number_of_chars] + frame_string + \".\" + self.image_format\n \n # print(\"Frame name: \" + frame_name)\n\n # Get the paths to all cameras inside the videos folder sorted by name\n cameras_paths = [os.path.join(self.videos_dir, name) for name in os.listdir(self.videos_dir) if os.path.isdir(os.path.join(self.videos_dir,name))]\n cameras_paths.sort()\n\n # Get the frame_name image from those paths\n images = []\n # print(cameras_paths)\n\n for path in cameras_paths:\n image = cv2.imread(os.path.join(path, frame_name), cv2.IMREAD_COLOR)\n # print(os.path.join(path, frame_name))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n images.append(image)\n\n # print(\"Images of frame \", frame, \" retrieved.\")\n return np.array(images)", "def sample_frames(frame_dir, fps, visualize_sample_rate):\n visualize_every_x_frames = visualize_sample_rate * int(fps)\n sampled_frames = np.empty((0, 3, IMG_DIM, IMG_DIM), dtype=np.float32) # B, C, H, W\n i = 0\n for file in 
sorted(os.listdir(frame_dir)):\n if i % visualize_every_x_frames == 0:\n img = skimage.img_as_float(skimage.io.imread(os.path.join(frame_dir, file))).astype(np.float32)\n img = skimage.transform.resize(img, (IMG_DIM, IMG_DIM)) # H, W, C\n img = img.swapaxes(1, 2).swapaxes(0, 1) # C, H, W\n sampled_frames = np.append(sampled_frames, np.array([img]), axis=0)\n i += 1\n logger.debug(\"total number of frames: {}\".format(i))\n return sampled_frames", "def get_frame(self, f):\n return self._frames[f, :]", "def getSampleDetailList(self, study_id):\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('get_sample_detail_list', [study_id, results])\n sample_details = []\n for sample_name, sample_id, public, collection_date, run_prefix, sequence_count, otu_count, otu_percent_hit in results:\n sample_details.append((sample_name, sample_id, public, collection_date, run_prefix, sequence_count, otu_count, otu_percent_hit))\n return sample_details", "def get_fixation_frames(subject, run=0):\n\n trial_frames = np.append(condition_frames(load_evs(subject, 'wm', 'all_bk_cor'))[run],\n condition_frames(load_evs(subject, 'wm', 'all_bk_err'))[run]) # TODO: include no response trials\n trial_frames = np.sort(trial_frames)\n\n fixation_start = np.array([], dtype=int) # initialize\n\n for idx, i in enumerate(trial_frames):\n if idx == 0:\n continue\n\n # find frames with difference greater than 10s\n if i - trial_frames[idx - 1] > 10 / TR:\n fixation_start = np.append(fixation_start, trial_frames[idx - 1])\n\n fixation_duration = np.ceil(15 / TR) # always 15s duration\n\n # get range of frames corresponding to duration of fixation block\n fixation_frames = np.concatenate([i + np.arange(0, fixation_duration, dtype=int) for i in fixation_start])\n\n return fixation_frames", "def _getDataSetForFCSFileSample(self):\n\n # Get the dataset for current FCS file sample\n dataSets = searchService.getDataSet(self._entityId)\n if dataSets is None:\n self._message = \"Could not retrieve datasets for \" \\\n \"FCS file with identifier \" + self._entityId + \"!\"\n self._logger.error(self._message)\n else:\n dataSets = [dataSets]\n\n # Return\n return dataSets", "def getSequencesFromSample(self, study_id, sample_id):\n try:\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_sequences_for_fasta', [study_id, sample_id, results])\n seqs = {}\n for row in results:\n seqs[row[0]] = row[1]\n return seqs \n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def studies(self):\n return self._study_queryset", "def find_frames(self, ftype, calib_ID=None, index=False):\n if 'framebit' not in self.keys():\n msgs.error('Frame types are not set. 
First run get_frame_types.')\n if ftype == 'None':\n return self['framebit'] == 0\n # Select frames\n indx = self.type_bitmask.flagged(self['framebit'], ftype)\n\n if calib_ID is not None:\n # Select frames in the same calibration group\n indx &= self.find_calib_group(calib_ID)\n\n # Return\n return np.where(indx)[0] if index else indx", "def _read_frames(filename: str) -> Iterator[Tuple[CritterType, numpy.ndarray]]:\n frame_skip = 0\n last_section = None\n last_frame = None\n\n good_frames: Dict[Tuple[CritterType, int], numpy.ndarray] = {}\n\n cap = cv2.VideoCapture(filename)\n while True:\n ret, frame = cap.read()\n if not ret:\n break # Video is over\n\n if frame_skip > 0:\n frame_skip -= 1\n continue\n\n if frame.shape[:2] == (1080, 1920):\n frame = cv2.resize(frame, (1280, 720))\n\n assert frame.shape[:2] == (720, 1280), \\\n 'Invalid resolution: {1}x{0}'.format(*frame.shape)\n\n if not detect(frame):\n continue # Skip frames that are not showing critterpedia.\n\n # Detect a dark line that shows up only in Pictures Mode.\n mode_detector = frame[20:24, 600:800].mean(axis=(0, 1))\n if numpy.linalg.norm(mode_detector - (199, 234, 237)) > 50:\n raise AssertionError('Critterpedia is in Pictures Mode.')\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n if filename.endswith('.jpg'): # Handle screenshots\n yield _detect_critter_section(gray), frame[149:623, :]\n continue\n\n if last_frame is None:\n last_frame = frame\n continue\n\n critter_section = _detect_critter_section(gray)\n if critter_section != last_section:\n if last_section is not None:\n frame_skip = 15\n last_section = critter_section\n continue\n\n # Grab the last frame for each side and section combination.\n if last_frame[570:600, :70, 2].min() > 230:\n good_frames[critter_section, 0] = last_frame\n elif last_frame[570:600, -70:, 2].min() > 230:\n good_frames[critter_section, 1] = last_frame\n\n last_frame = frame\n\n cap.release()\n\n for (critter_type, _), frame in good_frames.items():\n # Crop the region containing critter icons.\n yield critter_type, frame[149:623, :]", "def img_filenames(self, matricule):\n proj, sid = next((proj, proj.Matricule(matricule).to('AmcStudentId'))\n for proj in self.projects_by_serie.values()\n if proj.Matricule(matricule).exists('AmcStudentId'))\n return [\n (int(num), filename.replace('%PROJET', proj.path))\n for num, filename in proj.dbs['capture'].execute('select page, src from capture_page where student=? 
order by page', [sid])\n ]", "def frame_list_fixture():\n return [[4, 3, 5, 7], [8, 6, 3], [6, 7]]", "def getBitstreamFrames(self):\n \n return self.bitstream_frames", "def frames(self) -> Set[int]:\n return self._frames", "def get_stack_frames(self, threadId=0, startFrame=0, levels=0, format=None):\n\n # format is ignored, TODO?\n # threadId is ignored since renpy is single threaded for stuff we need\n\n clevel = 0\n slevel = 0 if startFrame is None else startFrame\n elevel = None if levels is None or levels == 0 else levels\n\n frames = []\n cframe = self.active_frame\n while cframe is not None:\n if clevel >= slevel:\n finfo = {}\n\n finfo[\"id\"] = clevel\n finfo[\"name\"] = cframe.f_code.co_name + self.format_method_signature(cframe.f_locals, cframe.f_code)\n finfo[\"source\"] = {\"path\": cframe.f_code.co_filename}\n finfo[\"line\"] = cframe.f_lineno\n finfo[\"presentationHint\"] = \"normal\"\n finfo[\"column\"] = 0\n\n dis_info = {}\n finfo[\"subsource\"] = dis_info\n\n disassembled = dis(cframe.f_code, cframe.f_lasti)\n dis_info[\"sources\"] = [{\"text\": self.format_disassembly(cframe.f_lineno, *de), \"line\": de[1], \"source\": finfo[\"source\"]} for de in disassembled]\n ord = 0\n for de in disassembled:\n if de[0]:\n break\n ord += 1\n finfo[\"subsourceElement\"] = ord\n\n frames.append(finfo)\n clevel += 1\n if elevel is not None and clevel >= elevel:\n break\n cframe = cframe.f_back\n\n return frames", "def get_frames(self):\n if not self.video:\n return []\n # We cannot validate shape on construction as that happens inside graph\n # mode as we construct from a tf.data.Dataset, so we validate here.\n self.video[0].validate_shape_and_dtype()\n return self.video", "def getDataFrames(sampleparams, shakeparams, predictors, outparams):\n coverage = sampleparams['coverage']\n f = fiona.collection(coverage, 'r')\n cbounds = f.bounds\n f.close()\n dx = sampleparams['dx']\n cb = sampleparams['cb']\n nmax = sampleparams['nmax']\n nsamp = sampleparams['nsamp']\n touch_center = sampleparams['touch_center']\n testpercent = sampleparams['testpercent']\n extent = sampleparams['extent']\n h1 = sampleparams['h1']\n h2 = sampleparams['h2']\n\n yestest, yestrain, notest, notrain, xvar, yvar, pshapes, proj = sampleFromFile(coverage, dx=dx,\n nmax=nmax, testPercent=testpercent, touch_center=touch_center, classBalance=cb, extent=extent,\n Nsamp=nsamp, h1=h1, h2=h2)\n\n traincolumns = OrderedDict()\n testcolumns = OrderedDict()\n\n if (100-testpercent) > 0:\n traincolumns['lat'] = np.concatenate((yestrain[:, 1], notrain[:, 1]))\n traincolumns['lon'] = np.concatenate((yestrain[:, 0], notrain[:, 0]))\n traincolumns['coverage'] = np.concatenate((np.ones_like(yestrain[:, 1]),\n np.zeros_like(notrain[:, 1])))\n\n if testpercent > 0:\n testcolumns['lat'] = np.concatenate((yestest[:, 1], notest[:, 1]))\n testcolumns['lon'] = np.concatenate((yestest[:, 0], notest[:, 0]))\n testcolumns['coverage'] = np.concatenate((np.ones_like(yestest[:, 1]), np.zeros_like(notest[:, 1])))\n\n for predname, predfile in predictors.items():\n ftype = getFileType(predfile)\n if ftype == 'shapefile':\n attribute = predictors[predname+'_attribute']\n shapes = subsetShapes(predfile, cbounds)\n yes_test_samples = sampleShapes(shapes, yestest, attribute)\n no_test_samples = sampleShapes(shapes, notest, attribute)\n yes_train_samples = sampleShapes(shapes, yestrain, attribute)\n no_train_samples = sampleShapes(shapes, notrain, attribute)\n testcolumns[predname] = np.squeeze(np.concatenate((yes_test_samples, no_test_samples)))\n 
traincolumns[predname] = np.squeeze(np.concatenate((yes_train_samples, no_train_samples)))\n elif ftype == 'grid':\n method = 'nearest'\n if predname+'_sampling' in predictors:\n method = predictors[predname+'_sampling']\n\n if testpercent > 0:\n yes_test_samples = sampleGridFile(predfile, yestest, method=method)\n no_test_samples = sampleGridFile(predfile, notest, method=method)\n testcolumns[predname] = np.squeeze(np.concatenate((yes_test_samples, no_test_samples)))\n\n if (100-testpercent) > 0:\n yes_train_samples = sampleGridFile(predfile, yestrain, method=method)\n no_train_samples = sampleGridFile(predfile, notrain, method=method)\n traincolumns[predname] = np.squeeze(np.concatenate((yes_train_samples, no_train_samples)))\n else:\n continue # attribute or sampling method key\n\n #sample the shakemap\n layers = ['mmi', 'pga', 'pgv', 'psa03', 'psa10', 'psa30']\n shakegrid = ShakeGrid.load(shakeparams['shakemap'], adjust='res')\n for layer in layers:\n yes_test_samples = sampleFromMultiGrid(shakegrid, layer, yestest)\n no_test_samples = sampleFromMultiGrid(shakegrid, layer, notest)\n yes_train_samples = sampleFromMultiGrid(shakegrid, layer, yestrain)\n no_train_samples = sampleFromMultiGrid(shakegrid, layer, notrain)\n if testpercent > 0:\n testcolumns[layer] = np.squeeze(np.concatenate((yes_test_samples, no_test_samples)))\n if (100-testpercent) > 0:\n traincolumns[layer] = np.squeeze(np.concatenate((yes_train_samples, no_train_samples)))\n\n dftest = pd.DataFrame(testcolumns)\n dftrain = pd.DataFrame(traincolumns)\n\n return (dftrain, dftest)", "def getEMPStudyList(self):\n try:\n studies = []\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_emp_study_list', [results])\n for row in results:\n # study_id, sample_id, sample_name, project_name, study_title, email, sample_count, metadata_complete,\n # study_score, sample_score, s.number_samples_promised, s.number_samples_in_freezer, \n # s.principal_investigator\n studies.append((row[0], row[1], row[2], row[3], row[4], row[5],\n row[6], row[7], row[8], row[9], row[10], row[11], row[12]))\n return studies\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)", "def get_files(self, subj_id, study, modality, series):\n if type(series) is int:\n series = str(series)\n\n url = 'files?' 
+ self._login_code + '&projectCode=' + \\\n self.proj_code + '&subjectNo=' + subj_id + '&study=' + \\\n study + '&modality=' + modality + '&serieNo=' + series\n output = self._send_request(url)\n\n # Split at '\\n'\n file_list = output.split('\\n')\n # Remove any empty entries!\n file_list = [x for x in file_list if x]\n\n return(file_list)", "def stream_frames(video_capture):", "def getFrameList(self):\n with self.frameLock:\n return list(self.frameList)", "def frames(self):\n return self._frames", "def getSubframe( self, x, y, w, h):\n\n return TrackedObject.frame[y:y+h,x:x+w,:], \\\n TrackedObject.frameG[y:y+h,x:x+w]", "def get_frames_image_for_display(self, mouse_x, mouse_y):\n barcode_shape = self.barcode.get_barcode().shape\n # Get the middle position of the saved frame\n cur_pos = (mouse_x * barcode_shape[0] + mouse_y) / (barcode_shape[0] * barcode_shape[1])\n frame_pos = round(cur_pos * len(self.barcode.saved_frames))\n\n # Get another four frames around the middle frame\n # Make sure the frame positions/indexes are valid\n if frame_pos < 2:\n frame_pos = 2\n if frame_pos > len(self.barcode.saved_frames) - 3:\n frame_pos = len(self.barcode.saved_frames) - 3\n frames = self.barcode.saved_frames[frame_pos - 2: frame_pos + 3]\n\n # Get the combined five frames image\n combine_image = frames[0]\n for frame in frames[1:]:\n # Combine the frames into one image\n combine_image = np.concatenate((combine_image, frame), axis=1)\n\n return combine_image", "def _get_headers_by_study(\n files: Set[Path], file_errors: DefaultDict[Path, List[str]]\n):\n study_key_type = Tuple[str, ...]\n studies: Dict[study_key_type, Dict[str, Any]] = {}\n indices: Dict[str, Dict[study_key_type, int]] = {}\n\n for file in files:\n if not file.is_file():\n continue\n with file.open(\"rb\") as f:\n try:\n # Read header only, skip reading the pixel data for now\n ds = pydicom.dcmread(f, stop_before_pixels=True)\n\n # Group by series instance uid or by stack ID (for 4D images)\n # Additionally also group by SOP class UID to skip over extra\n # raw data (dose reports for example) that are sometimes stored\n # under the same series instance UID.\n key: study_key_type = (\n ds.StudyInstanceUID,\n getattr(ds, \"StackID\", ds.SeriesInstanceUID),\n ds.SOPClassUID,\n )\n\n studies[key] = studies.get(key, {})\n indices[ds.StudyInstanceUID] = indices.get(\n ds.StudyInstanceUID, {}\n )\n\n try:\n index = indices[ds.StudyInstanceUID][key]\n except KeyError:\n index = len(indices[ds.StudyInstanceUID])\n indices[ds.StudyInstanceUID][key] = index\n\n headers = studies[key].get(\"headers\", [])\n headers.append({\"file\": file, \"data\": ds})\n studies[key][\"headers\"] = headers\n\n # Since we might need to combine multiple images with different\n # series instance UID (in 4D images), we cannot use the series\n # as the unique file name - instead, we use the study instance\n # uid and a counter (index) per study\n studies[key][\"name\"] = f\"{ds.StudyInstanceUID}-{index}\"\n\n except Exception as e:\n file_errors[file].append(format_error(str(e)))\n\n return studies", "def get_snapshot_list(self, base, snappref=\"SPECTRA_\"):\n #print('Looking for spectra in', base)\n powerspectra = FluxPower(maxk=self.max_k)\n for snap in range(30):\n snapdir = os.path.join(base,snappref+str(snap).rjust(3,'0'))\n #We ran out of snapshots\n if not os.path.exists(snapdir):\n snapdir = os.path.join(base,\"PART_\"+str(snap).rjust(3,'0'))\n if not os.path.exists(snapdir):\n snapdir = os.path.join(base, \"snap_\"+str(snap).rjust(3,'0'))\n if not 
os.path.exists(snapdir):\n continue\n #We have all we need\n if powerspectra.len() == np.size(self.zout):\n break\n try:\n ss = self._get_spectra_snap(snap, base)\n# print('Found spectra in', ss)\n if ss is not None:\n powerspectra.add_snapshot(snap,ss)\n except IOError:\n print(\"Didn't find any spectra because of IOError\")\n continue\n #Make sure we have enough outputs\n if powerspectra.len() != np.size(self.zout):\n raise ValueError(\"Found only\",powerspectra.len(),\"of\",np.size(self.zout),\"from snaps:\",powerspectra.snaps)\n return powerspectra", "def get_stim_onset_times(sessions, metadata_dict):\n if not isinstance(sessions, list):\n sessions = list(sessions)\n\n for line in sessions:\n session_id = line['Sess.ID']\n if session_id: # we loaded a line with session info\n session_name = '{}_{}'.format(line['Experiment'], line['Sess.ID'])\n\n # Check if session is already in database\n if database is not None and session_name in database.index:\n continue\n session_stimuli = {}\n session_stimuli['session_id'] = session_id\n session_stimuli['stimuli'] = {}\n session_stimuli['stimuli']['visual'] = []\n session_stimuli['stimuli']['audio'] = []\n session_stimuli['stimuli']['digital'] = []\n videopaths = []\n # load data from .tdms and .avi fils\n for recording in line['Recordings']:\n path = os.path.join(line['Base fld'], line['Exp fld'], recording)\n for f in os.listdir(path):\n if '.avi' in f:\n videopaths.append(os.path.join(path, f))\n print(videopaths)\n elif '.tdms' == f[-5:]:\n tdmspath = os.path.join(path, f)\n # Loop over each .tdms file and extract stimuli frames\n print(colored('Loading {}: {}'.format(session_name,os.path.basename(tdmspath)),'yellow'))\n tdms = TdmsFile(tdmspath)\n if metadata_dict[session_name]['software'] == 'behaviour':\n visual_rec_stims, audio_rec_stims, digital_rec_stims = [], [], []\n for group in tdms.groups():\n for obj in tdms.group_channels(group):\n if 'stimulis' in str(obj).lower():\n for idx in obj.as_dataframe().loc[0].index:\n if \"/' \" in idx:\n framen = int(idx.split(\"/' \")[1].split('-')[0])\n elif \"/' \" in idx:\n framen = int(idx.split(\"/' \")[1].split('-')[0])\n else:\n framen = int(idx.split(\"/'\")[2].split('-')[0])\n if 'visual' in str(obj).lower():\n visual_rec_stims.append(framen)\n elif 'audio' in str(obj).lower():\n audio_rec_stims.append(framen)\n elif 'digital' in str(obj).lower():\n digital_rec_stims.append(framen)\n else:\n print(colored('Couldnt load stim correctly','yellow'))\n # Now use the AI channels to find the *real* stimulus onset times and replace them\n if audio_rec_stims:\n stimulus_on_idx = np.where(tdms.group_channels('AI')[3].data > .55)[0] #in first data sets this is AI 1, later AI 2\n idx_since_last_stimulus_on = np.diff(stimulus_on_idx)\n if stimulus_on_idx.size:\n stimulus_start_idx = stimulus_on_idx[np.append(np.ones(1).astype(bool),idx_since_last_stimulus_on>2*10000)] #usually 10 or 30\n stimulus_start_frame = np.ceil(stimulus_start_idx / 10000 / (33 + 1 / 3) * 1000).astype(int)\n stimulus_start_frame = stimulus_start_frame[stimulus_start_frame > 300]\n else:\n stimulus_start_frame = np.array(audio_rec_stims)\n print('NO STIMULI FOUND!!')\n\n if len(stimulus_start_frame) != len(audio_rec_stims):\n print('audio AI channel does not match number of timestamps by ' + str(len(audio_rec_stims)-len(stimulus_start_frame)) )\n else:\n discrepancy = stimulus_start_frame - audio_rec_stims\n if sum(discrepancy>8):\n print('audio AI channel does not match values of timestamps')\n else:\n print(discrepancy)\n # for 
conditioning experiment, just use what the tdms says\n # if 'food' in line['Experiment']:\n # stimulus_start_frame = np.array(audio_rec_stims)\n audio_rec_stims = list(stimulus_start_frame)\n\n session_stimuli['stimuli']['visual'].append(visual_rec_stims)\n session_stimuli['stimuli']['audio'].append(audio_rec_stims)\n session_stimuli['stimuli']['digital'].append(digital_rec_stims)\n\n else:\n \"\"\" HERE IS WHERE THE CODE TO GET THE STIM TIMES IN MANTIS WILL HAVE TO BE ADDEDD \"\"\"\n pass\n\n # Add to dictionary (or update entry)\n stimulus_dict[session_name] = session_stimuli\n return stimulus_dict", "def _scan_and_sample_dataset(self, dives):\n roots = [os.path.join(self.p.data_root, n) for n in dives]\n ret = []\n for root in roots:\n h5_files = glob.glob(os.path.join(root, '*.h5'))\n for h5 in h5_files:\n try:\n fgroup = FrameGroup(h5, self.meta)\n except (AssertionError, KeyError, OSError) as e:\n if type(e) == AssertionError:\n print_warn('Unmatched time: {}'.format(h5))\n else:\n print_warn('Corrupted h5: {}'.format(h5))\n continue\n num_samples = int(self.p.downsample * fgroup.num_frames)\n indices = np.random.choice(\n fgroup.num_frames, size=num_samples, replace=False)\n ret.extend([(h5, int(idx)) for idx in indices])\n random.shuffle(ret)\n return ret", "def get_frame_list(self):\r\n\r\n logger.debug('Executing frame extraction')\r\n\r\n frames_loaded = False\r\n\r\n # Try to load YAML file with frame list\r\n if os.path.exists(self.frames_file_path):\r\n\r\n print 'Loading YAML file with frame list'\r\n logger.debug('Loading YAML file with frame list')\r\n\r\n f_list = utils.load_YAML_file(self.frames_file_path)\r\n\r\n if f_list:\r\n self.frame_list = f_list\r\n\r\n print 'YAML file with frame_list loaded'\r\n logger.debug('YAML file with frame_list loaded')\r\n\r\n frames_loaded = True\r\n\r\n if not frames_loaded:\r\n\r\n print '\\n\\n### Frame extraction ###\\n'\r\n logger.debug('\\n\\n### Frame extraction ###\\n')\r\n\r\n # Save processing time\r\n start_time = cv2.getTickCount()\r\n\r\n if not (os.path.exists(self.frames_path)):\r\n os.makedirs(self.frames_path)\r\n\r\n # Counter for all frames\r\n frame_counter = 0\r\n\r\n # Value of frame_counter for last analyzed frame\r\n last_anal_frame = 0\r\n\r\n # Open video file\r\n capture = cv2.VideoCapture(self.resource_path)\r\n\r\n self.frame_list = []\r\n\r\n # Save parameters for this video\r\n param_dict = {}\r\n\r\n if capture is None or not capture.isOpened():\r\n\r\n error = 'Error in opening video file'\r\n\r\n print error\r\n logger.debug(error)\r\n\r\n return\r\n\r\n else:\r\n\r\n video_fps = capture.get(cv2.cv.CV_CAP_PROP_FPS)\r\n\r\n param_dict[c.VIDEO_FPS_KEY] = video_fps\r\n\r\n # Original number of frames\r\n tot_frames = capture.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)\r\n\r\n param_dict[c.VIDEO_TOT_FRAMES_KEY] = tot_frames\r\n\r\n self.fps = video_fps\r\n\r\n self.video_frames = float(tot_frames)\r\n\r\n # Saved frames\r\n saved_frames = 0\r\n\r\n while True:\r\n\r\n # Read frame\r\n ret, frame = capture.read()\r\n\r\n # If no frame is read, abort\r\n if not ret:\r\n break\r\n\r\n used_fps = c.USED_FPS\r\n use_or_fps = c.USE_ORIGINAL_FPS\r\n use_or_res = c.USE_ORIGINAL_RES\r\n used_res_scale_factor = c.USED_RES_SCALE_FACTOR\r\n\r\n if self.params is not None:\r\n\r\n if c.USED_FPS_KEY in self.params:\r\n used_fps = self.params[c.USED_FPS_KEY]\r\n\r\n if c.USE_ORIGINAL_FPS_KEY in self.params:\r\n use_or_fps = self.params[c.USE_ORIGINAL_FPS_KEY]\r\n\r\n if c.USE_ORIGINAL_RES_KEY in self.params:\r\n use_or_res = 
self.params[c.USE_ORIGINAL_RES_KEY]\r\n\r\n if c.USED_RES_SCALE_FACTOR_KEY in self.params:\r\n used_res_scale_factor = self.params[\r\n c.USED_RES_SCALE_FACTOR_KEY]\r\n\r\n # Next frame to be analyzed\r\n next_frame = last_anal_frame + (video_fps / used_fps) - 1\r\n\r\n if use_or_fps or (frame_counter > next_frame):\r\n\r\n # Frame position in video in milliseconds\r\n elapsed_ms = capture.get(cv2.cv.CV_CAP_PROP_POS_MSEC)\r\n\r\n # print 'elapsed video s =', elapsed_video_s\r\n\r\n fr_name = '%07d.png' % frame_counter\r\n\r\n frame_path = os.path.join(self.frames_path, fr_name)\r\n\r\n # Resize frame\r\n if not use_or_res:\r\n fx = used_res_scale_factor\r\n\r\n fy = used_res_scale_factor\r\n\r\n interp = cv2.INTER_AREA\r\n\r\n frame = cv2.resize(src=frame, dsize=(0, 0),\r\n fx=fx, fy=fy,\r\n interpolation=interp)\r\n\r\n cv2.imwrite(frame_path, frame,\r\n [cv.CV_IMWRITE_PNG_COMPRESSION, 0])\r\n\r\n frame_dict = {c.SAVED_FRAME_NAME_KEY: fr_name,\r\n c.ELAPSED_VIDEO_TIME_KEY: int(elapsed_ms)}\r\n\r\n self.frame_list.append(frame_dict)\r\n\r\n last_anal_frame = frame_counter\r\n\r\n saved_frames += 1\r\n\r\n frame_counter += 1\r\n\r\n self.progress = 100 * (frame_counter / self.video_frames)\r\n\r\n print('progress: ' + str(self.progress) + ' % \\r'),\r\n\r\n del capture\r\n\r\n self.saved_frames = float(saved_frames)\r\n\r\n param_dict[c.VIDEO_SAVED_FRAMES_KEY] = self.saved_frames\r\n\r\n # Save frame list in YAML file\r\n utils.save_YAML_file(self.frames_file_path, self.frame_list)\r\n\r\n # Save video parameters in YAML file\r\n\r\n utils.save_YAML_file(self.params_file_path, param_dict)\r\n\r\n # Save processing time\r\n time_in_clocks = cv2.getTickCount() - start_time\r\n time_in_seconds = time_in_clocks / cv2.getTickFrequency()\r\n\r\n print 'Time for frame extraction:', str(time_in_seconds), 's\\n'\r\n logger.debug(\r\n 'Time for frame extraction:', str(time_in_seconds), 's\\n')\r\n\r\n self.anal_times[c.FRAME_EXTRACTION_TIME_KEY] = time_in_seconds\r\n\r\n utils.save_YAML_file(self.analysis_file_path, self.anal_times)", "def _construct_frame_list(self):\n\n chunks = self._chunk_list(list(range(self._start_frame, self._max_frame + 1)),\n int((self._max_frame - self._start_frame) / self._sub))\n frame_list = [[c[0], c[len(c) - 1]] for c in chunks]\n\n # Fix the frame_list in case of rounding errors\n if len(frame_list) > self._sub:\n frame_list = self._restructure_frames(frame_list)\n return frame_list", "def test_get_studies(self):\n states = [\n study_pb2.StudySpec.STATE_ENABLED, study_pb2.StudySpec.STATE_DISABLED\n ]\n studies = []\n for i in range(5):\n study_spec = sample_study_spec(name=str(i), state=states[i % len(states)])\n self.storage.create_study(study_spec)\n studies.append(study_spec)\n\n self.assertListEqual(studies, self.storage.get_studies())\n for state in states:\n self.assertListEqual([study for study in studies if study.state is state],\n self.storage.get_studies(state=state))", "def get_video_frames(self):\r\n\r\n vid_dir = self._video_dir\r\n vid_frames = [str(img_path) for img_path in\r\n Path(vid_dir).glob('*.jpg')]\r\n if len(vid_frames) == 0:\r\n vid_frames = [str(img_path) for img_path in\r\n Path(vid_dir).glob('*.png')]\r\n list_of_frames = sorted(vid_frames)\r\n\r\n self._vid_frames = [list_of_frames]\r\n\r\n return self._vid_frames", "def get_files(self, sid):\n try:\n return self.datas.get(sid)\n except Exception as ex:\n raise ex", "def get_frames_from_video_capture(video_capture):\n while video_capture.isOpened():\n success, frame = 
video_capture.read()\n if not success:\n break\n else:\n yield frame", "def load_frames(self, path, frames, convert_alpha=False):\n dir_path = self.get_path(path)\n extension = \"\"\n for name in listdir(dir_path):\n file = join(dir_path, name)\n if isfile(file):\n striped, file_ext = splitext(name)\n if striped == \"1\":\n extension = file_ext\n break\n surfs = []\n for c in range(1, frames + 1):\n file = join(dir_path, str(c) + extension)\n if convert_alpha:\n surfs.append(load(file).convert_alpha())\n continue\n surfs.append(load(file).convert())\n return surfs", "def get_activations_from_studies(cls, studies):\n\n activations = cls.query.filter(\n cls.pmid.in_(studies), cls.location_id < 81925).all()\n\n return activations", "def frames(self):\n while True:\n ret, frame = self.classification()\n if ret == True:\n yield cv2.imencode('.jpg', frame)[1].tobytes()\n else:\n break", "def get_source_studies(self):\n return list(set([trait.source_dataset.source_study_version.study for trait in self.get_all_source_traits()]))", "def _read_frame(path: str, *, format):\n if not os.path.isfile(path):\n tpath = resources_path(path)\n if not os.path.isfile(path):\n raise IOError(\"Unable to find file.\\nTried paths:\\n%s\\n%s\" % (path, tpath))\n path = tpath\n\n data = scio.loadmat(path)\n res = []\n for (matkey, consumer) in format:\n res.append(consumer(data[matkey]))\n return res", "def readFrames(video):\n frames = []\n while True:\n _, frame = video.read()\n\n if frame is None:\n break\n else:\n frames.append(frame)\n video.release()\n return frames", "def get_measured_subframes(self):\r\r\n loggerCmw = logging.getLogger('get_measured_subframes')\r\r\n numSf_str = self.read('FETCh:WCDMa:SIGN:HACK:MSFRames?')\r\r\n numSf_list = numSf_str.split(',')\r\r\n\r\r\n num = -1\r\r\n measured_subframes = self.NO_MEASURED_FRAMES_STR\r\r\n reliability = numSf_list[0]\r\r\n if reliability == '0':\r\r\n measured_subframes = numSf_list[1]\r\r\n else:\r\r\n loggerCmw.info(\"Measurements are not reliable, reliability indicator %s\" %reliability)\r\r\n\r\r\n return measured_subframes", "def frames(self) -> Optional[Tuple[int, ...]]:\n return self._frames", "def get_frame_sequence(captured_file):\n frame_seq = []\n get_all_frame = \"tshark -r {} -Y 'http.request || http.response' -T fields -e frame.number\".format(captured_file)\n frames = run_command(get_all_frame, True)\n for f in frames:\n fn = int(f.decode('utf8').rstrip('\\n'))\n frame_seq.append(HTTPNode(fn))\n \n return frame_seq", "def search_research_studies_with_observations():\n return ResearchStudy.where(struct={}).include('focus', Observation, reverse=True)", "def getStudyTemplates(self, study_id):\n try:\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n items = []\n con.cursor().callproc('qiime_assets.get_study_templates', [study_id, results])\n for row in results:\n items.append(row[0])\n return items\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def get_all_frames(self) -> List[str]:\n frames = self.allFramesAsString().split(\"\\n\")\n frames.remove(\"\")\n return frames", "def find_records():\r\n\r\n print(\"begin find records\")\r\n\r\n study_list = retrieve_ref('study_list')\r\n sensor_list = retrieve_ref('sensor_list')\r\n # sensor_unit_list = retrieve_ref('sensor_unit_list')\r\n\r\n for study in study_list:\r\n # print('study = ' + str(study))\r\n source_path = os.path.join(study, 'source')\r\n # print('source_path = ' + str(source_path))\r\n\r\n 
source_folders = os.listdir(source_path)\r\n # print(str(study) + ' source_folders = ')\r\n # print(source_folders)\r\n\r\n df_meta = pd.DataFrame()\r\n df_meta['source_path'] = source_folders\r\n save_meta(study, df_meta)\r\n record_to_summary(study, 'Records found', str(len(source_folders)))\r\n\r\n print(\"completed find records\")", "def get_frame(self, frame: int) -> BaseImage:\n return self.sequence[frame]", "def get_windows(timsData):\n conn = timsData.conn\n pasef_ms2_id = 8 # diaPASEF ms2 scans are denoted by 8 instead of 2\n cycle_length = get_cycle_length(conn, pasef_ms2_id)\n wpf = get_windows_per_frame(conn, pasef_ms2_id)\n q = timsData.conn.execute(\"SELECT * FROM Frames INNER JOIN PasefFrameMsMsInfo ON Frames.Id=PasefFrameMsMsInfo.Frame WHERE MsMsType=%d LIMIT %d\" % (pasef_ms2_id, cycle_length*wpf))\n frames = q.fetchall()\n colnames = [description[0] for description in q.description]\n resframe = pd.DataFrame(data = frames, columns = colnames)\n return resframe", "def scenes_to_frames():\n # Scene 001 from frames 1-150\n cmd.scene('001', animate=0)\n cmd.mview('store', 1)\n cmd.mview('store', 150)\n # Scene 002 from frames 250-400\n cmd.scene('002', animate=0)\n cmd.mview('store', 250)\n cmd.mview('store', 400)", "def frames_df(task, conditions):\n\n frames_df = pd.DataFrame([])\n\n for subject in subjects:\n for condition in conditions:\n evs = load_evs(subject, task, condition)\n df = pd.DataFrame(evs) # load evs into df\n df['run'] = [0, 1]\n df['subject'] = subject\n df['condition'] = condition\n df['frames'] = condition_frames(evs)\n frames_df = frames_df.append(df, ignore_index=True)\n\n return frames_df", "def get_tracks(num=1):\n pass", "def find_frame_files(self, ftype, calib_ID=None):\n return self.frame_paths(self.find_frames(ftype, calib_ID=calib_ID))", "def getFrameByName(self, frameName):\n return self.data.frames[frameName]", "def rend_samples(data,start_frame,cap,fs=[30,51,78,90],show=False):\n row1 = [];\n row2 = [];\n row3 = [];\n for f in fs:\n cap.set(cv2.CAP_PROP_POS_FRAMES,start_frame+f);\n _,data['frame'] = cap.read();\n data['i'] = f;\n row1.append(extract_head(data));\n row2.append(data['plts']['y'][f]);\n render = cv2.imread(cf.renders_path + \"{:04d}.png\".format(f+1));\n render = cv2.resize(render,(256,256));\n row3.append(render);\n row1 = np.concatenate(row1,axis=1);\n row2 = np.concatenate(row2,axis=1);\n row3 = np.concatenate(row3,axis=1);\n grid = np.concatenate([row1,row2,row3]);\n if show:\n ut.show(grid);\n return grid;", "def get_studies(self, subj_id, modality=None, unique=False):\n\n url = 'studies?' + self._login_code + \\\n '&projectCode=' + self.proj_code + '&subjectNo=' + subj_id\n output = self._send_request(url)\n\n # Split at '\\n'\n stud_list = output.split('\\n')\n # Remove any empty entries!\n stud_list = [x for x in stud_list if x]\n\n if modality:\n for ii, study in enumerate(stud_list):\n url = 'modalities?' 
+ self._login_code + \\\n '&projectCode=' + self.proj_code + '&subjectNo=' + \\\n subj_id + '&study=' + study\n output = self._send_request(url).split('\\n')\n\n if modality in output:\n if unique:\n return([study, ]) # always return a list\n else:\n stud_list[ii] = None\n\n # In Py3, filter returns an iterable object, but here we want list\n stud_list = list(filter(None, stud_list))\n\n return(stud_list)", "def get_frame(self):\n return self.frames.get()", "def subsample(self, se):\n\t\tdf = ReadDF('noname', self.readdict.refmap)\n\t\tfor i in random.sample(xrange(1, self.n+1), min(se, self.n)):\n\t\t\tpos, read = self.partial_sampling_func(i)\n\t\t\tdf.add_read_to_vec(read,copy=1) # important to remember to use just this ONE copy!!!\n\t\treturn df", "def _crop_frames(self, frames, center_crop=True):\n cropped_frames = []\n crop_location = 0.5 if center_crop else np.random.random_sample()\n for frame in frames:\n cropped_frame = self._crop_frame(frame, crop_location)\n cropped_frames.append(cropped_frame)\n\n return np.array(cropped_frames)", "def getframe(self, num):\n if num < 0 or num > self.nframes:\n raise RuntimeError(\"Requested frame number is out of range\")\n # Do a deep copy of the header to make a new one\n frame = hdf5image(header=self.header.copy())\n frame.header_keys = self.header_keys[:]\n for key in (\"dim1\", \"dim2\", \"nframes\", \"bytecode\", \"hdf5\", \"ds\"):\n frame.__setattr__(key, self.__getattribute__(key))\n frame.hdf5_location = copy.deepcopy(self.hdf5_location)\n frame.hdf5_location.set_index(num)\n if self.hdf5_location.slice:\n self.data = self.ds[tuple(self.hdf5_location.slice)]\n self.nframes = self.ds.shape[self.hdf5_location.last_index]\n else:\n self.data = self.ds[:]\n return frame", "def get_frame(self,t):\n\n return pyfx.util.to_array(self._img_list[t],dtype=np.uint8,\n num_channels=4)", "def getSubsampleList(vcfname, ss_count):\n\n vcf_o = pysam.VariantFile(vcfname)\n rec = next(vcf_o)\n vcf_o.close()\n lst = []\n for samp in rec.samples:\n lst.append(samp)\n return lst[:int(ss_count)]", "def get_frame(self, indices: List[int]) -> List[np.ndarray]:\n if isinstance(indices, int):\n indices = [indices]\n img_list = []\n if self.frame_zip_fid is None:\n self._check_available(self.zip_path)\n self.frame_zip_fid = zipfile.ZipFile(self.zip_path, 'r')\n\n for idx in indices:\n file_name = self.frame_fmt.format(int(idx) + 1)\n img = self.load_image_from_zip(self.frame_zip_fid, file_name, cv2.IMREAD_COLOR)\n img_list.append(img)\n return img_list", "def available_frames(self):\n if self._pipeline:\n #return [getattr(frame[0], \"name\", frame[0]) for frame in self._pipeline]\n return [step.frame if isinstance(step.frame, str) else step.frame.name for step in self._pipeline ]\n else:\n return None", "def get_srcs():\n global ms\n global srcs\n\n if not srcs:\n # Update both of them if one was not already declared\n ms, srcs = stixhelpers.get_stix_memory_stores() \n \n return srcs", "def GetSubContoursByFrame(watershed, allValsByFrame):\n scListByFrame = []\n for frame in range(len(watershed)):\n scList = []\n for v in allValsByFrame[frame]:\n boundingRect = ImageContour.GetBoundingRect(watershed[frame], v)\n # No longer needed: #contour,turns,vals = ImageContour.GetContour(watershed[0],v,boundingRect=boundingRect,byNeighbor=True)\n (\n perimeterVals,\n perimeterList,\n scPoints,\n ) = ImageContour.GetPerimeterByNeighborVal(\n watershed[frame], v, boundingRect=boundingRect, getSubContours=True\n )\n scPointsAdj = [\n (np.array(scp) + [boundingRect[0][0], 
boundingRect[1][0]]).tolist()\n for scp in scPoints\n ] # Will need to - 0.5 to line up on an overlay\n if len(perimeterList) > 0:\n scList += [\n SubContour(\n points=scPointsAdj[i],\n numPoints=len(scPointsAdj[i]),\n adjusted_length=perimeterList[i],\n values=tuple(sorted([v, perimeterVals[i]])),\n startPointValues=GetValuesAroundSCPoint(\n watershed[frame], scPointsAdj[i][0]\n ),\n endPointValues=GetValuesAroundSCPoint(\n watershed[frame], scPointsAdj[i][-1]\n ),\n )\n for i in range(len(perimeterVals))\n ]\n scList.sort(key=lambda x: x.values)\n for i in range(len(scList) - 1, 0, -1):\n # if 2 subcoutours are the same, keep only the one with the minimum length computation\n if scList[i - 1].values == scList[i].values:\n scList[i - 1].adjusted_length = min(\n scList[i - 1].adjusted_length, scList[i].adjusted_length\n )\n del scList[i]\n scListByFrame.append(scList)\n return scListByFrame", "def get_recorded_audio(self):\n return self.frames", "def get_frame(self, frame):\n return self.frames[frame]", "def get(self, filter_params, after, limit):\n filter_params.pop('study_id', None)\n\n q = (Study.query\n .filter_by(**filter_params))\n\n return (StudySchema(many=True)\n .jsonify(Pagination(q, after, limit)))", "def _get_all_spectra(self):\n pass", "def list_data_frames():\n\n cmd = dict()\n cmd[\"type_\"] = \"list_data_frames\"\n cmd[\"name_\"] = \"\"\n \n s = comm.send_and_receive_socket(cmd)\n\n msg = comm.recv_string(s)\n\n if msg != \"Success!\":\n raise Exception(msg)\n \n json_str = comm.recv_string(s) \n \n s.close() \n\n return json.loads(json_str)", "def get_dataframe(self):\n for i, study_id in enumerate(self.studies_to_combine):\n copy = repr(self.original_study_location).strip(\"'\")\n study_location = copy.replace(\"MTBLS1\", study_id)\n\n for maf in self.sort_mafs(study_location, study_id):\n maf_temp = None\n try:\n maf_temp = pandas.read_csv(os.path.join(study_location, maf), sep=\"\\t\", header=0, encoding='unicode_escape')\n except pandas.errors.EmptyDataError as e:\n logger.error(f'EmptyDataError Issue with opening maf file {maf}: {str(e)}')\n self.unopenable_maf_register.append(maf)\n continue\n except Exception as e:\n logger.error(f'Issue with opening maf file {maf}, cause of error unclear: {str(e)}')\n self.unopenable_maf_register.append(maf)\n continue\n\n cleanup_function = getattr(DataFrameUtils, f'{self.method}_maf_cleanup')\n maf_temp = cleanup_function(maf_temp, study_id, maf)\n maf_as_dict = totuples(df=maf_temp, text='dict')['dict']\n\n yield maf_as_dict", "def get_bodyparts(project_dir):\n print(f\"\\n\\n\\nLoading data\")\n df_paths = sorted(glob.glob(os.path.join(project_dir, '*.h5')))\n points_2d_df = utils.create_dlc_points_2d_file(df_paths)\n arr = points_2d_df[points_2d_df[\"frame\"]==0][[\"marker\"]][points_2d_df[\"camera\"]==0].values\n final_arr = arr.flatten().tolist()\n return(final_arr)", "def query_and_read_frame(frame_type, channels, start_time, end_time):\n logging.info('querying datafind server')\n paths = frame_paths(frame_type, start_time, end_time)\n logging.info('found files: %s' % (' '.join(paths)))\n return read_frame(paths, channels, \n start_time=start_time, \n end_time=end_time)", "def __setup_video_frames_extraction(self, video_file, scene_ratio,\n start_duration=None, stop_duration=None,\n frames_folder=None):\n frames = _ExtractFrames(video_file, scene_ratio, start_time=start_duration,\n stop_time=stop_duration, frames_path=frames_folder)\n\n print \"Initialize video frames extraction\"\n return frames" ]
[ "0.6049885", "0.59706116", "0.57190984", "0.5599101", "0.5508475", "0.55001473", "0.54696465", "0.5453543", "0.5443962", "0.54344064", "0.5425413", "0.53727776", "0.53502667", "0.53314114", "0.5238596", "0.5207204", "0.51428056", "0.5139112", "0.51072145", "0.509223", "0.5054078", "0.50375295", "0.5009608", "0.50031275", "0.49796918", "0.49614277", "0.49533322", "0.49530774", "0.4947847", "0.4940793", "0.49178955", "0.49153247", "0.49121943", "0.48996004", "0.4896901", "0.4891801", "0.4890971", "0.4879641", "0.48794425", "0.48638797", "0.4840882", "0.48408586", "0.47971347", "0.47776544", "0.47766024", "0.47742635", "0.47710678", "0.47706273", "0.47688955", "0.4763027", "0.4760436", "0.4759032", "0.47543895", "0.47519603", "0.47431007", "0.47389412", "0.4723116", "0.47200224", "0.47196072", "0.47157457", "0.47096613", "0.4706132", "0.46955422", "0.46924964", "0.46756834", "0.46682844", "0.46670142", "0.46419388", "0.4639011", "0.463821", "0.46340752", "0.46294498", "0.46243757", "0.46197662", "0.46168935", "0.46095663", "0.4608431", "0.46003556", "0.45971084", "0.458257", "0.4570562", "0.4565017", "0.456453", "0.45630226", "0.45584396", "0.4557779", "0.45565143", "0.4554588", "0.45538265", "0.45522344", "0.45513755", "0.45499104", "0.45397067", "0.4537952", "0.4535386", "0.4533202", "0.4526452", "0.4524087", "0.4516339", "0.45141032" ]
0.58449024
2
Returns definitions of module output ports.
def output_types(self) -> Optional[Dict[str, NeuralType]]: return { 'input_ids': NeuralType(('B', 'T'), ChannelType()), 'segment_ids': NeuralType(('B', 'T'), ChannelType()), 'input_mask': NeuralType(('B', 'T'), MaskType()), "labels": NeuralType( tuple('B'), RegressionValuesType() if self.task_name == 'sts-b' else CategoricalValuesType() ), }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getOutputPortsInfo(self):\n return [(gport.parentItem().module, gport.port, gport.controller.get_connections_from(gport.controller.current_pipeline, [gport.parentItem().module.id], gport.port.name), (gport.parentItem().boundingRect().right()-gport.parentItem().boundingRect().left())/2) for gport in self.pipelineView.getSelectedOutputPorts()]", "def get_node_output_ports(node: Node):\n consumers = []\n for port in node.out_ports().values():\n for dst_port in port.get_destinations():\n consumers.append(dst_port)\n return consumers", "def output_ports(self):\n return {\n 'audio_signal': NeuralType(('B', 'T'), AudioSignal(freq=self._sample_rate)),\n 'a_sig_length': NeuralType(tuple('B'), LengthsType()),\n 'label': NeuralType(tuple('B'), LabelsType()),\n 'label_length': NeuralType(tuple('B'), LengthsType()),\n }", "def get_number_of_output_ports(self):\n return 1", "def get_ports(self) -> tuple:\n raise NotImplementedError", "def message_ports_out(self):\n return _spacegrant_swig.hdlc_deframer_sptr_message_ports_out(self)", "def message_ports_out(self):\n return _spacegrant_swig.udp_debug_sptr_message_ports_out(self)", "def message_ports_out(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_message_ports_out(self)", "def get_ports():\r\n ports = serial.tools.list_ports.comports()\r\n return ports", "def port_list(self):\n return self._port_list", "def _ports(self):\n try:\n return self._graph.node[self.node_id][\"_ports\"]\n except KeyError:\n log.debug(\"No interfaces initialised for %s\" % self)\n return", "def get_port_list(self):\r\n self.ports = Manager().dict()\r\n self.value = Manager().dict()\r\n self.sensors = dict()\r\n for p in self.device.ports['input']:\r\n if p.enabled:\r\n self.ports[p.number] = p\r\n self.value[p.number] = 'Connexion à la carte'\r\n self.sensors[p.number] = Sensor.get(p._type)", "def message_ports_out(self):\n return _spacegrant_swig.hdlc_framer_sptr_message_ports_out(self)", "def message_ports_out(self):\n return _spacegrant_swig.ax25_udp_pdu_receiver_sptr_message_ports_out(self)", "def message_ports_out(self):\n return _spacegrant_swig.binary_sink_sptr_message_ports_out(self)", "def get_ports(self) -> tuple:\n return self._current_dev_manager.get_ports()", "def get_all_node_outputs(node: Node):\n return [port.node for port in get_node_output_ports(node)]", "def output_ports(self):\n return {\n 'dialog_history': NeuralType(\n axes=(AxisType(kind=AxisKind.Batch, is_list=True), AxisType(kind=AxisKind.Time, is_list=True),),\n elements_type=AgentUtterance(),\n ),\n }", "def list_ports(self):\n return self.ironic_client.port.list()", "def message_ports_out(self):\n return _spacegrant_swig.G3RUH_descramble_sptr_message_ports_out(self)", "def list_ports(state):\n\tstate.report()", "def message_ports_out(self):\n return _spacegrant_swig.message_debug_sptr_message_ports_out(self)", "def message_ports_out(self):\n return _spacegrant_swig.ax25_pdu_unpacker_sptr_message_ports_out(self)", "def get_ports(self):\n return self._ports", "def exposed_ports(self) -> list[\"Port\"]:\n _args: list[Arg] = []\n _ctx = self._select(\"exposedPorts\", _args)\n _ctx = Port(_ctx)._select_multiple(\n _description=\"description\",\n _port=\"port\",\n _protocol=\"protocol\",\n )\n return _ctx.execute_sync(list[Port])", "def message_ports_out(self):\n return _uhd_swig.usrp_source_sptr_message_ports_out(self)", "def message_ports_out(self):\n return _spacegrant_swig.ax25_pdu_packer_sptr_message_ports_out(self)", "def message_ports_out(self):\n return 
_spacegrant_swig.general_burster_2_sptr_message_ports_out(self)", "def message_ports_out(self):\n return _uhd_swig.usrp_sink_sptr_message_ports_out(self)", "def message_ports_out(self):\n return _spacegrant_swig.invert_bit_sptr_message_ports_out(self)", "def _GetPorts(self):\n ports = []\n for start, end in self.term.destination_port:\n if start == end:\n ports.append(str(start))\n else:\n ports.append('%d-%d' % (start, end))\n return ports", "def ports(self):\r\n # check connected to chip\r\n if not self._core.is_connected():\r\n raise ka_exceptions.NotConnectedError()\r\n\r\n # check symbols are loaded\r\n self._core.sym.assert_have_symbols()\r\n\r\n # get the symbols we need\r\n READ_OFFSET_ADDR = self._core.sym.varfind('$cbuffer.read_port_offset_addr')\r\n WRITE_OFFSET_ADDR = self._core.sym.varfind('$cbuffer.write_port_offset_addr')\r\n READ_LIMIT_ADDR = self._core.sym.varfind('$cbuffer.read_port_limit_addr')\r\n WRITE_LIMIT_ADDR = self._core.sym.varfind('$cbuffer.write_port_limit_addr')\r\n READ_BUFFER_SIZE = self._core.sym.varfind('$cbuffer.read_port_buffer_size')\r\n WRITE_BUFFER_SIZE = self._core.sym.varfind('$cbuffer.write_port_buffer_size')\r\n\r\n def read_dm(addr):\r\n return self._core.dm[addr]\r\n\r\n # get the read and write offset\r\n read_offset_addr = self._read_var_with_size_check(READ_OFFSET_ADDR.addr, READ_OFFSET_ADDR.size_in_addressable_units)\r\n write_offset_addr = self._read_var_with_size_check(WRITE_OFFSET_ADDR.addr, WRITE_OFFSET_ADDR.size_in_addressable_units)\r\n read_offset = map(read_dm, read_offset_addr)\r\n write_offset = map(read_dm, write_offset_addr)\r\n\r\n # get the read and write limit\r\n read_limit_addr = self._read_var_with_size_check(READ_LIMIT_ADDR.addr, READ_LIMIT_ADDR.size_in_addressable_units)\r\n write_limit_addr = self._read_var_with_size_check(WRITE_LIMIT_ADDR.addr, WRITE_LIMIT_ADDR.size_in_addressable_units)\r\n read_limit = map(read_dm, read_limit_addr)\r\n write_limit = map(read_dm, write_limit_addr)\r\n\r\n # get the port size\r\n read_size = self._read_var_with_size_check(READ_BUFFER_SIZE.addr, READ_BUFFER_SIZE.size_in_addressable_units)\r\n write_size = self._read_var_with_size_check(WRITE_BUFFER_SIZE.addr, WRITE_BUFFER_SIZE.size_in_addressable_units)\r\n # calculate size mask (size-1) for non-zero sizes\r\n read_mask = map(lambda s: s - (s>0), read_size)\r\n write_mask = map(lambda s: s - (s>0), write_size)\r\n\r\n # calculate data/space in port\r\n read_data = map(lambda l,o,m: (l - o) & m, read_limit, read_offset, read_mask)\r\n write_space = map(lambda l,o,m: (l - o) & m - 1, write_limit, write_offset, write_mask)\r\n\r\n # read port configs\r\n READ_CONF_BASE = self._core.sym.constfind('$READ_PORT0_CONFIG').value\r\n WRITE_CONF_BASE = self._core.sym.constfind('$WRITE_PORT0_CONFIG').value\r\n read_conf = self._read_var_with_size_check(READ_CONF_BASE, READ_OFFSET_ADDR.size_in_addressable_units)\r\n write_conf = self._read_var_with_size_check(WRITE_CONF_BASE, WRITE_OFFSET_ADDR.size_in_addressable_units)\r\n\r\n # extract data size (in octets) from config\r\n read_data_size = map(lambda c: (c & 0x3) + 1, read_conf)\r\n write_space_size = map(lambda c: (c & 0x3) + 1, write_conf)\r\n\r\n # decode configs into strings\r\n read_conf_str = map(lambda c,s: (\"8\" if s==1 else (\"16\" if s==2 else (\"24\" if s==3 else \"??\"))) + \"-bit, \" \\\r\n + (\"Big Endian\" if (c & 0x4) else \"Little Endian\") + \", \" \\\r\n + (\"No Sign Ext\" if (c & 0x8) else \"Sign Ext\" ), \\\r\n read_conf, read_data_size)\r\n write_conf_str = map(lambda c,s: 
(\"8\" if s==1 else (\"16\" if s==2 else (\"24\" if s==3 else \"??\"))) + \"-bit, \" \\\r\n + (\"Big Endian\" if (c & 0x4) else \"Little Endian\") + \", \" \\\r\n + (\"Saturate\" if (c & 0x8) else \"No Saturate\"), \\\r\n write_conf, write_space_size)\r\n\r\n # print information\r\n print \"Read ports:\\n Port Status Offset Address Size(Bytes) Data Config\"\r\n for i in range(len(read_offset_addr)):\r\n if read_offset_addr[i]:\r\n print \" %2i Enabled %6i (0x%04X) %5i (0x%04X) %5i %s\" % \\\r\n (i, read_offset_addr[i], read_offset_addr[i], read_size[i], read_size[i], read_data[i]/read_data_size[i], read_conf_str[i])\r\n else:\r\n print \" %2i Disabled\" % i\r\n\r\n print \"Write ports:\\n Port Status Offset Address Size(Bytes) Space Config\"\r\n for i in range(len(write_offset_addr)):\r\n if write_offset_addr[i]:\r\n print \" %2i Enabled %6i (0x%04X) %5i (0x%04X) %5i %s\" % \\\r\n (i, write_offset_addr[i], write_offset_addr[i], write_size[i], write_size[i], write_space[i]/write_space_size[i], write_conf_str[i])\r\n else:\r\n print \" %2i Disabled\" % i", "def get_ports(cls):\n return cls._open_ports.copy()", "def serial_ports():\r\n return list(map(lambda listportinfo: listportinfo.device, list_ports.comports()))", "def definitions(self) -> Dict[str, GraphOutput]:\n # Get the right output dictionary.\n d = self._manual_outputs if len(self._manual_outputs) > 0 else self._default_outputs\n\n # Extract port definitions (Neural Types) and return an immutable dictionary,\n # so the user won't be able to modify its content by an accident!\n return frozendict({k: v.ntype for k, v in d.items()})", "def get_outputs(self):\n return [x[1] for x in self.io_mapping]", "def get_ports_list() -> List[str]:\n return [comport.device for comport in serial.tools.list_ports.comports()]", "def list_ports():\n print '\\nHere is the list of available ports on this machine:'\n # lp.comports returns a list of (port, description, hardware ID) tuples\n iterator = sorted(lp.comports())\n for port, desc, hwid in iterator:\n print port\n exit()", "def message_ports_out(self):\n return _spacegrant_swig.DeNRZI_sptr_message_ports_out(self)", "def enumerate_midi_devices(self):\n\n ports = self.midiout.get_ports()\n\n return ports", "def get_target_ports(self):\n return self.targets", "def message_ports_out(self):\n return _spacegrant_swig.NRZI_sptr_message_ports_out(self)", "def ports(self): # type: () -> t.Dict[str, t.List[t.Dict[str, str]]]\n return self.network_settings['Ports']", "def raw_interfaces(self):\n return self._ports", "def get_all_port(self, conf, dpid):\n\t\tpass", "def resolve(self):\n # Find defines for all ports\n for port_name, port in self.ports.items():\n if not isinstance(port, ElemExportPort):\n print('hha')\n for k, p in port.items():\n name, bit_range = k\n if p:\n continue\n not_find = True\n for io_list in self.all_io.values():\n if name in io_list:\n port[k] = io_list[name]\n not_find = False\n break\n if not_find:\n raise VlogSyntaxError('A port defined in port list of module is NOT defined as input/output/inout')", "def getListOfPorts(self):\n return _libsbml.CompModelPlugin_getListOfPorts(self)", "def message_ports_out(self):\n return _TestA_swig.my_qpsk_demod_cb_sptr_message_ports_out(self)", "def list_port(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/ports.json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n 
LOG_OBJ.error(\"No response from Server, while listing ports.\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get port list Failed with status %s\"\n % response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Port List : %s \" % output)\n return output[\"ports\"]", "def getViewPorts(self):\n return self._viewPorts", "def _generate_expose_services(self):\n ports = []\n for p in self.image['ports']:\n if p.get('expose', True):\n\n r = \"{}/{}\".format(p['value'], p.get('protocol', 'tcp'))\n\n if 'service' in p:\n r += \":{}\".format(p['service'])\n ports.append(r)\n else:\n # attempt to supply a service name by looking up the socket number\n try:\n service = socket.getservbyport(p['value'], p.get('protocol','tcp'))\n r += \":{}\".format(service)\n ports.append(r)\n\n except OSError: # py3\n pass\n except socket.error: # py2\n pass\n\n return \",\".join(ports)", "def message_ports_out(self):\n return _TestA_swig.cleanslate_sptr_message_ports_out(self)", "def read_all_rom_ports(self):\n return self.ROM_PORT", "def display_port(self):\n ports=os.popen(\"sudo netstat -ntlp\").read().strip().splitlines()[2:]\n for port in ports:\n split=re.split('[\\s]+',port)\n self.portDic[\"Protcol\"]=split[0]\n self.portDic[\"Receive Q\"]=split[1]\n self.portDic[\"Send Q\"]=split[2]\n split_port=split[3].split(\":\")\n if split_port[1]==\"\":\n self.portDic[\"port\"]=\"No Port\" \n else:\n self.portDic[\"port\"]=split_port[1]\n self.portDic[\"Foreign Address\"]=split[4]\n self.portDic[\"State\"]=split[5]\n split_ID=split[6].split(\"/\")\n self.portDic[\"PID\"]=split_ID[0]\n self.portDic[\"Programme Name\"]=split_ID[1]\n self.portList.append(self.portDic.copy())\n return self.portList", "def all_ports(self, **kwargs) -> t.Any:\n\n return tools.all_ports(**kwargs)", "def ports(self):\n return self.attrs.get('NetworkSettings', {}).get('Ports', {})", "def trafficOutboundPorts(self):\n #\n # TODO: Reimplement this if possible\n #\n return client.trafficOutboundPorts(self)", "def serial_ports(self):\n if os.name == 'nt':\n # windows\n for i in range(256):\n try:\n s = serial.Serial(i)\n s.close()\n yield 'COM' + str(i + 1)\n except serial.SerialException:\n pass\n else:\n # unix\n for port in list_ports.comports():\n yield port[0]", "def serial_ports(self):\n if os.name == 'nt':\n # windows\n for i in range(256):\n try:\n s = serial.Serial(i)\n s.close()\n yield 'COM' + str(i + 1)\n except serial.SerialException:\n pass\n else:\n # unix\n for port in list_ports.comports():\n yield port[0]", "def getInputPortsInfo(self):\n return [(gport.parentItem().module, gport.port, gport.controller.get_connections_to(gport.controller.current_pipeline, [gport.parentItem().module.id], gport.port.name), (gport.parentItem().boundingRect().right()-gport.parentItem().boundingRect().left())/2) for gport in self.pipelineView.getSelectedInputPorts()]", "def get_outputs(self):\n raise NotImplementedError", "def determine_ports():\n ports = [config('admin-port'), config('service-port')]\n return list(set(ports))", "def output_definitions(self) -> models.QuerySet:\n return self.output_specification.output_definitions", "def outputs(self):\n\n outputs = []\n for arg in self.arguments:\n if arg.OUT:\n outputs.append(arg)\n\n return outputs", "def _out_connections(self, g, tick):\n # outputs could be connected to many different input ports - this is not yet covered\n out_connections=[]\n output_map = {}\n # get the out connections of the given task\n for 
source,dest in g.get_out_connections(tick):\n if source.port not in output_map.keys():\n output_map[source.port]=[]\n output_map[source.port].append(dest)\n for source,dest in self.body_graph.get_in_connections(graph.FINAL_TICK):\n out_source=graph.Endpoint(source.tick << tick, source.port)\n portname=dest.port\n for out_dest in output_map[portname]:\n out_connections.append((out_source, out_dest))\n return out_connections", "def port_out(self) -> int:\n return self.proto.port_out", "def outputs(self):\r\n return self._outputs", "def message_ports_out(self) -> \"pmt::pmt_t\":\n return _beamforming_swig.doaesprit_sptr_message_ports_out(self)", "def message_ports_out(self) -> \"pmt::pmt_t\":\n return _beamforming_swig.beamformer_sptr_message_ports_out(self)", "def output_fields(self):\r\n if not len(self.inputs) == 1:\r\n raise ValueError(\"Can not get default list of output fields: node has more than one input\"\r\n \" or no input is provided. Subclasses should override this method\")\r\n\r\n if not self.input.fields:\r\n raise ValueError(\"Can not get default list of output fields: input pipe fields are not \"\r\n \"initialized\")\r\n\r\n return self.input.fields", "def outputs(self):\n return self.outputs", "def get_outputs(self, flatten=False):\n ret = [x[1] for x in self.io_mapping]\n if flatten: return sum(ret,[])\n else: return ret", "def _list_outputs(self):\n \n outputs = self._outputs().get()\n return outputs", "def list_available_ports():\n ports = [u\"COM%s\" % (i + 1) for i in range(16)]\n results = []\n\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n results.append(port)\n print(u\"Find {0} device.\".format(port))\n except (OSError, serial.SerialException):\n pass\n\n return results", "def get_output_descriptions(self):\n raise NotImplementedError", "def message_ports_out(self) -> \"pmt::pmt_t\":\n return _beamforming_swig.phasedarray_sptr_message_ports_out(self)", "def get_node_output(node: Node, out_port: int):\n consumers = []\n for dst_node in node.out_port(out_port).get_destinations():\n consumers.append(dst_node.node)\n return consumers", "def enumerate_serial_devices(self):\n\n ports = list(list_ports.comports())\n\n return ports", "def message_ports_in(self):\n return _spacegrant_swig.binary_sink_sptr_message_ports_in(self)", "def message_ports_out(self):\n return _add_vector_swig.add_vector_2_cpp_sptr_message_ports_out(self)", "def outputs(self):\n return self._outputs", "def outputs(self):\n return self._outputs", "def get_node_input_ports(node: Node):\n sources_ports = [parent.get_source() for parent in node.in_ports().values()]\n return [port for port in sources_ports if port is not None]", "def getPortList(self):\n return [(portDetail[1], \"In Use\" in str(portDetail[2]) and int(1) or int(0), portDetail[2], portDetail[0]) for portDetail in self.portLines]", "def message_ports_in(self):\n return _spacegrant_swig.udp_debug_sptr_message_ports_in(self)", "def serial_ports():\n if os.name == 'nt':\n # windows\n for i in range(256):\n try:\n s = serial.Serial(i)\n s.close()\n yield 'COM' + str(i + 1)\n except serial.SerialException:\n pass\n else:\n # unix\n for port in list_ports.comports():\n yield port[0]", "def message_ports_in(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_message_ports_in(self)", "def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ContainerPortArgs']]]]:\n return pulumi.get(self, \"ports\")", "def OutputPort(*args, **kw):\n return Port.make_shared(OutputPortInterface(*args, **kw))", "def 
read_all_ram_ports(self):\n return self.RAM_PORT", "def look_for_available_ports():\n available_ports = glob.glob('/dev/ttyACM*')\n print(\"Available porst: \")\n print(available_ports)\n\n return available_ports", "def serial_ports():\n if sys.platform.startswith('win'):\n result = []\n for i in range(256):\n try:\n s = serial.Serial(i)\n s.close()\n result.append('COM' + str(i + 1))\n except serial.SerialException:\n pass\n return result\n\n elif sys.platform.startswith('linux'):\n return glob.glob('/dev/rf*')\n\n elif sys.platform.startswith('darwin'):\n return glob.glob('/dev/tty.*')", "def get_vulnerable_ports(self):\n self.__get_vulnerable_ports(modules.__path__[0])\n return self.__vulnerable_ports", "def input_ports(self):\n return {\n 'sys_uttr': NeuralType(\n axes=[AxisType(kind=AxisKind.Batch, is_list=True)], elements_type=SystemUtterance()\n ),\n 'dialog_history': NeuralType(\n axes=(AxisType(kind=AxisKind.Batch, is_list=True), AxisType(kind=AxisKind.Time, is_list=True),),\n elements_type=AgentUtterance(),\n ),\n }", "def _list_modi_ports() -> List[ListPortInfo]:\n def __is_modi_port(port):\n return (\n port.manufacturer == \"LUXROBO\"\n or port.product == \"MODI Network Module\"\n or port.description == \"MODI Network Module\"\n or (port.vid == 12254 and port.pid == 2))\n\n return [port for port in stl.comports() if __is_modi_port(port)]", "def get_outputs(self):\n return self.outputs", "def named_ports(self) -> pulumi.Output[Optional[List['outputs.RegionInstanceGroupManagerNamedPort']]]:\n return pulumi.get(self, \"named_ports\")", "def get_group_out_ports(self, dp, dstip):\n pass", "def message_ports_in(self):\n return _spacegrant_swig.hdlc_deframer_sptr_message_ports_in(self)", "def serialize(self) -> Dict[str, Any]:\n serialized_outputs = {\"mappings\": []}\n\n # Get the right output dictionary.\n if len(self._manual_outputs) > 0:\n serialized_outputs[\"type\"] = \"manual\"\n d = self._manual_outputs\n else:\n serialized_outputs[\"type\"] = \"default\"\n d = self._default_outputs\n\n # Iterate through \"bindings\" (GraphOutputs).\n for key, binding in d.items():\n # Serialize: step.module.port -> output | ntype.\n smp = binding.producer_step_module_port\n source = str(smp.step_number) + \".\" + smp.module_name + \".\" + smp.port_name\n # Get type.\n ntype_str = str(binding.ntype)\n # Serialize!\n serialized_outputs[\"mappings\"].append(source + \"->\" + key + \" | \" + ntype_str)\n # Return the result.\n return serialized_outputs", "def output_node(self, port: int):\n return self._output_nodes_map[port]" ]
[ "0.7231068", "0.7108193", "0.7017519", "0.698227", "0.68667525", "0.6826521", "0.6819943", "0.67742664", "0.6712915", "0.6690715", "0.66691464", "0.6668941", "0.66622776", "0.66509235", "0.6629479", "0.66278684", "0.6618243", "0.6612363", "0.6601604", "0.6582517", "0.65754664", "0.6561065", "0.6556166", "0.6540711", "0.65385544", "0.6520195", "0.6475474", "0.64606667", "0.6451759", "0.6441488", "0.6429259", "0.642597", "0.6422558", "0.6411996", "0.64073044", "0.6400737", "0.6397496", "0.63965285", "0.6385203", "0.6383235", "0.637431", "0.635967", "0.63261485", "0.6312596", "0.62848747", "0.62762415", "0.6259129", "0.6226155", "0.61605966", "0.6153221", "0.6141573", "0.6107296", "0.6104552", "0.607051", "0.6063836", "0.60486734", "0.603739", "0.6036817", "0.6036817", "0.60050356", "0.5922777", "0.59012824", "0.5895449", "0.588001", "0.5856447", "0.5852306", "0.584482", "0.58402455", "0.5838579", "0.5836416", "0.5805866", "0.57874835", "0.5784171", "0.5782383", "0.57756495", "0.5769555", "0.57495975", "0.5747719", "0.5743746", "0.573979", "0.57395357", "0.57395357", "0.57341546", "0.57294184", "0.57118714", "0.57068217", "0.57034284", "0.5690668", "0.56885165", "0.56868565", "0.5681813", "0.56746185", "0.56738317", "0.5659519", "0.56551486", "0.5654497", "0.5650887", "0.56499815", "0.56418353", "0.5637193", "0.56338584" ]
0.0
-1
Loads a data file into a list of `InputBatch`s.
def convert_examples_to_features( self, examples: List[str], label_list: List[int], max_seq_length: int, tokenizer: TokenizerSpec, output_mode: str, bos_token: str = None, eos_token: str = '[SEP]', pad_token: str = '[PAD]', cls_token: str = '[CLS]', sep_token_extra: str = None, cls_token_at_end: bool = False, cls_token_segment_id: int = 0, pad_token_segment_id: int = 0, pad_on_left: bool = False, mask_padding_with_zero: bool = True, sequence_a_segment_id: int = 0, sequence_b_segment_id: int = 1, ): label_map = {label: i for i, label in enumerate(label_list)} features = [] for ex_index, example in enumerate(examples): if example.label == "-": # skip examples without a consensus label (e.g. in SNLI data set) continue if ex_index % 10000 == 0: logging.info("Writing example %d of %d" % (ex_index, len(examples))) tokens_a = tokenizer.text_to_tokens(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.text_to_tokens(example.text_b) special_tokens_count = 2 if eos_token else 0 special_tokens_count += 1 if sep_token_extra else 0 special_tokens_count += 2 if bos_token else 0 special_tokens_count += 1 if cls_token else 0 self._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - special_tokens_count) else: special_tokens_count = 1 if eos_token else 0 special_tokens_count += 1 if sep_token_extra else 0 special_tokens_count += 1 if bos_token else 0 if len(tokens_a) > max_seq_length - special_tokens_count: tokens_a = tokens_a[: max_seq_length - special_tokens_count] # Add special tokens to sequence_a tokens = tokens_a if bos_token: tokens = [bos_token] + tokens if eos_token: tokens += [eos_token] segment_ids = [sequence_a_segment_id] * len(tokens) # Add sequence separator between sequences if tokens_b and sep_token_extra: tokens += [sep_token_extra] segment_ids += [sequence_a_segment_id] # Add special tokens to sequence_b if tokens_b: if bos_token: tokens += [bos_token] segment_ids += [sequence_b_segment_id] tokens += tokens_b segment_ids += [sequence_b_segment_id] * (len(tokens_b)) if eos_token: tokens += [eos_token] segment_ids += [sequence_b_segment_id] # Add classification token - for BERT models if cls_token: if cls_token_at_end: tokens += [cls_token] segment_ids += [cls_token_segment_id] else: tokens = [cls_token] + tokens segment_ids = [cls_token_segment_id] + segment_ids input_ids = tokenizer.tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length. 
padding_length = max_seq_length - len(input_ids) pad_token_id = tokenizer.tokens_to_ids([pad_token])[0] if pad_on_left: input_ids = ([pad_token_id] * padding_length) + input_ids input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids else: input_ids = input_ids + ([pad_token_id] * padding_length) input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length) segment_ids = segment_ids + ([pad_token_segment_id] * padding_length) if len(input_ids) != max_seq_length: raise ValueError("input_ids must be of length max_seq_length") if len(input_mask) != max_seq_length: raise ValueError("input_mask must be of length max_seq_length") if len(segment_ids) != max_seq_length: raise ValueError("segment_ids must be of length max_seq_length") if output_mode == "classification": label_id = label_map[example.label] elif output_mode == "regression": label_id = np.float32(example.label) else: raise KeyError(output_mode) if ex_index < 5: logging.info("*** Example ***") logging.info("guid: %s" % (example.guid)) logging.info("tokens: %s" % " ".join(list(map(str, tokens)))) logging.info("input_ids: %s" % " ".join(list(map(str, input_ids)))) logging.info("input_mask: %s" % " ".join(list(map(str, input_mask)))) logging.info("segment_ids: %s" % " ".join(list(map(str, segment_ids)))) logging.info("label: %s (id = %d)" % (example.label, label_id)) features.append( InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id) ) return features
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def LoadBatch(filename):", "def load_batch(fpath, label_key='labels'):\n f = open(fpath, 'rb')\n if sys.version_info < (3,):\n d = cPickle.load(f)\n else:\n d = cPickle.load(f, encoding='bytes')\n # decode utf8\n d_decoded = {}\n for k, v in d.items():\n d_decoded[k.decode('utf8')] = v\n d = d_decoded\n f.close()\n data = d['data']\n labels = d[label_key]\n\n data = data.reshape(data.shape[0], 3, 32, 32)\n return data, labels", "def loadData(self, file):\n self.data = batchImport(file, self.ps)", "def load_data():\r\n global labelNames\r\n print(\"Loading Data...\")\r\n\r\n fnpath = \"rawdata\\\\cifar-10-batches-py\"\r\n fnprefix = 'data_batch_'\r\n fnlblnames = 'batches.meta'\r\n fntstbatch = 'test_batch'\r\n\r\n labelNames = unpickle(path.join(fnpath, fnlblnames))\r\n label_names = []\r\n for label in labelNames['label_names']:\r\n label_names.append(\"\".join(map(chr, label)))\r\n labelNames['label_names'] = label_names\r\n\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fntstbatch)))\r\n for n in range(1, 6):\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fnprefix + str(n))))", "def _load_input() -> List[List[int]]:\n filepath = os.path.join(os.getcwd(), os.path.dirname(__file__), INPUT_FILE)\n f = open(filepath, 'r')\n data = f.read()\n f.close()\n\n raw_input = data.strip().split('\\n')\n input = [list(ri) for ri in raw_input]\n return [[int(i) for i in line] for line in input]", "def load_batch(batch_name):\n data_dict = unpickle('./datasets/cifar-10-batches-py/' + batch_name)\n X = data_dict[b'data'] / 255\n X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).reshape(10000, 3072).transpose(1,0)\n y = data_dict[b'labels']\n Y = make_one_hot(y)\n return X, Y, y", "def load_batch(n):\r\n print ('Loadng one batch...')\r\n batchfilename = flist[n - 1] + '.pkl'\r\n if not os.path.exists(batchfilename):\r\n set_batch_data()\r\n with open(batchfilename, 'rb') as cifar_pickle:\r\n data = six.moves.cPickle.load(cifar_pickle)\r\n return data", "def _load_batch_file(filename):\n # Load the pickled data-file.\n data = _unpickle(filename)\n # Get the raw images.\n raw_images = data[b'data']\n # Get the class-numbers for each image. 
Convert to numpy-array.\n cls = np.array(data[b'labels'])\n # Convert the images.\n images = _convert_images(raw_images)\n\n return images, cls", "def split_and_load(batch, ctx_list):\n new_batch = []\n for i, data in enumerate(batch):\n if isinstance(data, (list, tuple)):\n new_data = [x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)]\n else:\n new_data = [data.as_in_context(ctx_list[0])]\n new_batch.append(new_data)\n return new_batch", "def loadInMemoryOneBatch(fileName,batchSize):\n\n inputFile = open(fileName)\n\n while True:\n objects = []\n allDone = False\n while True:\n line = inputFile.readline()\n if line:\n objects.append(line)\n if len(objects) == batchSize:\n break\n else:\n allDone = True\n break\n yield objects\n if allDone == True:\n break", "def split_and_load(batch, ctx_list):\n num_ctx = len(ctx_list)\n new_batch = []\n for i, data in enumerate(batch):\n new_data = [x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)]\n new_batch.append(new_data)\n return new_batch", "def split_and_load(batch, ctx_list):\n num_ctx = len(ctx_list)\n new_batch = []\n for i, data in enumerate(batch):\n new_data = [x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)]\n new_batch.append(new_data)\n return new_batch", "def batch(data_path):\n train, _, _ = get_datasets(\n data_path=data_path,\n nb_nodes=7,\n task_type=\"classification\",\n nb_classes=2,\n split=None,\n k_fold=None,\n seed=1234,\n )\n for batch in torch.utils.data.DataLoader(\n train, shuffle=False, batch_size=25, drop_last=False\n ):\n return batch", "def load_preprocess_training_batch(batch_id, batch_size):\n path, dataset = select_dataset(training = True)\n data = dataset_lib.get_data(batch_id, dataset=dataset, path=path)\n features = [np.array(x[1]) for x in data]\n labels = np.array([x[0] for x in data])\n\n # Return the training data in batches of size <batch_size> or less\n return batch_features_labels(features, labels, batch_size)", "def load_all_data(self):\n layers, layers_fnames = [], []\n # TODO: add multiprocessing\n for i in range(len(self.infiles)):\n fname = self.infiles[i]\n self.logger.info(\n \"#Loading file: {} {}\".format(fname, \"(text file)\" if pathlib.Path(fname).suffix == \"\" else \"\"))\n\n # load text file\n data = load_data_text(file_path=fname, sample_names=self.sn, feature_names=self.fn, drop_features=self.df,\n drop_samples=self.ds)\n layers.append(data)\n layers_fnames.append(fname)\n\n return list(zip(layers_fnames, layers))", "def _read_one_file(file_name, label_list):\n lines = tf.io.gfile.GFile(file_name, \"r\").readlines()\n examples = []\n label_id_map = {label: i for i, label in enumerate(label_list)}\n sentence_id = 0\n example = InputExample(sentence_id=0)\n for line in lines:\n line = line.strip(\"\\n\")\n if line:\n # The format is: <token>\\t<label> for train/dev set and <token> for test.\n items = line.split(\"\\t\")\n assert len(items) == 2 or len(items) == 1\n token = items[0].strip()\n\n # Assign a dummy label_id for test set\n label_id = label_id_map[items[1].strip()] if len(items) == 2 else 0\n example.add_word_and_label_id(token, label_id)\n else:\n # Empty line indicates a new sentence.\n if example.words:\n examples.append(example)\n sentence_id += 1\n example = InputExample(sentence_id=sentence_id)\n\n if example.words:\n examples.append(example)\n return examples", "def load_batch(batch, feat_list, device='cpu'):\n batch_feat_list = []\n for hop_feat_list in feat_list:\n batch_feats = [feat[batch] for feat in hop_feat_list]\n 
batch_feat_list.append(batch_feats)\n\n batch_feat_list = [torch.stack(feat) for feat in batch_feat_list]\n batch_feats = torch.cat(batch_feat_list, dim=0)\n # if len(batch_feats.shape) == 2:\n # batch_feats = batch_feats.unsqueeze(1)\n\n return batch_feats.to(device)", "def __init__(self, data_path):\r\n\t\tfile_names = ['data_batch_%d' % i for i in range(1,6)]\r\n\t\tfile_names.append('test_batch')\r\n\r\n\t\tX = []\r\n\t\ty = []\r\n\t\tfor file_name in file_names:\r\n\t\t\twith open(data_path + file_name) as fin:\r\n\t\t\t\tdata_dict = cPickle.load(fin)\r\n\t\t\tX.append(data_dict['data'].ravel())\r\n\t\t\ty = y + data_dict['labels']\r\n\r\n\t\tself.X = np.asarray(X).reshape(60000, 32*32*3)\r\n\t\tself.y = np.asarray(y)\r\n\r\n\t\tfin = open(data_path + 'batches.meta')\r\n\t\tself.LABEL_NAMES = cPickle.load(fin)['label_names']\r\n\t\tfin.close()", "def load_training_data(list_files):\n training_data = []\n for tr_file in list_files:\n with open(os.path.join(\"data\", tr_file)) as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n next(reader)\n for row in reader:\n training_data.append(row[1])\n return training_data", "def load_data():\n # Load and preprocess data\n sentences, labels = load_data_and_labels()\n sentences_padded = pad_sentences(sentences)\n vocabulary, vocabulary_inv = build_vocab(sentences_padded)\n x, y = build_input_data(sentences_padded, labels, vocabulary)\n return [x, y, vocabulary, vocabulary_inv]", "def load_training(file_name, target_val, training_data, training_targets, \n elements):\n\n file = open(file_name, \"r\")\n\n # Iterate over file until empty line recieved\n while True:\n chunk = file.readline()\n\n if(chunk == ''):\n break\n\n ret = load_chunk(chunk, elements)\n\n training_targets.append(target_val)\n\n # Convert data to frequency domain using fft()\n training_data.append([i.real for i in fft(ret)])", "def load_dataset(filepath):\n \n X = list()\n x = list()\n\n Y = list()\n y = list()\n \n for line in open(filepath):\n # blank lines separate sequences\n if len(line) <= 1:\n X.append(x)\n Y.append(y)\n\n x = list()\n y = list()\n else:\n a, b = line.strip().split('\\t')\n x.append(a)\n y.append(b)\n \n return X, Y", "def load_input(input_name):\n with open(input_name) as input_file:\n input_list = list(map(int,input_file.readline().split(\",\")))\n return input_list", "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def load_preprocess_training_batch(batch_id, batch_size):\r\n filename = 'preprocess_batch_' + str(batch_id) + '.p'\r\n features, labels = pickle.load(open(filename, mode='rb'))\r\n\r\n # Return the training data in batches of size <batch_size> or less\r\n return batch_features_labels(features, labels, batch_size)", "def load_preprocess_training_batch(batch_id, batch_size):\r\n filename = 'preprocess_batch_' + str(batch_id) + '.p'\r\n features, labels = pickle.load(open(filename, mode='rb'))\r\n# labels = np.argmax(labels,1)\r\n# num = len(labels)\r\n# arr = np.zeros((num, 1))\r\n# for i in range(num):\r\n# arr[i][0] = labels[i]\r\n# np.reshape(features,(2500,150528))\r\n# ind = [i for i in range(len(features))]\r\n# random.shuffle(ind)\r\n# features = 
features[ind]\r\n# labels = labels[ind]\r\n\r\n # Return the training data in batches of size <batch_size> or less\r\n return features[0:batch_size],labels[0:batch_size]", "def read_input_files(input_file: str) -> list[Food]:\n with open(input_file) as input_fobj:\n foods = [Food.from_raw(line.strip()) for line in input_fobj]\n return foods", "def make_batch(self, batch_size):\n filenames = self.get_filenames()\n\n if os.path.isdir(filenames):\n num_records = len(os.listdir(filenames))\n print(\"Loading from directory. \" + str(num_records) + \" tfRecords found.\")\n files = tf.data.Dataset.list_files(filenames + \"/\" + \"*.tfrecord\").shuffle(num_records)\n dataset = files.apply(\n tf.contrib.data.parallel_interleave(\n lambda x: tf.data.TFRecordDataset(x, num_parallel_reads=256, buffer_size=8*1024*1024),\n cycle_length=32, sloppy=True)\n )\n else:\n print(\"Loading from single tfRecord...\")\n dataset = tf.data.TFRecordDataset(filenames + \".tfrecord\").repeat()\n \n dataset = dataset.map(self.parser, num_parallel_calls=128)\n \n if self.subset == 'train':\n min_queue_examples = int(\n Cifar10DataSet.num_examples_per_epoch(self.subset) * 0.4)\n # Ensure that the capacity is sufficiently large to provide good random\n # shuffling.\n dataset = dataset.shuffle(buffer_size=min_queue_examples + 3 * batch_size)\n \n dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))\n dataset = dataset.prefetch(10)\n \n iterator = dataset.make_one_shot_iterator()\n seq_batch, input_batch, map_batch, transformation_batch = iterator.get_next()\n\n return seq_batch, input_batch, map_batch, transformation_batch", "def _load_cifar_batch(fpath, label_key='labels'):\n if isinstance(fpath, (os.PathLike, str, bytes)):\n with open(fpath, 'rb') as f:\n return _load_cifar_batch(f, label_key)\n\n d = pickle.load(fpath, encoding='bytes')\n # decode utf8\n d_decoded = {}\n for k, v in d.items():\n d_decoded[k.decode('utf8')] = v\n d = d_decoded\n data = d['data']\n labels = d[label_key]\n\n data = data.reshape(data.shape[0], 3, 32, 32).transpose([0, 2, 3, 1])\n return data, labels", "def data_batch(self, batch_size, input_size, seed=None):\n listing = self.listing\n if seed:\n listing, _ = train_test_split(self.listing, random_state=seed, test_size=0.25)\n image_list = [item + '_orig.jpg' for item in listing]\n label_list = [item + '_contour.png' for item in listing]\n image_files, label_files = tf.convert_to_tensor(image_list), tf.convert_to_tensor(label_list)\n queue = tf.train.slice_input_producer([image_files, label_files],\n shuffle=True)\n img_contents = tf.read_file(queue[0])\n label_contents = tf.read_file(queue[1])\n image = tf.image.decode_jpeg(img_contents, channels=3)\n label = tf.image.decode_png(label_contents, channels=1)\n image, label = default_image_prep(image, label, input_size)\n return tf.train.batch([image, label],\n batch_size=batch_size)", "def inputs(data_dir, batch_size,num_data_files=FLAGS.num_data_files):\n\n \n filenames = [os.path.join(data_dir, 'fc6pool4mask_batch_%d' % i)\n for i in xrange(1,num_data_files+1)]\n for f in filenames:\n if not tf.gfile.Exists(f):\n raise ValueError('Failed to find file: ' + f)\n\n\n\n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer(filenames)\n\n # Read examples from files in the filename queue.\n read_input = read_record(filename_queue)\n\n # Subtract off the mean and divide by the variance of the pixels??\n \n # Ensure that the random shuffling has good mixing properties.\n 
min_fraction_of_examples_in_queue = 0.1\n min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *\n min_fraction_of_examples_in_queue)\n\n #print(min_queue_examples)\n print ('Filling queue with %d bottlenecked inputs before starting to train. '\n 'This will take a few minutes.' % min_queue_examples)\n\n # Generate a batch of images and labels by building up a queue of examples.\n return _generate_bottlenecked_batch(read_input.fc6, read_input.pool,read_input.mask,\n min_queue_examples, batch_size,\n shuffle=True)", "def load_dataset(path, test_or_train):\n senta_batch, sentb_batch, scores_batch = [], [], []\n with open(path, encoding='utf-8') as f:\n for i, line in enumerate(f):\n items = line.strip().split('\\t')\n if test_or_train == 'train':\n senta, sentb, score = items[-2], items[-1], float(items[-3])\n elif test_or_train in ['dev', 'test']:\n senta, sentb, score = items[-2], items[-1], float(items[-3])\n else:\n raise Exception(\"{} error\".format(test_or_train))\n senta_batch.append(senta)\n sentb_batch.append(sentb)\n scores_batch.append(score)\n return senta_batch, sentb_batch, scores_batch", "def load_batch(self, fpath, match, in_num):\n if in_num == None:\n in_num = input('Please specify IN number: ')\n\n if match == None:\n match = input('Please specify filename string to match for batch loading (ex. \\'_s2_\\'): ')\n\n # get a list of all matching files\n glob_match = f'{fpath}/*{match}*'\n files = glob.glob(glob_match)\n\n # load & concatenate files into a single dataframe\n data = pd.concat((pd.read_csv(file, header = [0, 1], index_col = 0, parse_dates=True, low_memory=False) for file in files)).sort_index()\n\n # extract sampling frequency\n s_freq = 1/(data.index[1] - data.index[0]).total_seconds()\n\n # reset the index to continuous time\n ind_freq = str(int(1/s_freq*1000000))+'us'\n ind_start = '1900-01-01 00:00:00.000'\n ind = pd.date_range(start = ind_start, periods=len(data), freq=ind_freq)\n data.index = ind\n\n # set metadata & attributes\n self.metadata = {'file_info':{'in_num': in_num, 'files': files, 'dir': fpath,\n 'match_phrase': match},\n 'analysis_info':{'s_freq': s_freq} }\n self.data = data\n self.s_freq = s_freq", "def load_data():\n with open('../data/dataset.txt', 'r') as data_file:\n return data_file.read().split('\\n')", "def read_group_batches(filename):\n l = []\n try:\n with open(filename) as f:\n for line in f:\n elements = line.rstrip('\\n').split('\\t')\n batchname = elements[0]\n batchgroup = int(elements[1])\n # Check if there is a nested list with index of the batchgroup\n if len(l)-1 < batchgroup:\n l.append([])\n l[batchgroup].append(batchname)\n except IOError as e:\n print(e)\n return l", "def generator_input(filenames, chunk_size, batch_size=64):\n\n feature_cols = None\n while True:\n input_reader = pd.read_csv(\n tf.gfile.Open(filenames[0]),\n names=CSV_COLUMNS,\n chunksize=chunk_size,\n na_values=' ?')\n\n for input_data in input_reader:\n input_data = input_data.dropna()\n # Pop off all of the columns we want to predict and concatenate them\n labels = pd.concat([input_data.pop(x) for x in LABEL_COLUMNS], 1)\n\n input_data = to_numeric_features(input_data, feature_cols)\n\n # Retains schema for next chunk processing.\n if feature_cols is None:\n feature_cols = input_data.columns\n\n idx_len = input_data.shape[0]\n for index in range(0, idx_len, batch_size):\n yield (input_data.iloc[index:min(idx_len, index + batch_size)],\n labels.iloc[index:min(idx_len, index + batch_size)])", "def load_nli_file(data_path, num_par=2):\n tokenizer 
= tokenization.NltkTokenizer()\n dataset = tf.data.TextLineDataset(data_path)\n dataset = dataset.map(\n functools.partial(_nli_line_to_tensors, tokenizer=tokenizer),\n num_parallel_calls=num_par)\n dataset = dataset.filter(lambda x: tf.greater_equal(x[\"label\"], 0))\n return dataset", "def split_and_load(batch_data, num_gpus):\n return [batch_data[i].data[0] for i in range(num_gpus)], \\\n [batch_data[i].label[0].as_in_context(mx.gpu(i)) for i in range(num_gpus)]", "def load_batch(filename: str) -> Tuple[ndarray, ndarray, ndarray]:\n dataDict = unpickle(filename)\n print(\"1\", dataDict[b\"data\"][1, :])\n X = (dataDict[b\"data\"] / 255).T\n print(\"2\", X[:, 1])\n y = np.array(dataDict[b\"labels\"])\n Y = np.eye(10)[y].T\n return X, Y, y", "def input_fn(data_file, num_epochs, shuffle, batch_size):\n assert tf.gfile.Exists(data_file), (\n '%s not found. Please make sure you have either run data_download.py or '\n 'set both arguments --train_data and --test_data.' % data_file)\n\n def parse_csv(value): \n print('Parsing', data_file)\n columns = tf.decode_csv(value, record_defaults=_CSV_COLUMN_DEFAULTS)\n features = dict(zip(_CSV_COLUMNS, columns))\n labels = features.pop('labels')\n return features, tf.equal(labels, 'TRUE')\n \n\n # Extract lines from input files using the Dataset API.\n dataset = tf.data.TextLineDataset(data_file)\n\n if shuffle:\n dataset = dataset.shuffle(buffer_size=_NUM_EXAMPLES['train'])\n\n dataset = dataset.map(parse_csv, num_parallel_calls=5)\n\n # We call repeat after shuffling, rather than before, to prevent separate\n # epochs from blending together.\n dataset = dataset.repeat(num_epochs)\n dataset = dataset.batch(batch_size)\n\n iterator = dataset.make_one_shot_iterator()\n features, labels = iterator.get_next()\n return features, labels", "def input_fn(data_file, batch_size):\n assert tf.gfile.Exists(data_file), (\n '%s not found. Please make sure you have set both arguments --train_data and --test_data.' 
% data_file)\n\n def parse(serialized_example):\n context_feature = {'label': tf.FixedLenFeature([], dtype=tf.int64)}\n sequence_features = {\n \"week_list\": tf.FixedLenSequenceFeature([], dtype=tf.string),\n 'week_weight': tf.FixedLenSequenceFeature([], dtype=tf.float32)}\n context_parsed, sequence_parsed = tf.parse_single_sequence_example(\n serialized=serialized_example,\n context_features=context_feature,\n sequence_features=sequence_features)\n labels = context_parsed['label']\n week_list = sequence_parsed['week_list']\n week_weight = sequence_parsed['week_weight']\n return labels, week_list, week_weight\n\n def form_features(*line):\n cols = ['label', 'week_list', 'week_weight']\n features = dict(zip(cols, line))\n label = features.pop('label')\n return features, label\n\n dataset = tf.data.TFRecordDataset(data_file) \\\n .map(parse) \\\n .padded_batch(batch_size, ([1], [7], [7])) \\\n .map(form_features)\n iterator = dataset.make_one_shot_iterator()\n features, labels = iterator.get_next()\n return features, labels", "def data_loader(\n self, batch_size: int = 1, iter_steps: int = 0, batch_as_list: bool = True\n ) -> DataLoader:\n data = self.data\n datasets = []\n\n for _, dat in data.items():\n datasets.append(dat.dataset())\n\n if len(datasets) < 1:\n raise FileNotFoundError(\n \"no datasets available for this model to create a loader from\"\n )\n\n return DataLoader(\n *datasets,\n batch_size=batch_size,\n iter_steps=iter_steps,\n batch_as_list=batch_as_list,\n )", "def load_input(self, file_name):\n with open(file_name, \"r\") as in_file:\n self.all_lines = [line.rstrip('\\n') for line in in_file]", "def load_all(cls, data):\n return [cls.load(obj) for obj in data]", "def input_fn(data_file, num_epochs, shuffle, batch_size):\n assert tf.gfile.Exists(data_file), (\n '%s not found. Please make sure you have either run data_download.py or '\n 'set both arguments --train_data and --test_data.' 
% data_file)\n\n def parse_csv(value):\n print('Parsing', data_file)\n columns = tf.decode_csv(value, record_defaults=_CSV_COLUMN_DEFAULTS)\n features = dict(zip(_CSV_COLUMNS, columns))\n labels = features.pop('income_bracket')\n return features, tf.equal(labels, '>50K')\n\n # Extract lines from input files using the Dataset API.\n dataset = tf.data.TextLineDataset(data_file)\n\n if shuffle:\n dataset = dataset.shuffle(buffer_size=_NUM_EXAMPLES['train'])\n\n dataset = dataset.map(parse_csv, num_parallel_calls=5)\n\n # We call repeat after shuffling, rather than before, to prevent separate\n # epochs from blending together.\n dataset = dataset.repeat(num_epochs)\n dataset = dataset.batch(batch_size)\n\n iterator = dataset.make_one_shot_iterator()\n features, labels = iterator.get_next()\n return features, labels", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n # all_files = tf.gfile.Glob(os.path.join(input_dir, '*.png'))\n # test_files = [all_files[idx] for x in np.random.choice(len(all_files), 200, replace=False)]\n # for filepath in test_files:\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_data(batch_size=batch_size):\n trainset = LibriSpeechDataset(training_set, int(LIBRISPEECH_SAMPLING_RATE * n_seconds))\n testset = LibriSpeechDataset(validation_set, int(LIBRISPEECH_SAMPLING_RATE * n_seconds), stochastic=False)\n\n train_loader = DataLoader(trainset, batch_size=batch_size, num_workers=1, shuffle=True, drop_last=True)\n test_loader = DataLoader(testset, batch_size=1, num_workers=1, drop_last=True)\n\n return train_loader, test_loader", "def input_fn():\n bos_id = tf.constant(BOS_ID, tf.int32)\n eos_id = tf.constant(EOS_ID, tf.int32)\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_files)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.map(lambda record: _decode_record(record, name_to_features))\n\n d = d.map(lambda src_ids, tgt_ids, label: (\n tf.concat([[bos_id], src_ids, [eos_id]], 0),\n tf.concat([tgt_ids, [eos_id]], 0),\n label))\n\n d = d.map(lambda src_ids, tgt_ids, label: (\n src_ids[:FLAGS.max_sequence_length],\n tgt_ids[:FLAGS.max_sequence_length],\n label\n ))\n\n d = d.map(lambda src_ids, tgt_ids, label: (\n tf.concat([src_ids, tgt_ids], 0),\n tf.concat([tf.zeros_like(src_ids), tf.ones_like(tgt_ids)], 0),\n label\n ))\n\n d = d.map(lambda input_ids, segment_ids, label_ids: (\n input_ids,\n segment_ids,\n tf.ones_like(input_ids),\n label_ids\n ))\n\n def batching_func(x):\n return x.padded_batch(\n batch_size,\n # The entry is the source line rows;\n # this has unknown-length vectors. 
The last entry is\n # the source row size; this is a scalar.\n padded_shapes=(\n tf.TensorShape([None]), # src\n tf.TensorShape([None]), # tgt\n tf.TensorShape([None]),\n tf.TensorShape([])), # src_len\n # Pad the source sequences with eos tokens.\n # (Though notice we don't generally need to do this since\n # later on we will be masking out calculations past the true sequence.\n padding_values=(\n PAD_ID, # src\n PAD_ID,\n PAD_ID,\n 0)) # src_len -- unused\n\n batched_dataset = batching_func(d)\n features = batched_dataset.map(lambda input_ids, segment_ids, input_mask, label:\n {\n \"input_ids\": input_ids,\n \"segment_ids\": segment_ids,\n \"input_mask\": input_mask,\n \"label_ids\": label\n\n })\n\n return features", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb') as f:\r\n datadict = load_pickle(f)\r\n X = datadict['data']\r\n Y = datadict['labels']\r\n X = X.reshape(10000,3072)\r\n Y = np.array(Y)\r\n return X, Y", "def load_batch(dataset, batch_size=32, height=299, width=299, is_training=False):\n data_provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset, common_queue_capacity=32,\n common_queue_min=8)\n image_raw, label = data_provider.get(['image', 'label'])\n\n # Preprocess image for usage by Inception.\n image = inception_preprocessing.preprocess_image(image_raw, height, width, is_training=is_training)\n\n # Preprocess the image for display purposes.\n image_raw = tf.expand_dims(image_raw, 0)\n image_raw = tf.image.resize_images(image_raw, [height, width])\n image_raw = tf.squeeze(image_raw)\n\n # Batch it up.\n images, images_raw, labels = tf.train.batch(\n [image, image_raw, label],\n batch_size=batch_size,\n num_threads=1,\n capacity=2 * batch_size)\n\n return images, images_raw, labels", "def load_training_data(file_path):\n return load_data(file_path)", "def load_batch(dataset, batch_size=32, height=299, width=299, is_training=False):\n data_provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset, common_queue_capacity=32,\n common_queue_min=8)\n image_raw, label = data_provider.get(['image', 'label'])\n \n # Preprocess image for usage by Inception.\n image = inception_preprocessing.preprocess_image(image_raw, height, width, is_training=is_training)\n \n # Preprocess the image for display purposes.\n image_raw = tf.expand_dims(image_raw, 0)\n image_raw = tf.image.resize_images(image_raw, [height, width])\n image_raw = tf.squeeze(image_raw)\n\n # Batch it up.\n images, images_raw, labels = tf.train.batch(\n [image, image_raw, label],\n batch_size=batch_size,\n num_threads=1,\n capacity=2 * batch_size)\n \n return images, images_raw, labels", "def load_examples(path: str) -> List['InputExample']:\n with open(path, 'rb') as fh:\n return pickle.load(fh)", "def load_examples(path: str) -> List['InputExample']:\n with open(path, 'rb') as fh:\n return pickle.load(fh)", "def load_examples(path: str) -> List['InputExample']:\n with open(path, 'rb') as fh:\n return pickle.load(fh)", "def load_CIFAR_batch(filename):\n with open(filename, 'rb') as f:\n datadict = load_pickle(f)\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000,3072)\n Y = np.array(Y)\n return X, Y", "def load_dataset(input_path):\n with open(input_path, \"r\") as f:\n smiles_list = f.read().strip().split(\"\\n\")\n return smiles_list", "def load_batch(dataset, batch_size=32, height=224, width=224, is_training=False):\n data_provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset, common_queue_capacity=32,\n common_queue_min=8)\n image_raw, 
label = data_provider.get(['image', 'label'])\n\n # Preprocess image for usage by Inception.\n # image = inception_preprocessing.preprocess_image(image_raw, height, width, is_training=is_training)\n image = no_preprocessing.preprocess_image(image_raw, height, width, is_training=is_training)\n\n # Preprocess the image for display purposes.\n image_raw = tf.expand_dims(image_raw, 0)\n image_raw = tf.image.resize_images(image_raw, [height, width])\n image_raw = tf.squeeze(image_raw)\n\n # Batch it up.\n images, images_raw, labels = tf.train.batch(\n [image, image_raw, label],\n batch_size=batch_size,\n num_threads=1,\n capacity=2 * batch_size)\n\n return images, images_raw, labels", "def load_dataset(path):\n training_data = []\n with open(path) as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n next(reader)\n for row in reader:\n training_data.append(row[1])\n return training_data", "def input_fn():\n files = tf.data.Dataset.list_files(os.path.join(\n tft_working_dir, filebase + '*'))\n dataset = files.interleave(\n tf.data.TFRecordDataset, cycle_length=4, block_length=16)\n dataset = dataset.map(parser)\n\n if shuffle:\n dataset = dataset.shuffle(buffer_size)\n\n dataset = dataset.repeat(num_epochs)\n dataset = dataset.batch(batch_size)\n\n dataset = dataset.prefetch(prefetch_buffer_size)\n iterator = dataset.make_one_shot_iterator()\n transformed_features, transformed_labels = iterator.get_next()\n\n return transformed_features, transformed_labels", "def input_fn(data_file, num_epochs, shuffle, batch_size):\n assert tf.gfile.Exists(data_file), (\n '%s not found. Please make sure you have either run data_download.py or '\n 'set both arguments --train_data and --test_data.' % data_file)\n\n def parse_csv(value):\n print('Parsing', data_file)\n columns = tf.decode_csv(value, record_defaults=_CSV_COLUMN_DEFAULTS)\n features = dict(zip(_CSV_COLUMNS, columns))\n\n labels = features.pop('price')\n return features, tf.equal(labels, '>1000')\n\n # Extract lines from input files using the Dataset API.\n\n dataset = tf.data.TextLineDataset(data_file)\n\n if shuffle:\n dataset = dataset.shuffle(buffer_size=_NUM_EXAMPLES['train'])\n\n dataset = dataset.map(parse_csv, num_parallel_calls=5)\n\n # We call repeat after shuffling, rather than before, to prevent separate\n # epochs from blending together.\n dataset = dataset.repeat(num_epochs)\n dataset = dataset.batch(batch_size)\n return dataset", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 1.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_CIFAR10(path):\r\n sampleList = []\r\n labelList = []\r\n # load all the data, as there only five training samples name as data_batch_id\r\n for i in range(1, 6):\r\n # get full filename\r\n filename = os.path.join(path, 'data_batch_%d' % (i, ))\r\n x, y = load_CIFAR_batch(filename)\r\n\r\n sampleList.append(x)\r\n labelList.append(y)\r\n\r\n # combine elements as one array\r\n Xtr = np.concatenate(sampleList)\r\n Ytr = np.concatenate(labelList)\r\n del x, y\r\n 
print(\"Training data loaded, total size : %d\", len(Xtr))\r\n # load test data\r\n Xte, Yte = load_CIFAR_batch(os.path.join(path, 'test_batch'))\r\n return Xtr, Ytr, Xte, Yte", "def load_train_batch(self):\n def _parse_train_img(img_path):\n with tf.device('/cpu:0'):\n img_buffer = tf.read_file(img_path)\n image_decoded = tf.image.decode_jpeg(img_buffer)\n tgt_image, src_image_stack = \\\n self.unpack_image_sequence(\n image_decoded, self.img_height, self.img_width, self.num_source)\n return tgt_image, src_image_stack\n\n def _batch_preprocessing(stack_images, intrinsics, optional_data):\n intrinsics = tf.cast(intrinsics, tf.float32)\n image_all = tf.concat([stack_images[0], stack_images[1]], axis=3)\n\n if self.match_num == 0: # otherwise matches coords are wrong\n image_all, intrinsics = self.data_augmentation(\n image_all, intrinsics, self.img_height, self.img_width)\n tgt_image = image_all[:, :, :, :3]\n src_image_stack = image_all[:, :, :, 3:]\n intrinsics = self.get_multi_scale_intrinsics(intrinsics, self.num_scales)\n return tgt_image, src_image_stack, intrinsics, optional_data\n\n file_list = self.format_file_list(self.dataset_dir, 'train')\n self.steps_per_epoch = int(len(file_list['image_file_list'])//self.batch_size)\n\n input_image_names_ph = tf.placeholder(tf.string, shape=[None], name='input_image_names_ph')\n image_dataset = tf.data.Dataset.from_tensor_slices(\n input_image_names_ph).map(_parse_train_img)\n\n cam_intrinsics_ph = tf.placeholder(tf.float32, [None, 3, 3], name='cam_intrinsics_ph')\n intrinsics_dataset = tf.data.Dataset.from_tensor_slices(cam_intrinsics_ph)\n\n datasets = (image_dataset, intrinsics_dataset, intrinsics_dataset)\n if self.read_pose:\n poses_ph = tf.placeholder(tf.float32, [None, self.num_source+1, 6], name='poses_ph')\n pose_dataset = tf.data.Dataset.from_tensor_slices(poses_ph)\n datasets = (image_dataset, intrinsics_dataset, pose_dataset)\n if self.match_num > 0:\n matches_ph = tf.placeholder(tf.float32, [None, self.num_source, self.match_num, 4], name='matches_ph')\n match_dataset = tf.data.Dataset.from_tensor_slices(matches_ph)\n datasets = (image_dataset, intrinsics_dataset, match_dataset)\n\n all_dataset = tf.data.Dataset.zip(datasets)\n all_dataset = all_dataset.batch(self.batch_size).repeat().prefetch(self.batch_size*4)\n all_dataset = all_dataset.map(_batch_preprocessing)\n iterator = all_dataset.make_initializable_iterator()\n return iterator", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_data(path_dataset):\n data = read_txt(path_dataset)[1:]\n return preprocess_data(data)", "def get_train_batches(data_dir='/home/yunhan/batchified'):\n # todo: read in data that is preoprocessed\n # Use batch 1 - 52 as train (60%), 53 - 71 as validation (20%), 72 - 89 as test (20%)\n n = 53\n idx = np.random.permutation(n)\n idx = idx + 1\n for i in range(n):\n X = np.load(\"%s/X%d.npy\" % (data_dir, idx[i]))/255.\n Y = np.load(\"%s/y%d.npy\" % (data_dir, 
idx[i])).reshape(-1)\n yield X, Y", "def load_input(file_name, elements):\n\n input_file = open(file_name)\n input_data = []\n\n while True:\n chunk = input_file.readline()\n\n if(chunk == ''):\n break\n \n ret = load_chunk(chunk, elements)\n\n # Convert data to frequency domain using fft()\n input_data.append([i.real for i in fft(ret)])\n\n return input_data", "def load_dataset(fname, nb_lines):\n import os.path\n if os.path.isfile('safe/Amazon-'+str(nb_lines)+'.p'):\n return util.load('safe/Amazon-'+str(nb_lines)+'.p')\n count = 1\n X = []\n y = []\n with open(fname) as f:\n for line in f:\n text, label = read_line(line)\n #print((label, text))\n X.append(text)\n y.append(label)\n if count >= nb_lines:\n break\n count+=1\n\n #load pretrained dictonary\n dico = util.load('safe/vocab_gensim.p')\n preprocessor = text_preprocessing.Preprocessor(dico=dico)\n X = preprocessor.preprocess(X)\n #save the loaded dataset in a pickle for speeding up next run\n util.save((X,y), 'safe/Amazon-'+str(nb_lines)+'.p')\n return (X, y)", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_datasets(filepath):\n\n data_file = open(filepath, 'r')\n data_list = data_file.readlines()\n data_file.close()\n\n return data_list", "def load_data(self):\n with open(self.file_name) as f:\n lines = f.readlines()\n\n labels = list()\n all_dat = list()\n for i, l in enumerate(lines):\n\n labels.append(int(l[0]))\n\n l = gensim.utils.any2unicode(l)\n all_dat.append(LabeledSentence(l.split(\"\\t\")[-1], [i]))\n\n return all_dat, np.asarray(labels)", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_cifa_10():\n train_set_x = np.ndarray([ 50000, 3072 ])\n train_set_y = np.ndarray( [50000] )\n\n batch_size = 10000\n for i in xrange(5):\n batch = open( datapath + \"data_batch_\"+str(i+1), 'rb')\n map = cPickle.load( batch )\n batch.close()\n train_set_x[ i*batch_size : (i+1)*batch_size , : ] = np.asarray( map[ 'data' ], dtype = 'float32' )\n train_set_y[ i*batch_size : (i+1)*batch_size ] = np.asarray( map[ 'labels' ], dtype = 'float32' )\n\n test_file = open( datapath + 'test_batch', 'rb')\n map = cPickle.load( test_file )\n test_file.close()\n \n test_set_x = np.asarray( map['data'], dtype = 'float32' )\n test_set_y = np.asarray( map['labels'], dtype = 'float32' )\n \n\n return train_set_x, train_set_y, test_set_x, test_set_y", "def load_CIFAR_batch(filename):\n with open(filename, 'rb')as 
f:\n # datadict = p.load(f)\n datadict = pickle.load(f, encoding = 'bytes')\n X = datadict[b'data']\n Y = datadict[b'labels']\n X = X.reshape(10000, 3, 32, 32)\n Y = np.array(Y)\n return X, Y", "def batch_data(cls, train_data, train_labels, batch_size):\n for batch in range(int(np.ceil(train_data.shape[0] / batch_size))):\n start = batch_size * batch\n end = start + batch_size\n if end > train_data.shape[0]:\n yield batch, (train_data[start:train_data.shape[0]], \\\n train_labels[start:train_data.shape[0]])\n else:\n yield batch, (train_data[start:end], \\\n train_labels[start:end])", "def load_preprocess_test_batch(batch_id, batch_size):\r\n filename = 'preprocess_test_' + str(batch_id) + '.p'\r\n features, labels = pickle.load(open(filename, mode='rb'))\r\n# labels = np.argmax(labels,1)\r\n# num = len(labels)\r\n# arr = np.zeros((num, 1))\r\n# for i in range(num):\r\n# arr[i][0] = labels[i]\r\n# ind = [i for i in range(len(features))]\r\n# random.shuffle(ind)\r\n# features = features[ind]\r\n# labels = labels[ind]\r\n\r\n # Return the training data in batches of size <batch_size> or less\r\n return features[1200:batch_size],labels[1200:batch_size]\r\n #return batch_features_labels(features, labels, batch_size)\r", "def parse_file(self, file_path) -> list:\n data = []\n with open(file_path, 'rb') as f:\n lines = pickle.load(f)\n for line in lines:\n input, output = line\n if input.strip() == \"\" or output.strip() == \"\":\n continue\n input_len = len(input.split())\n output_len = len(output.split())\n if input_len > 50 or output_len > 50:\n continue\n data_item = Text2TextDataItem(input_text=input, output_text=output, tokenizer=self.tokenizer,\n share_vocab=self.share_vocab)\n data.append(data_item)\n return data", "def load_input(filepath: str) -> list:\n lines = []\n with open(filepath, \"r\", encoding=\"utf-8\") as file:\n for line in file.readlines():\n lines.append(line.strip())\n return lines", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb')as f:\r\n datadict = p.load(f)\r\n \r\n X = datadict['data']\r\n Y = datadict['labels']\r\n \r\n print X.shape\r\n X = X.reshape(X.shape[0], SHAPE[0], SHAPE[1], SHAPE[2])\r\n Y = np.array(Y)\r\n return X, Y", "def _load_split_data(self, dataset_path):\n for i, prefix in enumerate(['train', 'dev', 'test']):\n filename = os.path.join(dataset_path, '{}.txt'.format(prefix))\n knowledge, src, tgt = self._load_multi_data(filename)\n self.group_text_data[0].append(knowledge)\n self.group_text_data[1].append(src)\n self.group_text_data[2].append(tgt)", "def batch_iter(input_data,batch_size):\r\n batch_ids,batch_mask,batch_segment,batch_label=[],[],[],[]\r\n for features in input_data:\r\n if len(batch_ids) == batch_size:\r\n yield batch_ids,batch_mask,batch_segment,batch_label\r\n batch_ids, batch_mask, batch_segment, batch_label = [], [], [], []\r\n\r\n batch_ids.append(features['input_ids'])\r\n batch_mask.append(features['input_mask'])\r\n batch_segment.append(features['segment_ids'])\r\n batch_label.append(features['label_ids'])\r\n\r\n if len(batch_ids) != 0:\r\n yield batch_ids, batch_mask, batch_segment, batch_label", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n 
if(FLAGS.checkpoint_file_name==\"vgg_16.ckpt\")or(FLAGS.checkpoint_file_name==\"vgg_19.ckpt\")or(FLAGS.checkpoint_file_name==\"resnet_v1_50.ckpt\")or(FLAGS.checkpoint_file_name==\"resnet_v1_101.ckpt\")or(FLAGS.checkpoint_file_name==\"resnet_v1_152.ckpt\"):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float)\n images[idx, :, :, :] = image\n else:\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_data(loc='../data/SICK/'):\n trainA, trainB, testA, testB = [],[],[],[]\n trainS, testS = [],[]\n\n with open(loc + 'SICK_train.txt', 'rb') as f:\n for line in f:\n text = line.strip().split('\\t')\n trainA.append(text[1])\n trainB.append(text[2])\n trainS.append(text[3])\n with open(loc + 'SICK_test_annotated.txt', 'rb') as f:\n for line in f:\n text = line.strip().split('\\t')\n testA.append(text[1])\n testB.append(text[2])\n testS.append(text[3])\n\n trainS = [float(s) for s in trainS[1:]]\n testS = [float(s) for s in testS[1:]]\n\n return [trainA[1:], trainB[1:]], [testA[1:], testB[1:]], [trainS, testS]", "def load_dataset(filenames, batch_size, corruption_func, crop_size):\n cache = dict()\n while True:\n source_batch = np.zeros((batch_size, crop_size[0], crop_size[1], 1))\n target_batch = np.zeros((batch_size, crop_size[0], crop_size[1], 1))\n chosen_filenames = _read_images_and_put_in_dict(filenames,\n cache,\n batch_size)\n for filename_idx in range(len(chosen_filenames)):\n filename = chosen_filenames[filename_idx]\n im = cache[filename]\n patch = _get_random_patch_of_the_image(im, crop_size)\n corrupted_im = corruption_func(patch)\n source_batch[filename_idx, :, :, 0] = corrupted_im - 0.5\n target_batch[filename_idx, :, :, 0] = patch - 0.5\n yield source_batch, target_batch", "def load_training_data(config):\n # Load data\n LOGGER.info(\"Loading training data.\")\n train_x = load_data(config['data_source'], config['train_x_filename'])\n train_y = load_data(config['data_source'], config['train_y_filename'])\n val_x = load_data(config['data_source'], config['val_x_filename'])\n val_y = load_data(config['data_source'], config['val_y_filename'])\n LOGGER.info(\"Training data size: %d\", len(train_x))\n LOGGER.info(\"Validation data size: %d\", len(val_x))\n\n # Build datasets and create iterators\n LOGGER.info(\"Building dataset.\")\n train_dataset = get_dataset(\n train_x, train_y, config['batch_size'], config['data_shape'],\n config['n_classes'], True)\n val_dataset = get_dataset(\n val_x, val_y, config['batch_size'], config['data_shape'],\n config['n_classes'])\n\n return train_dataset, val_dataset, len(val_x)", "def load_features(feature_path):\n if not os.path.exists(os.path.join(feature_path, f\"0_features.npy\")): \n raise ValueError(f\"The provided location {feature_path} does not contain any representation files\")\n\n ds_list, chunk_id = [], 0\n while os.path.exists(os.path.join(feature_path, f\"{chunk_id}_features.npy\")): \n features = ch.from_numpy(np.load(os.path.join(feature_path, f\"{chunk_id}_features.npy\"))).float()\n labels = ch.from_numpy(np.load(os.path.join(feature_path, f\"{chunk_id}_labels.npy\"))).long()\n 
ds_list.append(ch.utils.data.TensorDataset(features, labels))\n chunk_id += 1\n\n print(f\"==> loaded {chunk_id} files of representations...\")\n return ch.utils.data.ConcatDataset(ds_list)", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb') as f:\r\n datadict = pickle.load(f, encoding='latin1')\r\n X = datadict['data']\r\n Y = datadict['labels']\r\n X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\r\n Y = np.array(Y)\r\n return X, Y", "def read_files(\n self,\n file_instructions: List[Dict],\n num_examples_per_shard: List[int],\n shuffle_files: bool,\n ) -> DatasetType:\n if not file_instructions:\n raise AssertionError(f'Instruction {file_instructions} corresponds to no data')\n\n # Prepend path to filename\n files = copy.deepcopy(file_instructions)\n for f in files:\n f.update(filename=os.path.join(self._path, f['filename']))\n\n if self._read_config.experimental_interleave_sort_fn is not None:\n files = self._read_config.experimental_interleave_sort_fn(files)\n\n do_skip = any(f['skip'] > 0 for f in files)\n do_take = any(f['take'] > -1 for f in files)\n\n tensor_inputs = {\n k: list(vals) if k == 'filename' else np.array(vals, dtype=np.int64)\n for k, vals in zip_dict(*files)\n }\n\n instruction_ds = tf.data.Dataset.from_tensor_slices(tensor_inputs)\n if shuffle_files:\n instruction_ds = instruction_ds.shuffle(\n len(tensor_inputs['filename']),\n seed=self._read_config.seed,\n reshuffle_each_iteration=self._read_config.shuffle_reshuffle_each_iteration,\n )\n\n ds = instruction_ds.interleave(\n partial(self._get_dataset_from_filename, do_skip=do_skip, do_take=do_take),\n cycle_length=self._read_config.interleave_cycle_length,\n block_length=self._read_config.interleave_block_length,\n num_parallel_calls=tf.data.experimental.AUTOTUNE,\n )\n\n if (num_examples_per_shard and hasattr(tf.data.experimental, 'assert_cardinality')):\n cardinality = sum(num_examples_per_shard)\n ds = ds.apply(tf.data.experimental.assert_cardinality(cardinality))\n ds = ds.with_options(self._read_config.options)\n return ds.map(self._parser.parse_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)", "def set_batch_data():\r\n if not os.path.exists(filepath):\r\n download_data()\r\n for n in range(0,6):\r\n d = read(filepath + flist[n])\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, trts = {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n trts['x'], trts['y'] = d['data'], d['labels']\r\n trtsflag = ['train', 'train', 'train', 'train', 'train', 'test']\r\n\r\n data['flag'] = trtsflag[n]\r\n data[trtsflag[n]] = trts\r\n save_pkl(data, savename=flist[n]+'.pkl')", "def load_array(data_arrays, batch_size, is_train=True):\n dataset = data.TensorDataset(*data_arrays)\n return data.DataLoader(dataset, batch_size, shuffle=is_train)", "def load_CIFAR_batch(filename):\n with open(filename, 'rb')as f:\n datadict = p.load(f, encoding='iso-8859-1')\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000, 3, 32, 32)\n Y = np.array(Y)\n return X, Y", "def _read_train_datas(self):\r\n with open(self.train_label_path, 'r') as fb:\r\n lines = fb.readlines()\r\n return self._parse_raw_labels(lines)", "def read_data(cls, input_file):\n with tf.gfile.Open(input_file, \"r\") as f:\n lines = []\n for line in f:\n line = 
line.strip()\n if line.startswith('-DOCSTART-'):\n continue\n else:\n word_labels = line.split('-seq-')\n assert len(word_labels) == 2\n\n words = word_labels[0]\n labels = word_labels[1]\n lines.append([words, labels])\n\n return lines", "def load_CIFAR_batch(filename):\n with open(filename, 'rb') as f:\n datadict = pickle.load(f, encoding='latin1')\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype(\"float64\")\n Y = np.array(Y)\n return X, Y", "def load_array(data_arrays, batch_size, is_train=True): #@save\n dataset = tf.data.Dataset.from_tensor_slices(data_arrays)\n if is_train:\n dataset = dataset.shuffle(buffer_size=1000)\n dataset = dataset.batch(batch_size)\n return dataset", "def load_array(data_arrays, batch_size, is_train=True): #@save\n dataset = tf.data.Dataset.from_tensor_slices(data_arrays)\n if is_train:\n dataset = dataset.shuffle(buffer_size=1000)\n dataset = dataset.batch(batch_size)\n return dataset", "def load_data(self, file_path):\n \n dataset = []\n \n for line in open(file_path):\n arr = line.strip().split('\\t')\n label = [w for w in arr[0].split(' ')]\n sentence = [w for w in arr[1].split(' ')]\n cname = ' '.join(label)\n \n # The line is useless if the class is\n # not in the class dictionary.\n if cname not in self.class_list:\n raise Exception(\"{} not in class list.\".format(cname))\n \n # Build the sample dictionary.\n sample = {}\n sample['sentence_w2v'] = []\n \n for word in sentence:\n if word not in self.w2v.vocab.keys():\n continue # ignore sentence\n \n # In the loading embedding (see self.load_embedding()), we\n # stack one additional layer of zeros in front to handle padding.\n # Thus here we append the embedding index plus one.\n sample['sentence_w2v'].append(torch.Tensor([self.w2v.vocab[word].index + 1]))\n\n sample['length'] = len(sample['sentence_w2v'])\n sample['label_onehot'] = self.onehot(self.class_indices[cname])\n sample['label_w2v'] = self.class_w2v[cname]\n dataset.append(sample)\n \n return dataset", "def load_raw_data(self, input_files):\n\n log.debug(f\"Loading dataset {input_files}\") \n print(f\"Loading dataset\")\n\n # Load stroke information from XML files\n for file in input_files:\n new_strokeset = strokeset.StrokeSet(file)\n self.strokesets.append(new_strokeset)\n self.stroke_matrix.append(new_strokeset.as_delta_array())\n self.stroke_ascii.append(new_strokeset.get_text())\n\n done_msg = \"Finished parsing dataset. 
Imported {} lines\".format(len(self.get_strokesets()))\n print (done_msg)\n log.info(done_msg)", "def generateBatches(data, batch_size):\n random.shuffle(data)\n batches = []\n size = len(data)\n def loadBatches(data, total_size, batch_size_):\n for i in range(0, total_size, batch_size_):\n yield data[i:min(total_size, i + batch_size_)]\n\n for unprocessed_batch in loadBatches(data, size, batch_size):\n processed_batch = processBatch(unprocessed_batch)\n batches.append(processed_batch)\n return batches", "def load_data_list(self):\n\n data = mat4py.loadmat(self.ann_file)['images']\n names = data['name']\n labels = data['class']\n parts = data['set']\n num = len(names)\n assert num == len(labels) == len(parts), 'get error ann file'\n\n if self.split == 'train':\n target_set = {1}\n elif self.split == 'val':\n target_set = {2}\n elif self.split == 'test':\n target_set = {3}\n else:\n target_set = {1, 2}\n\n data_list = []\n for i in range(num):\n if parts[i] in target_set:\n img_name = names[i]\n img_path = self.backend.join_path(self.img_prefix, img_name)\n gt_label = labels[i] - 1\n info = dict(img_path=img_path, gt_label=gt_label)\n data_list.append(info)\n\n return data_list" ]
[ "0.7116717", "0.6971949", "0.6867253", "0.66187465", "0.65909564", "0.6522058", "0.6507134", "0.6502703", "0.6472176", "0.64224887", "0.64197946", "0.64197946", "0.6409885", "0.6405037", "0.638099", "0.6297901", "0.62916416", "0.6273321", "0.6232164", "0.6228044", "0.62100035", "0.619388", "0.6192539", "0.61876273", "0.617474", "0.61606276", "0.61519134", "0.61499006", "0.61281717", "0.6128039", "0.61150753", "0.6111708", "0.6111542", "0.61110896", "0.61090493", "0.61071134", "0.6101637", "0.6086956", "0.60579926", "0.6032005", "0.6024992", "0.6019548", "0.6017373", "0.6011465", "0.6010065", "0.60000634", "0.5978645", "0.596911", "0.595964", "0.5958455", "0.59583694", "0.59521914", "0.595202", "0.595202", "0.595202", "0.594958", "0.59444404", "0.5941284", "0.59343547", "0.59342116", "0.59296846", "0.59216154", "0.59120494", "0.5904878", "0.59041274", "0.59016997", "0.59010535", "0.5900086", "0.58996344", "0.58840376", "0.58830106", "0.58741426", "0.58731467", "0.5872644", "0.5872463", "0.5869319", "0.5865742", "0.58558446", "0.585137", "0.58511966", "0.5842526", "0.58325833", "0.58243495", "0.5822058", "0.58119094", "0.5810837", "0.58093053", "0.5808302", "0.5805563", "0.5805004", "0.57928324", "0.57900864", "0.57870924", "0.578546", "0.57852185", "0.57847345", "0.57847345", "0.5769793", "0.5768504", "0.5762122", "0.5751444" ]
0.0
-1
Truncates a sequence pair in place to the maximum length. This will always truncate the longer sequence one token at a time. This makes more sense than truncating an equal percent of tokens from each, since if one sequence is very short then each token that's truncated likely contains more information than a longer sequence.
def _truncate_seq_pair(self, tokens_a: str, tokens_b: str, max_length: int):
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_length:
            break
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):\r\n # This is a simple heuristic which will always truncate the longer sequence\r\n # one token at a time. This makes more sense than truncating an equal percent\r\n # of tokens from each, since if one sequence is very short then each token\r\n # that's truncated likely contains more information than a longer sequence.\r\n while True:\r\n total_length = len(tokens_a) + len(tokens_b)\r\n if total_length <= max_length:\r\n break\r\n if len(tokens_a) > len(tokens_b):\r\n tokens_a.pop()\r\n else:\r\n tokens_b.pop()", "def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):\r\n # This is a simple heuristic which will always truncate the longer sequence\r\n # one token at a time. This makes more sense than truncating an equal percent\r\n # of tokens from each, since if one sequence is very short then each token\r\n # that's truncated likely contains more information than a longer sequence.\r\n while True:\r\n total_length = len(tokens_a) + len(tokens_b)\r\n if total_length <= max_length:\r\n break\r\n if len(tokens_a) > len(tokens_b):\r\n tokens_a.pop()\r\n else:\r\n tokens_b.pop()", "def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):\r\n # This is a simple heuristic which will always truncate the longer sequence\r\n # one token at a time. This makes more sense than truncating an equal percent\r\n # of tokens from each, since if one sequence is very short then each token\r\n # that's truncated likely contains more information than a longer sequence.\r\n while True:\r\n total_length = len(tokens_a) + len(tokens_b)\r\n if total_length <= max_length:\r\n break\r\n if len(tokens_a) > len(tokens_b):\r\n tokens_a.pop()\r\n else:\r\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\r\n # This is a simple heuristic which will always truncate the longer sequence\r\n # one token at a time. This makes more sense than truncating an equal percent\r\n # of tokens from each, since if one sequence is very short then each token\r\n # that's truncated likely contains more information than a longer sequence.\r\n while True:\r\n total_length = len(tokens_a) + len(tokens_b)\r\n if total_length <= max_length:\r\n break\r\n if len(tokens_a) > len(tokens_b):\r\n tokens_a.pop(0) #For dialogue context\r\n else:\r\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Need to check whether truncation really helps, coz it might remove context! :( \"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):\n\n\t\t# This is a simple heuristic which will always truncate the longer sequence\n\t\t# one token at a time. 
This makes more sense than truncating an equal percent\n\t\t# of tokens from each, since if one sequence is very short then each token\n\t\t# that's truncated likely contains more information than a longer sequence.\n\t\twhile True:\n\t\t\ttotal_length = len(tokens_a) + len(tokens_b)\n\t\t\tif total_length <= max_length:\n\t\t\t\tbreak\n\t\t\tif len(tokens_a) > len(tokens_b):\n\t\t\t\ttokens_a.pop()\n\t\t\telse:\n\t\t\t\ttokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n ####\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):\r\n\r\n # This is a simple heuristic which will always truncate the longer sequence\r\n # one token at a time. This makes more sense than truncating an equal percent\r\n # of tokens from each, since if one sequence is very short then each token\r\n # that's truncated likely contains more information than a longer sequence.\r\n while True:\r\n total_length = len(tokens_a) + len(tokens_b)\r\n if total_length <= max_length:\r\n break\r\n if len(tokens_a) > len(tokens_b):\r\n tokens_a.pop()\r\n else:\r\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n\t# This is a simple heuristic which will always truncate the longer sequence\n\t# one token at a time. 
This makes more sense than truncating an equal percent\n\t# of tokens from each, since if one sequence is very short then each token\n\t# that's truncated likely contains more information than a longer sequence.\n\twhile True:\n\t\ttotal_length = len(tokens_a) + len(tokens_b)\n\t\tif total_length <= max_length:\n\t\t\tbreak\n\t\tif len(tokens_a) > len(tokens_b):\n\t\t\ttokens_a.pop()\n\t\telse:\n\t\t\ttokens_b.pop()", "def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\r\n # This is a simple heuristic which will always truncate the longer sequence\r\n # one token at a time. This makes more sense than truncating an equal percent\r\n # of tokens from each, since if one sequence is very short then each token\r\n # that's truncated likely contains more information than a longer sequence.\r\n while True:\r\n total_length = len(tokens_a) + len(tokens_b)\r\n if total_length <= max_length:\r\n break\r\n if len(tokens_a) > len(tokens_b):\r\n tokens_a.pop()\r\n else:\r\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal\n # percent of tokens from each, since if one sequence is very short then\n # each token that's truncated likely contains more information than a\n # longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\r\n\r\n # This is a simple heuristic which will always truncate the longer sequence\r\n # one token at a time. This makes more sense than truncating an equal percent\r\n # of tokens from each, since if one sequence is very short then each token\r\n # that's truncated likely contains more information than a longer sequence.\r\n while True:\r\n total_length = len(tokens_a) + len(tokens_b)\r\n if total_length <= max_length:\r\n break\r\n if len(tokens_a) > len(tokens_b):\r\n tokens_a.pop()\r\n else:\r\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\r\n\r\n # This is a simple heuristic which will always truncate the longer sequence\r\n # one token at a time. 
This makes more sense than truncating an equal percent\r\n # of tokens from each, since if one sequence is very short then each token\r\n # that's truncated likely contains more information than a longer sequence.\r\n while True:\r\n total_length = len(tokens_a) + len(tokens_b)\r\n if total_length <= max_length:\r\n break\r\n if len(tokens_a) > len(tokens_b):\r\n tokens_a.pop()\r\n else:\r\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\r\n\r\n # This is a simple heuristic which will always truncate the longer sequence\r\n # one token at a time. This makes more sense than truncating an equal percent\r\n # of tokens from each, since if one sequence is very short then each token\r\n # that's truncated likely contains more information than a longer sequence.\r\n while True:\r\n total_len = len(tokens_a) + len(tokens_b)\r\n if total_len <= max_length:\r\n break\r\n if len(tokens_a) > len(tokens_b):\r\n tokens_a.pop()\r\n else:\r\n tokens_b.pop()", "def truncate_seq_pair(tokens_a, tokens_b, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair_RE(tokens_name_1, tokens_name_2, tokens_psg, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_name_1) + len(tokens_name_2) + len(tokens_psg) \n if total_length <= max_length:\n break\n tokens_psg.pop()", "def truncate_seq_pair_test(tokens_a, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a)\n if total_length <= max_length:\n break\n else:\n tokens_a = tokens_a[-max_length:].copy()\n return tokens_a", "def _truncate_seq_pair(tokens_a, tokens_b, max_length, rng):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n if rng is None:\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n else:\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n\n trunc_tokens = tokens_a if len(\n tokens_a) > len(tokens_b) else tokens_b\n assert len(trunc_tokens) >= 1\n\n # We want to sometimes truncate from the front and sometimes from the\n # back to add more randomness and avoid biases.\n if rng.random() < 0.5:\n del trunc_tokens[0]\n else:\n trunc_tokens.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def truncate_seq_pair(tokens_a, tokens_b, max_length=509):\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()", "def _truncate_seq_pair_3(tokens_a, tokens_b, tokens_c, max_length):\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b) + len(tokens_c)\n if total_length <= max_length:\n break\n if len(tokens_b) > len(tokens_c):\n tokens_b.pop()\n else:\n tokens_c.pop()", "def _truncate_seq_pair_two_length(tokens_a, tokens_b, max_length_a, max_length_b):\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length_a + max_length_b:\n break\n if len(tokens_b) > max_length_b:\n tokens_b.pop()\n else: # len(tokens_a) > max_length_a\n tokens_a.pop()", "def _truncate_seq_pair(self, tokens_a, tokens_b):\n try:\n while True:\n if tokens_b:\n total_length = len(tokens_a) + len(tokens_b)\n else:\n total_length = len(tokens_a)\n\n if total_length <= 45 - 3:\n break\n if tokens_b == None:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n except:\n self.logger.error()", "def truncate_seq_pair(self,tokens_a, tokens_b, max_num_tokens, rng):\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_num_tokens:\n break\n\n trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b\n assert len(trunc_tokens) >= 1\n\n # We want to sometimes truncate from the front and sometimes from the\n # back to add more randomness and avoid biases.\n if rng.random() < 0.5:\n del trunc_tokens[0]\n else:\n trunc_tokens.pop()", "def truncate_sequences(self,\n ids,\n pair_ids=None,\n num_tokens_to_remove=0,\n truncation_strategy='longest_first',\n stride=0):\n if num_tokens_to_remove <= 0:\n return ids, pair_ids, []\n\n if truncation_strategy == 'longest_first':\n overflowing_tokens = []\n if 
pair_ids is None or len(ids) <= len(pair_ids):\n for _ in range(num_tokens_to_remove):\n if pair_ids is None or len(ids) >= len(pair_ids):\n overflowing_tokens = [ids[-1]] + overflowing_tokens\n ids = ids[:-1]\n else:\n pair_ids = pair_ids[:-1]\n window_len = min(len(ids), stride)\n else:\n for _ in range(num_tokens_to_remove):\n if pair_ids is None or len(ids) > len(pair_ids):\n overflowing_tokens = [ids[-1]] + overflowing_tokens\n ids = ids[:-1]\n else:\n pair_ids = pair_ids[:-1]\n window_len = min(len(ids), stride)\n if window_len > 0:\n overflowing_tokens = ids[-window_len:] + overflowing_tokens\n elif truncation_strategy == 'only_first':\n assert len(ids) > num_tokens_to_remove\n window_len = min(len(ids), stride + num_tokens_to_remove)\n overflowing_tokens = ids[-window_len:]\n ids = ids[:-num_tokens_to_remove]\n elif truncation_strategy == 'only_second':\n assert pair_ids is not None and len(pair_ids) > num_tokens_to_remove\n window_len = min(len(pair_ids), stride + num_tokens_to_remove)\n overflowing_tokens = pair_ids[-window_len:]\n pair_ids = pair_ids[:-num_tokens_to_remove]\n elif truncation_strategy == 'do_not_truncate':\n raise ValueError(\n \"Input sequence are too long for max_length. Please select a truncation strategy.\"\n )\n else:\n raise ValueError(\n \"Truncation_strategy should be selected in ['longest_first', 'only_first', 'only_second', 'do_not_truncate']\"\n )\n return (ids, pair_ids, overflowing_tokens)", "def _truncate_tokens(tokens_a, tokens_b, max_length):\n while len(tokens_a) + len(tokens_b) > max_length:\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop(0)\n else:\n tokens_b.pop()\n return tokens_a, tokens_b", "def truncate(data, sequence_length=3000):\n res = []\n for sample in data:\n if len(sample) > sequence_length:\n sample = sample[:sequence_length]\n res.append(sample)\n else:\n str_added = [PAD_STR] * (sequence_length - len(sample))\n sample += str_added\n res.append(sample)\n return res", "def _truncate(self):\n dif = len(self) - self._maxLen\n if dif > 0:\n #return\n self[:dif] = []", "def truncate_seqs(self, seqinfo, kvinfo, cyst_positions, debug=False):\n\n lengths = self.get_truncation_parameters(seqinfo, cyst_positions, debug)\n\n # truncate all the sequences to these lengths\n for query, seq in seqinfo.items():\n cpos = cyst_positions[query]\n istart = cpos - lengths['min']['left']\n istop = cpos + lengths['min']['right']\n chopleft = istart\n chopright = len(seq) - istop\n if debug:\n print ' chop %d %d %s' % (chopleft, chopright, query)\n print ' %d --> %d (%d-%d --> %d-%d) %s' % (len(seq), len(seq[istart : istop]),\n -1 if kvinfo is None else kvinfo[query]['min'], -1 if kvinfo is None else kvinfo[query]['max'],\n -1 if kvinfo is None else (kvinfo[query]['min'] - chopleft), -1 if kvinfo is None else (kvinfo[query]['max'] - chopleft),\n query)\n seqinfo[query] = seq[istart : istop]\n if kvinfo is not None:\n kvinfo[query]['min'] -= chopleft\n kvinfo[query]['max'] -= chopleft", "def truncate_sequences(\n self,\n ids: List[int],\n token_boxes: List[List[int]],\n pair_ids: Optional[List[int]] = None,\n pair_token_boxes: Optional[List[List[int]]] = None,\n labels: Optional[List[int]] = None,\n num_tokens_to_remove: int = 0,\n truncation_strategy: Union[str, TruncationStrategy] = \"longest_first\",\n stride: int = 0,\n ) -> Tuple[List[int], List[int], List[int]]:\n if num_tokens_to_remove <= 0:\n return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], []\n\n if not isinstance(truncation_strategy, TruncationStrategy):\n 
truncation_strategy = TruncationStrategy(truncation_strategy)\n\n overflowing_tokens = []\n overflowing_token_boxes = []\n overflowing_labels = []\n if truncation_strategy == TruncationStrategy.LONGEST_FIRST:\n for _ in range(num_tokens_to_remove):\n if pair_ids is None or len(ids) > len(pair_ids):\n if not overflowing_tokens:\n window_len = min(len(ids), stride + 1)\n else:\n window_len = 1\n overflowing_tokens.extend(ids[-window_len:])\n overflowing_token_boxes.extend(token_boxes[-window_len:])\n overflowing_labels.extend(labels[-window_len:])\n ids = ids[:-1]\n token_boxes = token_boxes[:-1]\n labels = labels[:-1]\n else:\n if not overflowing_tokens:\n window_len = min(len(pair_ids), stride + 1)\n else:\n window_len = 1\n overflowing_tokens.extend(pair_ids[-window_len:])\n overflowing_token_boxes.extend(pair_token_boxes[-window_len:])\n pair_ids = pair_ids[:-1]\n pair_token_boxes = pair_token_boxes[:-1]\n elif truncation_strategy == TruncationStrategy.ONLY_FIRST:\n if len(ids) > num_tokens_to_remove:\n window_len = min(len(ids), stride + num_tokens_to_remove)\n overflowing_tokens = ids[-window_len:]\n overflowing_token_boxes = token_boxes[-window_len:]\n overflowing_labels = labels[-window_len:]\n ids = ids[:-num_tokens_to_remove]\n token_boxes = token_boxes[:-num_tokens_to_remove]\n labels = labels[:-num_tokens_to_remove]\n else:\n logger.error(\n f\"We need to remove {num_tokens_to_remove} to truncate the input \"\n f\"but the first sequence has a length {len(ids)}. \"\n f\"Please select another truncation strategy than {truncation_strategy}, \"\n \"for instance 'longest_first' or 'only_second'.\"\n )\n elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:\n if len(pair_ids) > num_tokens_to_remove:\n window_len = min(len(pair_ids), stride + num_tokens_to_remove)\n overflowing_tokens = pair_ids[-window_len:]\n overflowing_token_boxes = pair_token_boxes[-window_len:]\n pair_ids = pair_ids[:-num_tokens_to_remove]\n pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove]\n else:\n logger.error(\n f\"We need to remove {num_tokens_to_remove} to truncate the input \"\n f\"but the second sequence has a length {len(pair_ids)}. \"\n f\"Please select another truncation strategy than {truncation_strategy}, \"\n \"for instance 'longest_first' or 'only_first'.\"\n )\n\n return (\n ids,\n token_boxes,\n pair_ids,\n pair_token_boxes,\n labels,\n overflowing_tokens,\n overflowing_token_boxes,\n overflowing_labels,\n )", "def range_truncate(s, max_len=8) :\n if len(s) > max_len :\n return s[0:2] + \"...\" + s[-2:]\n return s", "def trunc(fname, maxlen):\n front, back = fname.rsplit('.', 1)\n front_pieces = front.split(' ')\n new_max = maxlen - len(back) - 1\n\n while len(front) >= new_max:\n front_pieces = front_pieces[:-1]\n if front_pieces[-1] == '-':\n front_pieces = front_pieces[:-1]\n front = ' '.join(front_pieces)\n\n new_fname = front + '.' + back\n return new_fname.replace(',.' + back, '.' 
+ back) # remove trailing commas", "def _truncate_string(cls, s1, s2, max_len):\n s1_len = len(s1.split(' '))\n s2_len = len(s2.split(' '))\n MIN_S2_LEN = 6\n if s1_len + s2_len <= max_len or s2_len < MIN_S2_LEN:\n return s2, False\n\n truncate_point = MIN_S2_LEN if s1_len > max_len else max_len - s1_len\n\n return ' '.join(s2.split(' ')[0:truncate_point]), True", "def _smart_truncate(self, text, length, suffix='...'):\n\n slen = len(suffix)\n pattern = r'^(.{0,%d}\\S)\\s+\\S+' % (length-slen-1)\n if len(text) > length:\n match = re.match(pattern, text)\n if match:\n length0 = match.end(0)\n length1 = match.end(1)\n if abs(length0+slen-length) < abs(length1+slen-length):\n return match.group(0) + suffix\n else:\n return match.group(1) + suffix\n return text", "def _smart_truncate(self, text, length, suffix='...'):\n\n slen = len(suffix)\n pattern = r'^(.{0,%d}\\S)\\s+\\S+' % (length-slen-1)\n if len(text) > length:\n match = re.match(pattern, text)\n if match:\n length0 = match.end(0)\n length1 = match.end(1)\n if abs(length0+slen-length) < abs(length1+slen-length):\n return match.group(0) + suffix\n else:\n return match.group(1) + suffix\n return text", "def auto_truncate(val):\n return val[:7]", "def smart_truncate(text, length=100, suffix='...'):\n\n slen = len(suffix)\n pattern = r'^(.{0,%d}\\S)\\s+\\S+' % (length - slen - 1)\n if len(text) > length:\n match = re.match(pattern, text)\n if match:\n length0 = match.end(0)\n length1 = match.end(1)\n if abs(length0 + slen - length) < abs(length1 + slen - length):\n return match.group(0) + suffix\n else:\n return match.group(1) + suffix\n return text", "def truncate(self, parts_a: List[Tuple[List[int], bool]], parts_b: List[Tuple[List[int], bool]], answer: List[int],\n max_length: int):\n total_len = self._seq_length(parts_a) + self._seq_length(parts_b)\n if answer:\n total_len += len(answer)\n total_len += num_special_tokens_to_add(\n parts_a, parts_b, answer, add_cls=True, add_sep=False, add_piece=True)\n num_tokens_to_remove = total_len - max_length\n\n if num_tokens_to_remove <= 0:\n return False\n\n for _ in range(num_tokens_to_remove):\n if self._seq_length(parts_a, only_shortenable=True) > self._seq_length(parts_b, only_shortenable=True):\n self._remove_last(parts_a)\n else:\n self._remove_last(parts_b)\n return True", "def truncate(string):", "def truncate(value, size=72):\n value = value.replace(\"\\n\", \" \").replace(\"\\r\", \"\").replace(\"\\t\", \" \")\n value = value.strip()\n value = WHITESPACES.sub(\" \", value)\n\n if len(value) > size:\n value = \"{0}...\".format(value[:size-3])\n\n return value", "def pad_to_max_length(self, sequence):\n sequence = sequence[:self.max_seq_length]\n n = len(sequence)\n #return sequence + ['[PAD]'] * (self.max_seq_length - n)\n return sequence + [0] *(self.max_seq_length - n)", "def truncate(text_str: str, max_length: int = 256) -> str:\n truncated_str = text_str[:max_length]\n if len(truncated_str) == max_length:\n i = max(0, (max_length - 3) // 2)\n j = max(0, max_length - 3 - i)\n truncated_str = text_str[:i] + text_str[len(text_str)-j:]\n truncated_str = truncated_str[:i] + '...' 
+ truncated_str[len(truncated_str)-j:]\n return truncated_str", "async def _truncate_adjusted_tick_data(self, pair: str):\n\n truncate = len(self.close_times[pair]) - self.min_tick_length\n if truncate > 60:\n del self.base_24hr_volumes[pair][1][:truncate]\n del self.adjusted_close_values[pair][:truncate]", "def truncate_middle(path: str, acceptable_len: int):\n if len(path) <= acceptable_len:\n return path\n # half of the size, minus the 3 .'s\n n_2 = int(acceptable_len / 2 - 3)\n # whatever's left\n n_1 = int(acceptable_len - n_2 - 3)\n return f\"{path[:n_1]}...{path[-n_2:]}\"", "def pad_or_trim(seq, max_len=1000):\n n, m = seq.shape\n \n if n > max_len:\n seq = seq[-max_len:, :]\n elif n < max_len:\n if sparse.issparse(seq):\n pad_csr(seq, (max_len, m))\n else:\n seq = np.r_[seq, np.zeros((max_len - n, m))]\n return seq", "def truncate(text, max_length=140, pad_with_dot=True):\n if len(text) > max_length:\n if pad_with_dot:\n return text[:max_length-3] + \"...\"\n else:\n return text[:max_length]\n return text", "def trunc(s, max_pos=75): \n length = len(s)\n if length <= max_pos:\n return s\n else:\n end = s.rfind(' ',0,max_pos)\n if end > 0 and end > max_pos-5:\n return s[0:end] + '...'\n else:\n if s[max_pos-1] == '.':\n max_pos = max_pos - 1\n return s[0:max_pos] + '...'", "def trunc_string(string, length=50):\n if len(string)>length:\n return \"%s...\" % string[:length-3]\n else:\n return string", "def _truncate(text: str, max_len: int) -> str:\n if len(text) > max_len:\n return text[:max_len - 1] + \"…\"\n return text", "def truncate_reads(tmp_dir, infile, unaligned_set, n, min_len):\n\n outfile = \"{0}/truncated.fastq\".format(tmp_dir)\n with ps.FastxFile(infile, \"r\") as inf, open(outfile, \"w\") as outf:\n for entry in inf:\n if entry.name in unaligned_set or n == min_len:\n entry.sequence = entry.sequence[:n]\n entry.quality = entry.quality[:n]\n outf.write(str(entry) + \"\\n\")\n return outfile", "def truncate(cls, value, target_len=200, ellipsis='...'):\n # open tags are pushed on here, then popped when\n # the matching close tag is found\n stack = []\n # string to be returned\n retval = []\n # number of characters (not counting markup) placed in retval so far\n length = 0\n tokens = Tokenizer(value)\n tok = tokens.next_token()\n while tok != tokens.token_end:\n if not length < target_len:\n retval.append(ellipsis)\n break\n if tok.__class__.__name__ == 'OpenTag':\n stack.append(tok)\n retval.append(tok.as_string())\n elif tok.__class__.__name__ == 'CloseTag':\n if stack[-1].tag == tok.tag:\n stack.pop()\n retval.append(tok.as_string())\n else:\n raise UnbalancedError(tok.as_string())\n elif tok.__class__.__name__ == 'SelfClosingTag':\n retval.append(tok.as_string())\n else:\n retval.append(tok)\n length += 1\n tok = tokens.next_token()\n while len(stack) > 0:\n tok = CloseTag(stack.pop().tag)\n retval.append(tok.as_string())\n return ''.join(retval)", "def truncate(self, count=0, reverse=True):\n if reverse:\n return self.value[:(len(self.value) - count)]\n else:\n return self.value[count:]", "async def _truncate_tick_data(self, pair: str):\n\n truncate = len(self.close_times[pair]) - self.min_tick_length\n if truncate > 60:\n del self.base_24hr_volumes[pair][0][:truncate]\n del self.close_values[pair][:truncate]\n del self.close_times[pair][:truncate]", "def limit_size(msg, max_size, trunc_symbol=\"...\"):\n if len(msg) > max_size:\n msg = msg[:max_size - len(trunc_symbol)] + trunc_symbol\n return msg", "def truncate_text_by_num_tokens(text, max_tokens, tok_separator=\" 
\"):\n _toks = text.split(tok_separator)\n return tok_separator.join(_toks[:min(max_tokens, len(_toks))])", "def truncate_pad(line, num_steps, padding_token):\n if len(line) > num_steps:\n return line[:num_steps] # Truncate\n return line + [padding_token] * (num_steps - len(line)) # Pad", "def truncate_pad(line, num_steps, padding_token):\n if len(line) > num_steps:\n return line[:num_steps] # Truncate\n return line + [padding_token] * (num_steps - len(line)) # Pad", "def truncate_pad(line, num_steps, padding_token):\n if len(line) > num_steps:\n return line[:num_steps] # Truncate\n return line + [padding_token] * (num_steps - len(line)) # Pad", "def truncate(x: str, limit: int) -> str:\n return \" \".join(x.split()[:limit])", "def shrink_seq(mrnaseq, mrna_frag, mrna_frag_target, total_length=50):\n # Prepare sequences with no gaps\n mrnaseq_nogap = mrnaseq.replace(\"-\", \"\")\n mrna_frag_nogap = mrna_frag.replace(\"-\", \"\")\n if len(mrna_frag_nogap) < total_length:\n syserr(mrna_frag_nogap)\n syserr(mrnaseq)\n syserr(mrna_frag)\n syserr(mrna_frag_target)\n raise Exception(\n \"Check your sequences maybe you should extend, not shrink them\")\n span = re.search(mrna_frag_nogap, mrnaseq_nogap).span()\n\n # Decide which type of extension to do\n gap_pos_mean = mean(\n [i for i, x in enumerate(mrna_frag_target) if x == \"-\"])\n list_median = median([i for i in range(len(mrna_frag_target))])\n\n # this ratio gives us relative position of the gaps\n ratio = gap_pos_mean / list_median\n\n # Based on the ratio do the shrinkage of the sequence\n if ratio > 0.5 and ratio < 1.5: # extend both sides\n li = span[0]\n ui = span[1]\n length = ui - li\n if length < total_length:\n return -1\n elif length == total_length:\n return mrnaseq_nogap[li:ui]\n else:\n dif = abs(total_length - length)\n quot = dif // 2 # this is explicit integer division\n l_ext = li + quot\n u_ext = ui - (dif - quot)\n if (u_ext < 0) or (u_ext > len(mrnaseq_nogap) - 1):\n return \"NA\"\n else:\n return mrnaseq_nogap[l_ext:u_ext]\n elif ratio <= 0.5: # trim left - it means upstream (5'end)\n li = span[0]\n ui = span[1]\n length = ui - li\n dif = len(mrna_frag_nogap) - total_length\n return mrnaseq_nogap[li + dif:ui]\n elif ratio >= 1.5: # extend right - it means downstream (3'end)\n li = span[0]\n ui = span[1]\n length = ui - li\n dif = len(mrna_frag_nogap) - total_length\n return mrnaseq_nogap[li:ui - dif]", "def _truncate_string(self, text, max_size):\n import textwrap\n\n if len(text) <= max_size:\n return text\n return textwrap.wrap(text, max_size-3)[0] + \"...\"", "def trunc_inplace(a):", "def test_truncate_seqs(self):\r\n\r\n base_pos = 5\r\n\r\n fasta_seqs = {'seq1': 'GAAATCAAGAATAC',\r\n 'seq2': 'ATAAACAAGAT'}\r\n qual_scores = {'seq1': array(map(str, [20, 10, 15, 25, 24, 25, 27])),\r\n 'seq2': array(map(str, [22, 21, 15, 12, 22, 25, 27, 28]))}\r\n\r\n expected_fasta = {'seq1': 'GAAAT',\r\n 'seq2': 'ATAAA'}\r\n\r\n expected_qual = {'seq1': map(str, array([20, 10, 15, 25, 24])),\r\n 'seq2': map(str, array([22, 21, 15, 12, 22]))}\r\n\r\n actual_fasta_seqs, actual_qual_scores =\\\r\n truncate_seqs(fasta_seqs, qual_scores, base_pos)\r\n\r\n self.assertDictEqual(actual_fasta_seqs, expected_fasta)\r\n self.assertItemsEqual(expected_qual, actual_qual_scores)", "def truncate_signals(signals, length=None, samplerate=None):\n if length is None:\n return signals\n if samplerate is not None:\n length = round(samplerate * length)\n\n def truncation(signal):\n return signal[..., :length]\n\n return _apply_to_signals(truncation, 
signals)", "def pad_tokens(x, max_length, pad_token_id,\n truncate_from=\"left\",\n pad_from=\"left\"):\n assert truncate_from in (\"left\", \"right\")\n assert pad_from in (\"left\", \"right\")\n if len(x) > max_length:\n if truncate_from == \"left\":\n return x[-max_length:]\n else:\n return x[:max_length]\n elif len(x) < max_length:\n padding = [pad_token_id] * (max_length - len(x))\n if pad_from == \"left\":\n return padding + x\n else:\n return x + padding\n else:\n return x", "def truncate(text=\"\", max_len=50):\n return text if len(text) < max_len else text[:max_len]", "def truncatesmart(value, limit=80):\n\n try:\n limit = int(limit)\n # invalid literal for int()\n except ValueError:\n # Fail silently.\n return value\n\n # Make sure it's unicode\n value = unicode(value)\n\n # Return the string itself if length is smaller or equal to the limit\n if len(value) <= limit:\n return value\n\n # Cut the string\n value = value[:limit]\n\n return value + '...'", "def truncate(s, max_len=10) :\n MAX_LEN = max_len\n display_string = escape_for_display(s)\n if len(s) == 0 :\n return display_string # Display empty string token.\n if len(s) > MAX_LEN :\n display_string = display_string[0:MAX_LEN] + \"...\"\n return display_string", "def truncate_description(description):\n if len(description) <= 160 :\n return description\n\n cut_desc = \"\"\n character_counter = 0\n for i, letter in enumerate(description) :\n character_counter += 1\n if character_counter > 160 :\n if letter == ' ' :\n return cut_desc+\"...\"\n else :\n return cut_desc.rsplit(' ',1)[0]+\"...\"\n cut_desc += description[i]\n return cut_desc", "def pad_seq(seq, max_seq_len=0):\n if max_seq_len:\n pad_len = max_seq_len - len(seq)\n if pad_len > 0:\n return np.concatenate([seq, np.zeros(pad_len, dtype=np.int64)])\n elif pad_len < 0: # chop to fit\n two_last_tokens = seq[-2:]\n out = seq[:max_seq_len]\n out[-2:] = two_last_tokens\n return out.astype(np.int64)\n return seq.astype(np.int64)", "def fast_forward_to_length(sequences, length):\n return itertools.dropwhile(lambda seq: len(seq) != length, sequences)", "def remove_every_other(seq):\n length = len(seq)\n new_seq = seq[0:length:2]\n return new_seq", "def truncate_string(text, length):\n word_tokens = word_tokenize(text)\n truncated = word_tokens[:length]\n truncated_text = \" \".join(truncated)\n return truncated_text", "def truncate(text, length=100, suffix=\"...\"):\n if len(text) > length:\n return text[: length - len(suffix)] + suffix\n else:\n return text", "def truncate(s):\n truncate = whenFloating $ \\c e ->\n if dangerouslySmall c e\n then 0\n else fromInteger $ c `quotInteger` magnitude (-e)", "def test_truncates_to_correct_size(self):\n truncated = truncateFeedback(self.simpleTestString)\n self.assertTrue(len(truncated) < 16000)\n truncated = truncateFeedback(self.complicatedString)\n self.assertTrue(len(truncated) < 16000)" ]
[ "0.8350219", "0.8350219", "0.8350219", "0.8334177", "0.83308214", "0.83209616", "0.8302651", "0.83008575", "0.8299758", "0.8299758", "0.8289869", "0.82892597", "0.8288885", "0.8280558", "0.82622397", "0.82547146", "0.8251895", "0.8251895", "0.8251895", "0.8251895", "0.8251895", "0.8251895", "0.8251895", "0.8251895", "0.8251895", "0.8251895", "0.8251895", "0.8251895", "0.8251895", "0.8251895", "0.8251895", "0.8251895", "0.8251895", "0.82393414", "0.8221394", "0.8221394", "0.8219125", "0.81621796", "0.8049963", "0.7977625", "0.78859013", "0.77645457", "0.77645457", "0.76782703", "0.7595379", "0.7472622", "0.7395533", "0.7118285", "0.6879421", "0.65802157", "0.6526709", "0.6285459", "0.62153745", "0.6122734", "0.6067807", "0.6006387", "0.59978825", "0.5971672", "0.5971672", "0.5859187", "0.58416826", "0.5841678", "0.5835902", "0.5750511", "0.572871", "0.5707871", "0.5701778", "0.5690294", "0.56891525", "0.56858164", "0.56840295", "0.56005436", "0.5579431", "0.55693763", "0.556896", "0.55380267", "0.5531859", "0.55277276", "0.5526574", "0.5512613", "0.5512613", "0.5512613", "0.5498131", "0.54882157", "0.54801923", "0.5478129", "0.5463013", "0.54561543", "0.5445229", "0.5441065", "0.54367125", "0.54260373", "0.5415514", "0.5379853", "0.5368602", "0.5351211", "0.5350351", "0.5349813", "0.5325778", "0.5324221" ]
0.78222877
41
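For context, the truncation heuristic captured in the record above can be run in isolation. The following is a minimal standalone sketch, not part of the dataset: the free-function form, the type hints, and the example token-id lists are illustrative assumptions, chosen only to show that the longer sequence loses tokens one at a time until the combined length fits the budget.

from typing import List

def truncate_seq_pair(tokens_a: List[int], tokens_b: List[int], max_length: int) -> None:
    # Pop from whichever sequence is currently longer until the pair fits max_length.
    while len(tokens_a) + len(tokens_b) > max_length:
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()

# Illustrative values (hypothetical token ids): 6 + 3 tokens trimmed to a budget of 7.
a = [101, 7592, 2088, 2003, 2307, 102]
b = [101, 2748, 102]
truncate_seq_pair(a, b, max_length=7)
print(len(a), len(b))  # -> 4 3: only the longer sequence was truncated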
Converts examples into Text-to-Text batches to be used with a model like T5. Inputs are prefixed with a text prompt that indicates the task to perform.
def convert_examples_to_features(self):
    features = []
    for ex_index, example in enumerate(self.examples):
        if ex_index % 10000 == 0:
            logging.info(f"Writing example {ex_index} of {len(self.examples)}")
        text_to_text_query = self.processor.get_t5_prompted_query(example.text_a, example.text_b)
        enc_query = self.tokenizer.text_to_ids(text_to_text_query)
        if len(enc_query) > self.max_seq_length:
            enc_query = enc_query[: self.max_seq_length]
        dec_query = (
            [self.tokenizer.bos_id]
            + self.tokenizer.text_to_ids(self.processor.label2string(example.label))
            + [self.tokenizer.eos_id]
        )
        dec_input = dec_query[:-1]
        labels = dec_query[1:]
        features.append([enc_query, dec_input, labels])
    return features
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batch_tokenize_fn(examples):\n sources = examples[config.source_lang]\n targets = examples[config.target_lang]\n model_inputs = config.tokenizer(sources, max_length=config.max_source_length, truncation=True)\n\n # setup the tokenizer for targets,\n # huggingface expects the target tokenized ids to be stored in the labels field\n with config.tokenizer.as_target_tokenizer():\n labels = config.tokenizer(targets, max_length=config.max_target_length, truncation=True)\n\n model_inputs[\"labels\"] = labels[\"input_ids\"]\n return model_inputs", "def create_InputExamples(self, data, labels):\n examples = []\n for (i, u_tips) in enumerate(data):\n for text in u_tips:\n examples.append(\n run_classifier.InputExample(\n guid=None,\n text_a=text,\n text_b=None,\n label=np.argmax(labels[i])\n )\n )\n return examples", "def create_examples(topics, sentences):\n input_examples = []\n \n for i in range(len(sentences)):\n input_examples.append(InputExample(text_a=topics[i], text_b=sentences[i], label='NoArgument'))\n return input_examples", "def read_examples_string(input_text):#(input_file, input_text):\n examples = []\n unique_id = 0\n \n with io.StringIO(input_text) as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline())\n if not line:\n break\n\n line = line.strip()\n text_a = None\n text_b = None\n m = re.match(r\"^(.*) \\|\\|\\| (.*)$\", line)\n\n if m is None:\n text_a = line\n else:\n text_a = m.group(1)\n text_b = m.group(2)\n\n examples.append(InputExample(unique_id=unique_id,\n text_a=text_a, \n text_b=text_b))\n unique_id += 1\n return examples", "def test_text_task(self):\n args = BASE_ARGS.copy()\n args.update(TEXT_ARGS)\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 1.5, 'failed to train image_seq2seq on text task'\n )", "def batchify(self, observations):\n # valid examples\n exs = [ex for ex in observations if 'text' in ex]\n # the indices of the valid (non-empty) tensors\n valid_inds = [i for i, ex in enumerate(observations) if 'text' in ex]\n\n # set up the input tensors\n batchsize = len(exs)\n if batchsize == 0:\n return None, None, None\n # tokenize the text\n parsed_x = [deque(maxlen=self.truncate) for _ in exs]\n for dq, ex in zip(parsed_x, exs):\n dq += self.parse(ex['text'])\n # parsed = [self.parse(ex['text']) for ex in exs]\n max_x_len = max((len(x) for x in parsed_x))\n for x in parsed_x:\n # left pad with zeros\n x.extendleft([self.fairseq_dict.pad()] * (max_x_len - len(x)))\n xs = torch.LongTensor(parsed_x)\n\n # set up the target tensors\n ys = None\n if 'labels' in exs[0]:\n # randomly select one of the labels to update on, if multiple\n labels = [random.choice(ex.get('labels', [''])) for ex in exs]\n parsed_y = [deque(maxlen=self.truncate) for _ in labels]\n for dq, y in zip(parsed_y, labels):\n dq.extendleft(reversed(self.parse(y)))\n for y in parsed_y:\n y.append(self.fairseq_dict.eos())\n # append EOS to each label\n max_y_len = max(len(y) for y in parsed_y)\n for y in parsed_y:\n y += [self.fairseq_dict.pad()] * (max_y_len - len(y))\n ys = torch.LongTensor(parsed_y)\n return xs, ys, valid_inds", "def args_batch_to_text(args_batch: ArgsBatch) -> Text:\n lines = []\n for args in args_batch:\n lines.append('; '.join(str(a) for a in args))\n return '\\n'.join(lines)", "def _create_examples(self, lines, kb_data, set_type):\n examples = []\n for idx, line in enumerate(lines):\n item = json.loads(line.strip())\n question_id = \"%s-%s\" % (set_type, idx)\n \n context_a_list = kb_data[idx]['answerA']\n 
context_b_list = kb_data[idx]['answerB']\n context_c_list = kb_data[idx]['answerC']\n\n context_a = \"\"\n for l in context_a_list[:1]:\n context_a += l.replace(\"\\n\",\". \")\n context_a = context_a[:-1]\n\n context_b = \"\"\n for l in context_b_list[:1]:\n context_b += l.replace(\"\\n\",\". \")\n context_b = context_b[:-1]\n\n context_c = \"\"\n for l in context_c_list[:1]:\n context_c += l.replace(\"\\n\",\". \")\n context_c = context_c[:-1]\n \n \n question = item[\"context\"] + item[\"question\"]\n endings = [item[\"answerA\"],item[\"answerB\"],item[\"answerC\"] ]\n label = item[\"correct\"]\n #race_id = \"%s-%s\" % (set_type, data_raw[\"race_id\"])\n #article = data_raw[\"article\"]\n #for i in range(len(data_raw[\"answers\"])):\n #truth = str(ord(data_raw[\"answers\"][i]) - ord(\"A\"))\n #question = data_raw[\"questions\"][i]\n #options = data_raw[\"options\"][i]\n\n examples.append(\n InputExample(\n example_id=question_id,\n question=question,\n contexts=[context_a,context_b,context_c],\n endings=[endings[0], endings[1], endings[2]],#, options[3]\n label=label,\n )\n )\n return examples", "def _create_examples(self, lines):\n examples = []\n for (i, line) in enumerate(lines):\n logger.info(line)\n guid = int(line[0])\n label = int(line[1])\n text = \" \".join(clean_tokens(line[3].split()))\n if guid < 1000:\n args_char_offset = find_char_offsets(text, line[2].split(\"-\"))\n else:\n args_char_offset = [int(i) for i in line[2].split('-')]\n examples.append(\n InputExample(guid=guid, text=text, args_char_offset=args_char_offset, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n guid = line[0]\n text_a = line[1] + \" . \" + line[2]\n text_b = line[-1]\n label = 0\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def convert_examples_to_features(self):\n features = []\n max_label_len = 0\n # find ou the max label length\n labels_list = []\n for ex_index, example in enumerate(self.examples):\n processor = example.processor\n label_ids = self.tokenizer.text_to_ids(processor.label2string(example.label)) + [self.tokenizer.eos_id]\n max_label_len = max(len(label_ids), max_label_len)\n labels_list.append(label_ids)\n if self.max_seq_length_decoder is None:\n self.max_seq_length_decoder = max_label_len\n else:\n self.max_seq_length_decoder = max(\n self.max_seq_length_decoder, max_label_len\n ) # take the max of the two to be conservative\n for ex_index, example in enumerate(self.examples):\n taskname = example.taskname\n taskname_ids = self.tokenizer.text_to_ids(taskname)\n processor = example.processor\n if ex_index % 10000 == 0:\n logging.info(f\"Writing example {ex_index} of {len(self.examples)}\")\n label_ids = labels_list[ex_index]\n enc_query = processor.get_ptune_query(\n example.content,\n self.pseudo_token_id,\n self.max_seq_length - self.max_seq_length_decoder + 1,\n self.templates,\n self.tokenizer,\n )\n input_ids = enc_query + label_ids[:-1]\n labels = [SMALL_NUM for i in range(len(enc_query) - 1)] + label_ids\n features.append([input_ids, labels, enc_query, taskname_ids])\n return features", "def qkgnn_convert_examples_to_features(\r\n examples,\r\n tokenizer,\r\n max_length=512,\r\n task=None,\r\n label_list=None,\r\n output_mode=None,\r\n pad_on_left=False,\r\n pad_token=0,\r\n pad_token_segment_id=0,\r\n mask_padding_with_zero=True,\r\n):\r\n\r\n if task is not None:\r\n processor = gnn_processors[task]()\r\n if label_list is 
None:\r\n label_list = processor.get_labels()\r\n logger.info(\"Using label list %s for task %s\" % (label_list, task))\r\n if output_mode is None:\r\n output_mode = gnn_output_modes[task]\r\n logger.info(\"Using output mode %s for task %s\" % (output_mode, task))\r\n label_map = {label: i for i, label in enumerate(label_list)}\r\n\r\n\r\n # TODO : optimize this part if out of memory error occurs\r\n def convert_inputs_to_processed(inputs):\r\n input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"]\r\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\r\n # tokens are attended to.\r\n attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\r\n\r\n # Zero-pad up to the sequence length.\r\n padding_length = max_length - len(input_ids)\r\n if pad_on_left:\r\n input_ids = ([pad_token] * padding_length) + input_ids\r\n attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask\r\n token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids\r\n else:\r\n input_ids = input_ids + ([pad_token] * padding_length)\r\n attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)\r\n token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)\r\n assert len(input_ids) == max_length, \"Error with input length {} vs {}\".format(len(input_ids), max_length)\r\n assert len(attention_mask) == max_length, \"Error with input length {} vs {}\".format(\r\n len(attention_mask), max_length\r\n )\r\n assert len(token_type_ids) == max_length, \"Error with input length {} vs {}\".format(\r\n len(token_type_ids), max_length\r\n )\r\n\r\n return input_ids, attention_mask, token_type_ids\r\n\r\n # TODO : optimize this part if out of memory error occurs\r\n def qkgnn_convert_single_exampl_to_feature(example, ex_index=10):\r\n inputs_q = tokenizer.encode_plus(example.text_a, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_k = tokenizer.encode_plus(example.text_b, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_qk = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_qk1 = tokenizer.encode_plus(example.text_a, example.k1, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_qk2 = tokenizer.encode_plus(example.text_a, example.k2, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_qk3 = tokenizer.encode_plus(example.text_a, example.k3, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_kq1 = tokenizer.encode_plus(example.text_b, example.q1, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_kq2 = tokenizer.encode_plus(example.text_b, example.q2, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_kq3 = tokenizer.encode_plus(example.text_b, example.q3, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_kk1 = tokenizer.encode_plus(example.text_b, example.k1, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_kk2 = tokenizer.encode_plus(example.text_b, example.k2, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_kk3 = tokenizer.encode_plus(example.text_b, example.k3, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_qq1 = tokenizer.encode_plus(example.text_a, example.q1, add_special_tokens=True, max_length=max_length, 
truncation=True)\r\n inputs_qq2 = tokenizer.encode_plus(example.text_a, example.q2, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_qq3 = tokenizer.encode_plus(example.text_a, example.q3, add_special_tokens=True, max_length=max_length, truncation=True)\r\n # generate [input_ids, input_mask, segment_ids]\r\n # for q self\r\n input_ids_q, input_mask_q, segment_ids_q = convert_inputs_to_processed(inputs_q)\r\n # for k self\r\n input_ids_k, input_mask_k, segment_ids_k = convert_inputs_to_processed(inputs_k)\r\n # for qk\r\n input_ids_qk, input_mask_qk, segment_ids_qk = convert_inputs_to_processed(inputs_qk)\r\n # for qk1\r\n input_ids_qk1, input_mask_qk1, segment_ids_qk1 = convert_inputs_to_processed(inputs_qk1)\r\n # for qk2\r\n input_ids_qk2, input_mask_qk2, segment_ids_qk2 = convert_inputs_to_processed(inputs_qk2)\r\n # for qk3\r\n input_ids_qk3, input_mask_qk3, segment_ids_qk3 = convert_inputs_to_processed(inputs_qk3)\r\n # for kq1\r\n input_ids_kq1, input_mask_kq1, segment_ids_kq1 = convert_inputs_to_processed(inputs_kq1)\r\n # for kq2\r\n input_ids_kq2, input_mask_kq2, segment_ids_kq2 = convert_inputs_to_processed(inputs_kq2)\r\n # for kq3\r\n input_ids_kq3, input_mask_kq3, segment_ids_kq3 = convert_inputs_to_processed(inputs_kq3)\r\n # for kk1\r\n input_ids_kk1, input_mask_kk1, segment_ids_kk1 = convert_inputs_to_processed(inputs_kk1)\r\n # for kk2\r\n input_ids_kk2, input_mask_kk2, segment_ids_kk2 = convert_inputs_to_processed(inputs_kk2)\r\n # for kk3\r\n input_ids_kk3, input_mask_kk3, segment_ids_kk3 = convert_inputs_to_processed(inputs_kk3)\r\n # for qq1\r\n input_ids_qq1, input_mask_qq1, segment_ids_qq1 = convert_inputs_to_processed(inputs_qq1)\r\n # for qq2\r\n input_ids_qq2, input_mask_qq2, segment_ids_qq2 = convert_inputs_to_processed(inputs_qq2)\r\n # for qq3\r\n input_ids_qq3, input_mask_qq3, segment_ids_qq3 = convert_inputs_to_processed(inputs_qq3)\r\n\r\n # generate label\r\n if output_mode == \"classification\" or output_mode == \"classification2\":\r\n label = label_map[example.label]\r\n elif output_mode == \"regression\":\r\n label = float(example.label)\r\n else:\r\n raise KeyError(output_mode)\r\n\r\n # log info\r\n if ex_index < 5:\r\n logger.info(\"*** Example ***\")\r\n logger.info(\"guid: %s\" % (example.guid))\r\n logger.info(\"text_a: %s\" % (example.text_a))\r\n logger.info(\"text_b: %s\" % (example.text_b))\r\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids_qk]))\r\n logger.info(\"attention_mask: %s\" % \" \".join([str(x) for x in input_mask_qk]))\r\n logger.info(\"token_type_ids: %s\" % \" \".join([str(x) for x in segment_ids_qk]))\r\n logger.info(\"label: %s (id = %d)\" % (example.label, label))\r\n # generate the feature for single example\r\n feature = InputFeatures_GNN(\r\n input_ids_q=input_ids_q,\r\n input_mask_q=input_mask_q,\r\n segment_ids_q=segment_ids_q,\r\n\r\n input_ids_k=input_ids_k,\r\n input_mask_k=input_mask_k,\r\n segment_ids_k=segment_ids_k,\r\n\r\n input_ids_qk=input_ids_qk,\r\n input_mask_qk=input_mask_qk,\r\n segment_ids_qk=segment_ids_qk,\r\n\r\n input_ids_qk1=input_ids_qk1,\r\n input_mask_qk1=input_mask_qk1,\r\n segment_ids_qk1=segment_ids_qk1,\r\n input_ids_qk2=input_ids_qk2,\r\n input_mask_qk2=input_mask_qk2,\r\n segment_ids_qk2=segment_ids_qk2,\r\n input_ids_qk3=input_ids_qk3,\r\n input_mask_qk3=input_mask_qk3,\r\n segment_ids_qk3=segment_ids_qk3,\r\n\r\n input_ids_kq1=input_ids_kq1,\r\n input_mask_kq1=input_mask_kq1,\r\n segment_ids_kq1=segment_ids_kq1,\r\n 
input_ids_kq2=input_ids_kq2,\r\n input_mask_kq2=input_mask_kq2,\r\n segment_ids_kq2=segment_ids_kq2,\r\n input_ids_kq3=input_ids_kq3,\r\n input_mask_kq3=input_mask_kq3,\r\n segment_ids_kq3=segment_ids_kq3,\r\n\r\n input_ids_qq1=input_ids_qq1,\r\n input_mask_qq1=input_mask_qq1,\r\n segment_ids_qq1=segment_ids_qq1,\r\n input_ids_qq2=input_ids_qq2,\r\n input_mask_qq2=input_mask_qq2,\r\n segment_ids_qq2=segment_ids_qq2,\r\n input_ids_qq3=input_ids_qq3,\r\n input_mask_qq3=input_mask_qq3,\r\n segment_ids_qq3=segment_ids_qq3,\r\n\r\n input_ids_kk1=input_ids_kk1,\r\n input_mask_kk1=input_mask_kk1,\r\n segment_ids_kk1=segment_ids_kk1,\r\n input_ids_kk2=input_ids_kk2,\r\n input_mask_kk2=input_mask_kk2,\r\n segment_ids_kk2=segment_ids_kk2,\r\n input_ids_kk3=input_ids_kk3,\r\n input_mask_kk3=input_mask_kk3,\r\n segment_ids_kk3=segment_ids_kk3,\r\n\r\n row_id=int(example.guid),\r\n label_id=label,\r\n task_id=task,\r\n is_real_example=True)\r\n return feature\r\n\r\n features = []\r\n for (ex_index, example) in tqdm(enumerate(examples)):\r\n features.append(qkgnn_convert_single_exampl_to_feature(example, ex_index=ex_index))\r\n\r\n return features", "def _create_examples(self, lines: List[str], mode: Split):\n examples = []\n text_index = 1 if mode == Split.test else 0\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (mode.value, i)\n text_a = line[text_index]\n if len(line) > text_index + 1:\n label = line[text_index + 1]\n else:\n label = None\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self,lines, set_type):\n examples = []\n if len(lines[1]) == 2 and len(lines[1][0]) == 1: # label text_a\n for (i, line) in enumerate(lines):\n guid = f\"{set_type}-{i}\"\n text_a = tx.utils.compat_as_text(line[1])\n label = tx.utils.compat_as_text(line[0])\n examples.append(InputExample(guid=guid, text_a=text_a,\n text_b=\"\", label=label))\n\n elif len(lines[1]) == 2 and len(lines[1][0]) > 1: # text_a text_b (when test file has no label)\n for (i, line) in enumerate(lines):\n guid = f\"{set_type}-{i}\"\n text_a = tx.utils.compat_as_text(line[0])\n text_b = tx.utils.compat_as_text(line[1])\n if set_type == \"test\":\n label = \"0\"\n else:\n print(\"the file is not for testing, yet contains no labels\")\n exit()\n examples.append(InputExample(guid=guid, text_a=text_a,\n text_b=text_b, label=label))\n elif len(lines[1]) == 1: # text_a (when test file has no label)\n for (i, line) in enumerate(lines):\n guid = f\"{set_type}-{i}\"\n text_a = tx.utils.compat_as_text(line[0])\n if set_type == \"test\":\n label = \"0\"\n else:\n print(\"the file is not for testing, yet contains no labels\")\n exit()\n examples.append(InputExample(guid=guid, text_a=text_a,\n text_b=\"\", label=label))\n elif len(lines[1]) == 3: # label text_a text_b\n for (i, line) in enumerate(lines):\n guid = f\"{set_type}-{i}\"\n text_a = tx.utils.compat_as_text(line[1])\n text_b = tx.utils.compat_as_text(line[2])\n\n label = tx.utils.compat_as_text(line[0])\n examples.append(InputExample(guid=guid, text_a=text_a,\n text_b=text_b, label=label))\n return examples", "def create_examples(lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, str(i))\n text_a_id = line[0]\n text_a = tokenization.convert_to_unicode(line[1])\n text_b_id = line[2]\n text_b = tokenization.convert_to_unicode(line[3])\n label = tokenization.convert_to_unicode(line[-1])\n examples.append(InputExample(guid=guid, 
text_a_id=text_a_id, text_a=text_a, text_b_id=text_b_id, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[0])\n text_b = tokenization.convert_to_unicode(line[1])\n label = tokenization.convert_to_unicode(line[2])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines: List[str], mode: Split):\n # id,title,content,label\n test_mode = mode == Split.test\n title_index = 1\n content_index = 2\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (mode.value, line[0])\n try:\n text_a = line[title_index]\n text_b = line[content_index]\n if test_mode:\n label = None\n else:\n label = line[3]\n except IndexError:\n continue\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[1])\n text_b = tokenization.convert_to_unicode(line[2])\n label = tokenization.convert_to_unicode(line[3])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def batch_inference(question,context): \n inputs = tokenizer(question, context, \n return_tensors='pt', \n truncation=True, \n padding=True)\n \n # Move data to GPU\n inputs = inputs.to(device)\n \n # Feed data through the model\n with torch.no_grad():\n outputs = model(**inputs)\n\n # Q&A model outputs the two logit scores for each word.\n # One for its chance of being the start of the answer\n # and one for its chance of being the end\n start_logits = outputs.start_logits\n end_logits = outputs.end_logits\n \n # Find the words with the highest score\n # argmax(dim=1) means argmax with each sample\n start = start_logits.argmax(dim=1)\n end = end_logits.argmax(dim=1)\n \n # Return the answers\n # This is the point where we move the prediction back to main memory with .cpu()\n tokens = [tokenizer.convert_ids_to_tokens(x) for x in inputs[\"input_ids\"].cpu().numpy()]\n return [tokenizer.convert_tokens_to_string(x[start[i]:end[i]+1]) for i,x in enumerate(tokens)]", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0]))\n text_a = tokenization.convert_to_unicode(line[8])\n text_b = tokenization.convert_to_unicode(line[9])\n label = tokenization.convert_to_unicode(line[-1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[3])\n text_b = tokenization.convert_to_unicode(line[4])\n label = tokenization.convert_to_unicode(line[0])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines: List[str], mode: Split):\n test_mode = mode == Split.test\n q1_index = 1 if test_mode else 3\n q2_index = 2 if test_mode else 4\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = 
\"%s-%s\" % (mode.value, line[0])\n try:\n text_a = line[q1_index]\n text_b = line[q2_index]\n label = None if test_mode else line[5]\n except IndexError:\n continue\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def convert_questions_to_features(examples, tokenizer, max_query_length=None):\n\n unique_id = 1000000000\n question_features = []\n\n for (example_index, example) in enumerate(tqdm(examples, desc='Converting questions')):\n\n query_tokens = tokenizer.tokenize(example.question_text)\n if max_query_length is None:\n max_query_length = len(query_tokens)\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n for _ in enumerate(range(1)):\n tokens_ = []\n tokens_.append(\"[CLS]\")\n for token in query_tokens:\n tokens_.append(token)\n tokens_.append(\"[SEP]\")\n\n input_ids_ = tokenizer.convert_tokens_to_ids(tokens_)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask_ = [1] * len(input_ids_)\n\n # Zero-pad up to the sequence length.\n while len(input_ids_) < max_query_length + 2:\n input_ids_.append(0)\n input_mask_.append(0)\n\n assert len(input_ids_) == max_query_length + 2\n assert len(input_mask_) == max_query_length + 2\n\n if example_index < 1:\n # logger.info(\"*** Example ***\")\n # logger.info(\"unique_id: %s\" % (unique_id))\n # logger.info(\"example_index: %s\" % (example_index))\n logger.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in query_tokens]))\n # logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids_]))\n # logger.info(\n # \"input_mask: %s\" % \" \".join([str(x) for x in input_mask_]))\n\n question_features.append(\n QuestionFeatures(\n unique_id=unique_id,\n example_index=example_index,\n tokens_=tokens_,\n input_ids=input_ids_,\n input_mask=input_mask_))\n unique_id += 1\n\n return question_features", "def _create_examples(self, lines, set_type):\n test_mode = set_type == \"test\"\n if test_mode:\n lines = lines[1:]\n text_index = 1 if test_mode else 3\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[text_index]\n label = None if test_mode else line[1]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[1])\n label = tokenization.convert_to_unicode(line[2])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[1])\n label = tokenization.convert_to_unicode(line[2])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[3])\n label = tokenization.convert_to_unicode(line[1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = 
\"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0]))\n text_a = tokenization.convert_to_unicode(line[8])\n text_b = tokenization.convert_to_unicode(line[9])\n if set_type == \"test\":\n label = \"contradiction\"\n else:\n label = tokenization.convert_to_unicode(line[-1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for i, line in lines.iterrows():\n guid = \"%s-%s\" % (set_type, i)\n text_a = line['sentence1']\n text_b = line['sentence2']\n label = line['label']\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def print_examples(example_iter, model, num=0, max_len=100,\n bos_index=1,\n src_eos_index = None,\n trg_eos_index = None,\n src_vocab=None, trg_vocab=None):\n model.eval()\n count=0\n\n BOS_TOKEN = \"<s>\"\n EOS_TOKEN = \"</s>\"\n UNK_TOKEN = \"<unk>\"\n\n if src_vocab is not None and trg_vocab is not None:\n src_bos_index = src_vocab.stoi[BOS_TOKEN]\n src_eos_index = src_vocab.stoi[EOS_TOKEN]\n trg_unk_index = trg_vocab.stoi[UNK_TOKEN]\n # trg_bos_index = trg_vocab.stoi[BOS_TOKEN]\n # trg_eos_index = trg_vocab.stoi[EOS_TOKEN]\n else:\n src_bos_index = 0\n src_eos_index = 1\n trg_unk_index = 2\n # trg_bos_index = 1\n # trg_eos_index = None\n\n for i, batch in enumerate(example_iter, 1):\n src = batch.src.cpu().numpy()[0, :]\n trg_idx = batch.trg_idx.cpu().numpy()[0, :]\n\n # remove </s>\n src = src[1:] if src[0]==src_bos_index else src\n src = src[:-1] if src[-1]==src_eos_index else src\n # trg = trg[:-1] if trg[-1]==trg_eos_index else trg\n\n result = greedy_decode(model, batch.src_idx, batch.src_mask, batch.src_lengths)\n print()\n print(\"Example %d\" % i)\n print(\"Source: \", \" \".join(lookup_words(src, vocab=src_vocab)))\n print()\n print(\"Target: \", set(lookup_words(trg_idx, vocab=trg_vocab)))\n print()\n print(\"Prediction: \", \" \".join(lookup_words(result[0], vocab=trg_vocab)))\n\n count += 1\n if count == num:\n break", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[8]\n text_b = line[9]\n label = line[-1]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def convert_examples_to_features(examples, max_seq_length, tokenizer):\n\n features = []\n for (ex_index, example) in enumerate(examples):\n print(example.text_a)\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n input_mask = [1] * len(input_ids)\n\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n \n labels_ids = []\n for label in example.labels:\n labels_ids.append(int(label))\n \n if ex_index < 0:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: 
%s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label: %s (id = %s)\" % (example.labels, labels_ids))\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_ids=labels_ids))\n return features", "def _create_examples(self, data, set_type):\n examples = []\n for (i, elem) in enumerate(data):\n guid = \"%s-%s\" % (set_type, i)\n text = elem[0]\n label = elem[1]\n examples.append(\n InputExample(guid=guid, text=text, label=label))\n return examples", "def generate_text(pmodel, num_generate, temperature, start_string):\n\n # Converting the start string to numbers (vectorizing)\n input_eval = [char2idx[s] for s in start_string]\n input_eval = tf.expand_dims(input_eval, 0)\n\n # Empty string to store the results\n text_generated = np.empty(1)\n\n # Here batch size = 1\n pmodel.reset_states()\n for i in range(num_generate):\n \n predictions = pmodel(input_eval)\n \n # remove the batch dimension\n predictions = tf.squeeze(predictions, 0)\n \n # using a multinomial distribution to predict the word returned by the model\n predictions = predictions / temperature\n predicted_id = tf.random.categorical(predictions, num_samples=1)[-1,0].numpy()\n \n # We pass the predicted word as the next input to the model\n # along with the previous hidden state\n input_eval = tf.expand_dims([predicted_id], 0)\n \n text_generated = np.vstack((text_generated, idx2char[predicted_id].tolist()))\n \n return text_generated", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n try:\n text_a = line[3]\n text_b = line[4]\n label = line[5]\n except IndexError:\n continue\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def convert_examples_to_features(\n examples: List[InputExample],\n label_list: List[str],\n max_length: int,\n tokenizer: PreTrainedTokenizer,\n pad_token_segment_id=0,\n pad_on_left=False,\n pad_token=0,\n mask_padding_with_zero=True,\n) -> List[InputFeatures]:\n\n label_map = {label: i for i, label in enumerate(label_list)}\n\n features = []\n for (ex_index, example) in tqdm.tqdm(enumerate(examples), desc=\"convert examples to features\"):\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n choices_features = []\n for ending_idx, (context, ending) in enumerate(zip(example.contexts, example.endings)):\n text_a = context\n if example.question.find(\"_\") != -1:\n # this is for cloze question\n text_b = example.question.replace(\"_\", ending)\n else:\n text_b = example.question + \" \" + ending\n if len(text_a) == 0:\n logger.info(\"context of example %d have length 0\" % (ex_index))\n text_a = \" \"\n\n inputs = tokenizer.encode_plus(text_a, text_b, add_special_tokens=True, max_length=max_length,)\n if \"num_truncated_tokens\" in inputs and inputs[\"num_truncated_tokens\"] > 0:\n logger.info(\n \"Attention! you are cropping tokens (swag task is ok). 
\"\n \"If you are training ARC and RACE and you are poping question + options,\"\n \"you need to try to use a bigger max seq length!\"\n )\n\n input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"]\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding_length = max_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask\n token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids\n else:\n input_ids = input_ids + ([pad_token] * padding_length)\n attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)\n token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)\n\n assert len(input_ids) == max_length\n assert len(attention_mask) == max_length\n assert len(token_type_ids) == max_length\n choices_features.append((input_ids, attention_mask, token_type_ids))\n\n label = label_map[example.label]\n\n if ex_index < 2:\n logger.info(\"*** Example ***\")\n logger.info(\"race_id: {}\".format(example.example_id))\n for choice_idx, (input_ids, attention_mask, token_type_ids) in enumerate(choices_features):\n logger.info(\"choice: {}\".format(choice_idx))\n logger.info(\"input_ids: {}\".format(\" \".join(map(str, input_ids))))\n logger.info(\"attention_mask: {}\".format(\" \".join(map(str, attention_mask))))\n logger.info(\"token_type_ids: {}\".format(\" \".join(map(str, token_type_ids))))\n logger.info(\"label: {}\".format(label))\n\n features.append(InputFeatures(example_id=example.example_id, choices_features=choices_features, label=label,))\n\n return features", "def _create_examples(self, lines, set_type):\n examples = []\n for i, line in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[5]\n text_b = line[6]\n pairID = line[7][2:] if line[7].startswith(\"ex\") else line[7]\n label = line[0]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[3]\n label = line[1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[3]\n label = line[1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[3]\n text_b = line[4]\n label = line[0]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0]) if set_type != \"test\" else line[0]\n text_a = line[1]\n text_b = line[2]\n label = line[-1] if set_type != \"test\" else \"entailment\"\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n 
return examples", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n # label_map = {label : i for i, label in enumerate(label_list)}\n\n features = []\n exindex = {}\n passagelens = []\n\n sum_of_labels = 0\n\n for (ex_index, example) in tqdm(enumerate(examples), desc=\"Tokenizing:\"):\n if example.text_a not in tokenmap.keys():\n tokens_a = tokenizer.tokenize(example.text_a)\n tokenmap[example.text_a] = tokens_a\n else:\n tokens_a = tokenmap[example.text_a]\n\n tokens_b = None\n if example.text_b:\n if example.text_b not in tokenmap.keys():\n tokens_b = tokenizer.tokenize(example.text_b)\n tokenmap[example.text_b] = tokens_b\n else:\n tokens_b = tokenmap[example.text_b]\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n\n passagelens.append(len(tokens_a) + len(tokens_b) + 3)\n\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n # label_id = label_map[example.label]\n label_id = example.label\n\n sum_of_labels += label_id\n\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label: %s (id = %d)\" % (str(example.label), 0))\n\n exindex[ex_index] = example.guid\n features.append(\n InputFeatures(uuid=ex_index,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n\n print(\"Passage Token Lengths Distribution\", passagelens[-1], np.percentile(passagelens, 50),\n np.percentile(passagelens, 90), np.percentile(passagelens, 95), np.percentile(passagelens, 99))\n return features, exindex", "def convert_examples_to_features(\n examples,\n tokenizer,\n max_length=512,\n task=None,\n label_list=None,\n output_mode=None,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n mask_padding_with_zero=True,\n multilabel=False,\n):\n\n if task is not None:\n processor = glue_processors[task]()\n if label_list is None:\n label_list = processor.get_labels()\n logger.info(\"Using label list %s for task %s\" % (label_list, task))\n if output_mode is None:\n output_mode = glue_output_modes[task]\n logger.info(\"Using output mode %s for task %s\" % (output_mode, task))\n\n label_map = {label: i for i, label in enumerate(label_list)}\n if multilabel:\n domain_label_map = {label: i for i, label in enumerate([\"0\", \"1\"])}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n len_examples = len(examples)\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d/%d\" % (ex_index, len_examples))\n\n inputs = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, )\n input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"]\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding_length = max_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask\n token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids\n else:\n input_ids = input_ids + ([pad_token] * padding_length)\n attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)\n token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)\n\n assert len(input_ids) == max_length, \"Error with input length {} vs {}\".format(len(input_ids), max_length)\n assert len(attention_mask) == max_length, \"Error with input length {} vs {}\".format(\n len(attention_mask), max_length\n )\n assert len(token_type_ids) == max_length, \"Error with input length {} vs {}\".format(\n len(token_type_ids), max_length\n )\n\n if output_mode == \"classification\":\n label = label_map[example.label] if not multilabel else label_map[example.label[0]]\n elif output_mode == \"regression\":\n label = float(example.label) if not multilabel else float(example.label[0])\n else:\n raise KeyError(output_mode)\n if multilabel:\n label = [label, domain_label_map[example.label[1]]]\n\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"attention_mask: %s\" % \" \".join([str(x) for x in attention_mask]))\n logger.info(\"token_type_ids: %s\" % \" \".join([str(x) for x in token_type_ids]))\n if multilabel:\n logger.info(\"label: %s (id = %d)\" % (example.label[0], label[0]))\n logger.info(\"domain label: %s (id = %d)\" % (example.label[1], label[1]))\n else:\n logger.info(\"label: %s (id = %d)\" % (example.label, label))\n\n features.append(\n InputFeatures(\n input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label\n )\n )\n\n return features", "def glue_convert_examples_to_features(\n examples,\n tokenizer,\n max_length=512,\n task=None,\n label_list=None,\n output_mode=None,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n mask_padding_with_zero=True,\n):\n is_tf_dataset = False\n if is_tf_available() and isinstance(examples, tf.data.Dataset):\n is_tf_dataset = True\n\n if task is not None:\n processor = glue_processors[task]()\n if label_list is None:\n label_list = processor.get_labels()\n logger.info(\"Using label list %s for task %s\" % (label_list, task))\n if output_mode is None:\n output_mode = glue_output_modes[task]\n logger.info(\"Using output mode %s for task %s\" % (output_mode, task))\n\n label_map = {label: i for i, label in enumerate(label_list)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n len_examples = 0\n if is_tf_dataset:\n example = processor.get_example_from_tensor_dict(example)\n example = processor.tfds_map(example)\n len_examples = tf.data.experimental.cardinality(examples)\n else:\n len_examples = len(examples)\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d/%d\" % (ex_index, len_examples))\n\n inputs = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length,)\n input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"]\n\n # The mask has 1 for real tokens and 0 for padding 
tokens. Only real\n # tokens are attended to.\n attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding_length = max_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask\n token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids\n else:\n input_ids = input_ids + ([pad_token] * padding_length)\n attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)\n token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)\n\n assert len(input_ids) == max_length, \"Error with input length {} vs {}\".format(len(input_ids), max_length)\n assert len(attention_mask) == max_length, \"Error with input length {} vs {}\".format(\n len(attention_mask), max_length\n )\n assert len(token_type_ids) == max_length, \"Error with input length {} vs {}\".format(\n len(token_type_ids), max_length\n )\n\n if output_mode == \"classification\":\n label_id = label_map[example.label]\n elif output_mode == \"regression\":\n label_id = float(example.label)\n else:\n raise KeyError(output_mode)\n\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"attention_mask: %s\" % \" \".join([str(x) for x in attention_mask]))\n logger.info(\"token_type_ids: %s\" % \" \".join([str(x) for x in token_type_ids]))\n logger.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n features.append(\n InputFeatures(\n input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label_id=label_id\n )\n )\n return features", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n # if i == 0:\n # continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[1]\n text_b = None\n label = line[2]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n # if i == 0:\n # continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[1]\n text_b = None\n label = line[2]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, ids) in enumerate(lines):\n text_a = lines[ids]['sentence']\n examples.append(\n InputExample(text_a=text_a) )\n return examples", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n guid = \"%s-%s\" % (set_type, i)\r\n text_a = line[3]\r\n text_b = line[4]\r\n# if set_type == 'test':\r\n# label = self.get_labels()[0]\r\n# else:\r\n# label = line[0]\r\n label = line[0]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[7]\n text_b = line[8]\n label = None if set_type == \"test\" else line[-1]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, 
line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n guid = \"%s-%s\" % (set_type, i)\r\n text_a = line[0]\r\n label = line[1]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\r\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0]) if set_type != \"test\" else line[0]\n try:\n text_a = line[3] if set_type != \"test\" else line[1]\n text_b = line[4] if set_type != \"test\" else line[2]\n label = line[5] if set_type != \"test\" else \"0\"\n except IndexError:\n continue\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[3])\n text_b = tokenization.convert_to_unicode(line[4])\n if set_type == \"test\":\n label = \"0\"\n else:\n label = tokenization.convert_to_unicode(line[0])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n guid = \"%s-%s\" % (set_type, line[0])\r\n text_a = line[1]\r\n text_b = line[2]\r\n label = line[-1]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n return examples", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n label_map = {label : i for i, label in enumerate(label_list)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens = example.text\n\n# # Account for [CLS] and [SEP] with \"- 2\"\n# if len(tokens) > max_seq_length - 2:\n# tokens = tokens[:(max_seq_length - 2)]\n\n bert_tokens = []\n orig_to_tok_map = []\n\n bert_tokens.append(\"[CLS]\")\n for token in tokens:\n new_tokens = tokenizer.tokenize(token)\n if len(bert_tokens) + len(new_tokens) > max_seq_length - 1:\n # print(\"You shouldn't see this since the test set is already pre-separated.\")\n break\n else:\n orig_to_tok_map.append(len(bert_tokens))\n bert_tokens.extend(new_tokens)\n bert_tokens.append(\"[SEP]\")\n\n if len(bert_tokens) == 2: # edge case\n continue\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n\n input_ids = tokenizer.convert_tokens_to_ids(bert_tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n\n segment_ids = [0] * max_seq_length # no use for our problem\n\n labels = example.label\n label_ids = [0] * max_seq_length\n label_mask = [0] * max_seq_length\n\n for label, target_index in zip(labels, orig_to_tok_map):\n label_ids[target_index] = label_map[label]\n label_mask[target_index] = 1\n\n assert len(segment_ids) == max_seq_length\n assert len(label_ids) == max_seq_length\n assert len(label_mask) == max_seq_length\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_ids=label_ids,\n label_mask=label_mask))\n return features", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n guid = \"%s-%s\" % (set_type, i)\r\n text_a = line[3]\r\n text_b = line[4]\r\n label = line[0]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n return examples", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n guid = \"%s-%s\" % (set_type, i)\r\n text_a = line[3]\r\n label = line[1]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\r\n return examples", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n guid = \"%s-%s\" % (set_type, line[0])\r\n text_a = line[8]\r\n text_b = line[9]\r\n label = line[-1]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n return examples", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n guid = \"%s-%s\" % (set_type, line[0])\r\n text_a = line[7]\r\n text_b = line[8]\r\n label = line[-1]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n return examples", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n guid = \"%s-%s\" % (set_type, line[0])\r\n text_a = line[1]\r\n text_b = line[2]\r\n if set_type != 'test':\r\n label = line[-1]\r\n else:\r\n label = self.get_labels()[0]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n return examples", "def read_examples(input_file):\n examples = []\n unique_id = 0\n with tf.gfile.GFile(input_file, \"r\") as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline())\n if not line:\n break\n \n line = line.strip()\n text_a = None\n text_b = None\n m = re.match(r\"^(.*) \\|\\|\\| (.*)$\", line)\n \n if m is None:\n text_a = line\n else:\n text_a = m.group(1)\n text_b = m.group(2)\n examples.append(InputExample(unique_id=unique_id,\n text_a=text_a, \n text_b=text_b))\n unique_id += 1\n return examples", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n if set_type != 'test':\r\n guid = \"%s-%s\" % (set_type, i)\r\n text_a = line[0]\r\n label = line[1]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\r\n else:\r\n guid = 
\"%s-%s\" % (set_type, i)\r\n text_a = line[1]\r\n label = self.get_labels()[0]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\r\n\r\n return examples", "def convert_examples_to_features(examples,label_list, max_seq_length,tokenizer):\r\n label_map = {}\r\n for (i, label) in enumerate(label_list):\r\n label_map[label] = i\r\n\r\n input_data=[]\r\n for (ex_index, example) in enumerate(examples):\r\n tokens_a = tokenizer.tokenize(example.text_a)\r\n tokens_b = None\r\n if example.text_b:\r\n tokens_b = tokenizer.tokenize(example.text_b)\r\n if tokens_b:\r\n # Modifies `tokens_a` and `tokens_b` in place so that the total\r\n # length is less than the specified length.\r\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\r\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\r\n else:\r\n # Account for [CLS] and [SEP] with \"- 2\"\r\n if len(tokens_a) > max_seq_length - 2:\r\n tokens_a = tokens_a[0:(max_seq_length - 2)]\r\n\r\n if ex_index % 10000 == 0:\r\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\r\n\r\n # The convention in BERT is:\r\n # (a) For sequence pairs:\r\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\r\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\r\n # (b) For single sequences:\r\n # tokens: [CLS] the dog is hairy . [SEP]\r\n # type_ids: 0 0 0 0 0 0 0\r\n #\r\n # Where \"type_ids\" are used to indicate whether this is the first\r\n # sequence or the second sequence. The embedding vectors for `type=0` and\r\n # `type=1` were learned during pre-training and are added to the wordpiece\r\n # embedding vector (and position vector). This is not *strictly* necessary\r\n # since the [SEP] token unambigiously separates the sequences, but it makes\r\n # it easier for the model to learn the concept of sequences.\r\n #\r\n # For classification tasks, the first vector (corresponding to [CLS]) is\r\n # used as as the \"sentence vector\". 
Note that this only makes sense because\r\n # the entire model is fine-tuned.\r\n tokens = []\r\n segment_ids = []\r\n tokens.append(\"[CLS]\")\r\n segment_ids.append(0)\r\n for token in tokens_a:\r\n tokens.append(token)\r\n segment_ids.append(0)\r\n tokens.append(\"[SEP]\")\r\n segment_ids.append(0)\r\n\r\n if tokens_b:\r\n for token in tokens_b:\r\n tokens.append(token)\r\n segment_ids.append(1)\r\n tokens.append(\"[SEP]\")\r\n segment_ids.append(1)\r\n\r\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\r\n\r\n input_mask = [1] * len(input_ids)\r\n\r\n while len(input_ids) < max_seq_length:\r\n input_ids.append(0)\r\n input_mask.append(0)\r\n segment_ids.append(0)\r\n assert len(input_ids) == max_seq_length\r\n assert len(input_mask) == max_seq_length\r\n assert len(segment_ids) == max_seq_length\r\n\r\n label_id = label_map[example.label]\r\n if ex_index < 3:\r\n tf.logging.info(\"*** Example ***\")\r\n tf.logging.info(\"guid: %s\" % (example.guid))\r\n tf.logging.info(\"tokens: %s\" % \" \".join([tokenization.printable_text(x) for x in tokens]))\r\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\r\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\r\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\r\n tf.logging.info(\"label: %s (id = %d)\" % (example.label, label_id))\r\n\r\n features = collections.OrderedDict()\r\n features[\"input_ids\"] = input_ids\r\n features[\"input_mask\"] = input_mask\r\n features[\"segment_ids\"] = segment_ids\r\n features[\"label_ids\"] =label_id\r\n input_data.append(features)\r\n\r\n return input_data", "def _create_examples(self, lines, set_type):\n examples = []\n for (_, data) in enumerate(lines):\n passage = data[\"passage\"][\"text\"]\n for Q in data[\"passage\"][\"questions\"]:\n question = Q[\"question\"]\n for A in Q[\"answers\"]:\n guid = f\"{set_type}-{data['idx']-Q['idx']-A['idx']}\"\n examples.append(\n InputExample(\n guid=guid,\n text_a=passage,\n text_b=question + \" \" + A[\"text\"],\n label=str(A[\"label\"]),\n )\n )\n return examples", "def _create_examples(self, data_dir, set_type):\n\t\texamples = []\n\t\tinput_file_data = os.path.join(data_dir, \"data.tsv\")\n\t\twith open(input_file_data, \"r\", encoding=\"utf-8-sig\") as f:\n\t\t\tfor i, inp in enumerate(f):\n\t\t\t\tinps = inp.split('\\t') \n\t\t\t\tguid = \"%s-%s\" % (set_type, i)\n\t\t\t\ttext_inp = inps[1].strip()\n\t\t\t\ttext_out = inps[2].strip()\n\t\t\t\texamples.append(InputExample(guid=guid, text_inp=text_inp, text_out=text_out))\n\t\t\t\t\n\t\t\t# Sort these out before returning\n\t\t\texamples = sorted(examples, key=sort_inp_len)\n\t\t\treturn examples", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n guid = \"%s-%s\" % (set_type, line[0])\r\n text_a = line[8]\r\n text_b = line[9]\r\n if set_type != 'test':\r\n label = line[-1]\r\n else:\r\n label = self.get_labels()[0]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n return examples", "def _convert_single_example(self, text_a, text_b):\n tokens = []\n input_ids = []\n segment_ids = []\n input_mask = []\n try:\n text_a = self.tokenizer.tokenize(text_a)\n if text_b:\n text_b = self.tokenizer.tokenize(text_b)\n self._truncate_seq_pair(text_a, text_b)\n\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n\n for token in text_a:\n tokens.append(token)\n segment_ids.append(0)\n 
segment_ids.append(0)\n tokens.append(\"[SEP]\")\n\n if text_b:\n for token in text_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append('[SEP]')\n segment_ids.append(1)\n\n input_ids = self.tokenizer.convert_tokens_to_ids(tokens)\n\n input_mask = [1] * len(input_ids)\n\n while len(input_ids) < 50:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n except:\n self.logger.error()\n\n finally:\n return input_ids, input_mask, segment_ids", "def print_examples(example_iter, model, n=2, max_len=100, \n sos_index=1, \n src_eos_index=None, \n trg_eos_index=None, \n src_vocab=None, trg_vocab=None):\n\n model.eval()\n count = 0\n print()\n \n if src_vocab is not None and trg_vocab is not None:\n src_eos_index = src_vocab.stoi[EOS_TOKEN]\n trg_sos_index = trg_vocab.stoi[SOS_TOKEN]\n trg_eos_index = trg_vocab.stoi[EOS_TOKEN]\n else:\n src_eos_index = None\n trg_sos_index = 1\n trg_eos_index = None\n \n for i, batch in enumerate(example_iter):\n \n src = batch.src.cpu().numpy()[0, :]\n trg = batch.trg_y.cpu().numpy()[0, :]\n\n # remove </s> (if it is there)\n src = src[:-1] if src[-1] == src_eos_index else src\n trg = trg[:-1] if trg[-1] == trg_eos_index else trg \n \n result, _ = beam_decode(\n model, batch.src, batch.src_mask, batch.src_lengths,\n max_len=max_len, sos_index=trg_sos_index, eos_index=trg_eos_index)\n print(\"Example #%d\" % (i+1))\n print(\"Src : \", \" \".join(lookup_words(src, vocab=src_vocab)))\n print(\"Trg : \", \" \".join(lookup_words(trg, vocab=trg_vocab)))\n print(\"Pred: \", \" \".join(lookup_words(result, vocab=trg_vocab)))\n print()\n \n count += 1\n if count == n:\n break", "def _create_examples(self, lines, set_type):\n examples = []\n \n for (i, line) in enumerate(lines):\n sentence_number = 0\n premise_text = line[\"premise\"]\n modified_premise_text = re.sub(self.stage_name_pattern,\"\",premise_text)\n modified_premise_text = re.sub(self.w_patterns,\"\",modified_premise_text)\n hypothesis_text = line[\"hypothesis\"]\n hypothesis_text = re.sub(self.w_patterns,\"\",hypothesis_text)\n a_label = int(line[\"label\"])\n\n sentences = modified_premise_text.split('.')\n\n for j, sentence in enumerate(sentences):\n guid = \"\" + str(sentence_number) + \"\\t\" + str(i) + \"\\t\" + str(len(sentences)) + \"\\t\" + str(a_label)\n text_a = sentence\n text_b = hypothesis_text\n label = a_label\n sentence_number += 1\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n #print(\"16th sentence::\",sentences[16])\n\n return examples", "def text_generator(self, example_generator):\n\t\tif self._data_as_tf_example:\n\t\t\tquery_text = None\n\t\t\tquery_edge_list = None\n\t\t\tword_edge_list = None\n\t\t\t\n\t\t\twhile True:\n\t\t\t\te, epoch_num = example_generator.next() # e is a tf.Example\n\t\t\t\ttry:\n\t\t\t\t\tarticle_text = e.features.feature['article'].bytes_list.value[0] # document text\n\t\t\t\t\tabstract_text = e.features.feature['abstract'].bytes_list.value[0] # response text\n\t\t\t\t\tif self._hps.query_encoder.value:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tquery_text = e.features.feature['query'].bytes_list.value[0] # context text\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tquery_text = ''\n\t\t\t\t\tif self._hps.word_gcn.value:\n\t\t\t\t\t\tword_edge_list = []\n\t\t\t\t\t\tif self._hps.use_default_graph.value:\n\t\t\t\t\t\t\tword_edge_list = word_edge_list + ast.literal_eval(e.features.feature['word_edge_list'].bytes_list.value[0])\n\t\t\t\t\t\t#tf.logging.info((word_edge_list[0]))\n\t\t\t\t\t\tif 
self._hps.use_coref_graph.value:\n\t\t\t\t\t\t\tword_edge_list = word_edge_list + ast.literal_eval(e.features.feature['word_coref_edge_list'].bytes_list.value[0])\n\t\t\t\t\t\tif self._hps.use_entity_graph.value:\n\t\t\t\t\t\t\tword_edge_list = word_edge_list + ast.literal_eval(e.features.feature['word_entity_edge_list'].bytes_list.value[0])\n\t\t\t\t\t\tif self._hps.use_lexical_graph.value:\n\t\t\t\t\t\t\tword_edge_list = word_edge_list + ast.literal_eval(e.features.feature['word_lexical_edge_list'].bytes_list.value[0])\n\t\t\t\t\t#\tprint(word_edge_list)\n\t\t\t\t\t\n\n\t\t\t\t\tif self._hps.query_gcn.value:\n\t\t\t\t\t\tquery_edge_list = []\n\t\t\t\t\t\tif self._hps.use_default_graph.value:\n\t\t\t\t\t\t\tquery_edge_list = query_edge_list + ast.literal_eval(e.features.feature['query_edge_list'].bytes_list.value[0])\n\t\t\t\t\t\t\n\t\t\t\t\t\t'''\n\t\t\t\t\t\tThese are inter-sentence graph and may not be applicable\n\t\t\t\t\t\tif self._hps.use_coref_graph.value:\n\t\t\t\t\t\t\tquery_edge_list = query_edge_list + ast.literal_eval(e.features.feature['query_coref_edge_list'].bytes_list.value[0])\n\t\t\t\t\t\tif self._hps.use_entity_graph.value:\n\t\t\t\t\t\t\tquery_edge_list = query_edge_list + ast.literal_eval(e.features.feature['query_entity_edge_list'].bytes_list.value[0])\n\t\t\t\t\t\tif self._hps.use_lexical_graph.value:\n\t\t\t\t\t\t\tquery_edge_list = query_edge_list + ast.literal_eval(e.features.feature['query_lexical_edge_list'].bytes_list.value[0])\n\t\t\t\t\t\t'''\n\n\n\n\t\t\t\texcept ValueError:\n\t\t\t\t\ttf.logging.error('Failed to get article or abstract from example')\n\t\t\t\t\tcontinue\n\t\t\t\tif len(article_text)==0: # See https://github.com/abisee/pointer-generator/issues/1\n\t\t\t\t\ttf.logging.warning('Found an example with empty article text. 
Skipping it.')\n\t\t\t\telse:\n\t\t\t\t\t#tf.logging.info(abstract_text)\n\t\t\t\t\tyield (article_text, abstract_text, word_edge_list, query_text, query_edge_list, epoch_num)\n\t\t\t\n\t\telse:\n\n\t\t\twhile True:\n\t\t\t\te = example_generator.next()\n\t\t\t\tyield e", "def _create_examples(self, lines, set_type):\n examples = []\n for (_, data) in enumerate(lines):\n examples.append(\n InputExample(\n guid=f\"{set_type}-{data['idx']}\",\n text_a=data[\"passage\"],\n text_b=data[\"question\"],\n label=str(data[\"label\"]),\n )\n )\n return examples", "def generate_tpu(self, prompts: List[str]):\n from flax.training.common_utils import shard # pylint:disable=g-import-not-at-top,g-importing-member\n import jax # pylint:disable=g-import-not-at-top\n import time # pylint:disable=g-import-not-at-top\n import numpy as np # pylint:disable=g-import-not-at-top\n\n rng = jax.random.PRNGKey(0)\n rng = jax.random.split(rng, jax.device_count())\n\n assert prompts, \"prompt parameter cannot be empty\"\n print(\"Prompts: \", prompts)\n prompt_ids = self._pipeline.prepare_inputs(prompts)\n prompt_ids = shard(prompt_ids)\n print(\"Sharded prompt ids has shape:\", prompt_ids.shape)\n if self._run_with_profiler:\n jax.profiler.start_trace(self._profiler_dir)\n\n time_start = time.time()\n images = self._p_generate(prompt_ids, self._p_params, rng)\n images = images.block_until_ready()\n elapsed = time.time() - time_start\n if self._run_with_profiler:\n jax.profiler.stop_trace()\n\n print(\"Inference time (in seconds): \", elapsed)\n print(\"Shape of the predictions: \", images.shape)\n images = images.reshape(\n (images.shape[0] * images.shape[1],) + images.shape[-3:])\n print(\"Shape of images afterwards: \", images.shape)\n return self._pipeline.numpy_to_pil(np.array(images))", "def translate_interactive(estimator, tokenizer_):\n \n predictor = ContinuePredict(estimator, continue_input_fn)\n while True:\n tf.logging.info(\"Enter the English sentence end with ENTER.\")\n raw_text = input().strip()\n if raw_text == r'\\q':\n predictor.close()\n break\n encoded_txt = _encode_and_add_eos(raw_text, tokenizer_)\n target = predictor.predict(encoded_txt)\n target = next(target)['outputs']\n target = _trim_and_decode(target, tokenizer_)\n tf.logging.info('\\t{}'.format(target))", "def generate_text(session, model, config, starting_text='<eos>',\n stop_length=100, stop_tokens=None, temp=1.0):\n state = model.initial_state.eval()\n # Imagine tokens as a batch size of one, length of len(tokens[0])\n tokens = [model.vocab.encode(word) for word in starting_text.split()]\n for i in xrange(stop_length):\n ### YOUR CODE HERE\n #print tokens\n feed = {}\n #x = np.array([tokens[-1]])\n #x.reshape(1,1)\n feed[model.input_placeholder] = [[tokens[-1]]]\n feed[model.dropout_placeholder] = 1\n feed[model.initial_state] = state\n y_pred, state = session.run([model.predictions[-1], model.final_state], feed_dict=feed)\n ### END YOUR CODE\n next_word_idx = sample(y_pred[0], temperature=temp)\n tokens.append(next_word_idx)\n if stop_tokens and model.vocab.decode(tokens[-1]) in stop_tokens:\n break\n output = [model.vocab.decode(word_idx) for word_idx in tokens]\n return output", "def batchify(TEXT, data, batch_size, device):\r\n data = TEXT.numericalize([data.examples[0].text])\r\n num_batches = data.size(0)//batch_size\r\n data = data.narrow(0, 0, num_batches * batch_size)\r\n data = data.view(batch_size, -1).t().contiguous()\r\n\r\n return data.to(device)", "def convert_example(example,\n tokenizer,\n label_list,\n 
max_seq_length=512,\n is_test=False):\n\n def _truncate_seqs(seqs, max_seq_length):\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n tokens_a, tokens_b = seqs\n max_seq_length -= 3\n while True: # truncate with longest_first strategy\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_seq_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n return seqs\n\n def _concat_seqs(seqs, separators, seq_mask=0, separator_mask=1):\n concat = sum((seq + sep for sep, seq in zip(separators, seqs)), [])\n segment_ids = sum(\n ([i] * (len(seq) + len(sep))\n for i, (sep, seq) in enumerate(zip(separators, seqs))), [])\n if isinstance(seq_mask, int):\n seq_mask = [[seq_mask] * len(seq) for seq in seqs]\n if isinstance(separator_mask, int):\n separator_mask = [[separator_mask] * len(sep) for sep in separators]\n p_mask = sum((s_mask + mask\n for sep, seq, s_mask, mask in zip(\n separators, seqs, seq_mask, separator_mask)), [])\n return concat, segment_ids, p_mask\n\n if not is_test:\n # `label_list == None` is for regression task\n label_dtype = \"int64\" if label_list else \"float32\"\n # get the label\n label = example[-2]\n example = example[:-2]\n #create label maps if classification task\n if label_list:\n label_map = {}\n for (i, l) in enumerate(label_list):\n label_map[l] = i\n label = label_map[label]\n label = np.array([label], dtype=label_dtype)\n else:\n qas_id = example[-1]\n example = example[:-2]\n # tokenize raw text\n tokens_raw = [tokenizer(l) for l in example]\n # truncate to the truncate_length,\n tokens_trun = _truncate_seqs(tokens_raw, max_seq_length)\n # concate the sequences with special tokens\n tokens_trun[0] = [tokenizer.cls_token] + tokens_trun[0]\n tokens, segment_ids, _ = _concat_seqs(tokens_trun, [[tokenizer.sep_token]] *\n len(tokens_trun))\n # convert the token to ids\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n valid_length = len(input_ids)\n\n if not is_test:\n return input_ids, segment_ids, valid_length, label\n else:\n return input_ids, segment_ids, valid_length, qas_id", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n if set_type != 'test':\r\n guid = \"%s-%s\" % (set_type, line[0])\r\n try:\r\n text_a = line[3]\r\n text_b = line[4]\r\n label = line[5]\r\n except IndexError:\r\n continue\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n else:\r\n guid = \"%s-%s\" % (set_type, line[0])\r\n try:\r\n text_a = line[1]\r\n text_b = line[2]\r\n label = self.get_labels()[0]\r\n except IndexError:\r\n continue\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n\r\n return examples", "def _create_examples(self, lines, set_type, dom=-1):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[0]\n label = line[1]\n if dom != -1:\n label = [label, str(dom)]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n guid = \"%s-%s\" % (set_type, i)\r\n if set_type != 'test':\r\n text_a = line[3]\r\n label = line[1]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\r\n else:\r\n if i == 0:\r\n continue\r\n text_a = line[1]\r\n label = self.get_labels()[0]\r\n 
examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\r\n return examples", "def query(self,text_input,prefix='answer:',convert_to_string=True):\n predictions, raw_outputs = self.model.predict([text_input])\n raw_outputs = [np.max(softmax([s[1][0] for s in v.items()][0])) for v in raw_outputs[0]]\n preds = [[(i[0],i[1],raw_outputs[k]) for i in p.items()][0] for k,p in enumerate(predictions[0])]\n return self._post_process_output(preds,convert_to_string=convert_to_string)", "def sample_to_features_text(\n sample, tasks, max_seq_len, tokenizer\n):\n\n if tokenizer.is_fast:\n text = sample.clear_text[\"text\"]\n # Here, we tokenize the sample for the second time to get all relevant ids\n # This should change once we git rid of FARM's tokenize_with_metadata()\n inputs = tokenizer(text,\n return_token_type_ids=True,\n truncation=True,\n truncation_strategy=\"longest_first\",\n max_length=max_seq_len,\n return_special_tokens_mask=True)\n\n if (len(inputs[\"input_ids\"]) - inputs[\"special_tokens_mask\"].count(1)) != len(sample.tokenized[\"tokens\"]):\n logger.error(f\"FastTokenizer encoded sample {sample.clear_text['text']} to \"\n f\"{len(inputs['input_ids']) - inputs['special_tokens_mask'].count(1)} tokens, which differs \"\n f\"from number of tokens produced in tokenize_with_metadata(). \\n\"\n f\"Further processing is likely to be wrong.\")\n else:\n # TODO It might be cleaner to adjust the data structure in sample.tokenized\n tokens_a = sample.tokenized[\"tokens\"]\n tokens_b = sample.tokenized.get(\"tokens_b\", None)\n\n inputs = tokenizer.encode_plus(\n tokens_a,\n tokens_b,\n add_special_tokens=True,\n truncation=False, # truncation_strategy is deprecated\n return_token_type_ids=True,\n is_split_into_words=False,\n )\n\n input_ids, segment_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"]\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n padding_mask = [1] * len(input_ids)\n\n # Padding up to the sequence length.\n # Normal case: adding multiple 0 to the right\n # Special cases:\n # a) xlnet pads on the left and uses \"4\" for padding token_type_ids\n if tokenizer.__class__.__name__ == \"XLNetTokenizer\":\n pad_on_left = True\n segment_ids = pad(segment_ids, max_seq_len, 4, pad_on_left=pad_on_left)\n else:\n pad_on_left = False\n segment_ids = pad(segment_ids, max_seq_len, 0, pad_on_left=pad_on_left)\n\n input_ids = pad(input_ids, max_seq_len, tokenizer.pad_token_id, pad_on_left=pad_on_left)\n padding_mask = pad(padding_mask, max_seq_len, 0, pad_on_left=pad_on_left)\n\n assert len(input_ids) == max_seq_len\n assert len(padding_mask) == max_seq_len\n assert len(segment_ids) == max_seq_len\n\n feat_dict = {\n \"input_ids\": input_ids,\n \"padding_mask\": padding_mask,\n \"segment_ids\": segment_ids,\n }\n\n # Add Labels for different tasks\n for task_name, task in tasks.items():\n try:\n label_name = task[\"label_name\"]\n label_raw = sample.clear_text[label_name]\n label_list = task[\"label_list\"]\n if task[\"task_type\"] == \"classification\":\n # id of label\n try:\n label_ids = [label_list.index(label_raw)]\n except ValueError as e:\n raise ValueError(f'[Task: {task_name}] Observed label {label_raw} not in defined label_list')\n elif task[\"task_type\"] == \"multilabel_classification\":\n # multi-hot-format\n label_ids = [0] * len(label_list)\n for l in label_raw.split(\",\"):\n if l != \"\":\n label_ids[label_list.index(l)] = 1\n elif task[\"task_type\"] == \"regression\":\n label_ids = [float(label_raw)]\n else:\n raise ValueError(task[\"task_type\"])\n except KeyError:\n # For inference mode we don't expect labels\n label_ids = None\n if label_ids is not None:\n feat_dict[task[\"label_tensor_name\"]] = label_ids\n return [feat_dict]", "def tokenize_nmt(text, num_examples=None):\n source, target = [], []\n for i, line in enumerate(text.split('\\n')):\n if num_examples and i > num_examples:\n break\n parts = line.split('\\t')\n if len(parts) == 2:\n source.append(parts[0].split(' '))\n target.append(parts[1].split(' '))\n return source, target", "def process(self, example: str) -> List[torch.Tensor]:\n return self._tokenizer.batch_encode_plus([example], return_tensors=\"pt\", output_past=True, max_length=self.max_seq_len)['input_ids'][0]", "def _create_examples(self, lines, set_type, dom=-1):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[1]\n text_b = line[2]\n label = line[-1]\n if dom != -1:\n label = [label, str(dom)]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type, dom=-1):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[1]\n text_b = line[2]\n label = line[-1]\n if dom != -1:\n label = [label, str(dom)]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type, dom=-1):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[1]\n text_b = line[2]\n label = line[-1]\n if dom != -1:\n label = [label, str(dom)]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def main():\n # Load and 
prep training files\n raw_speech_text = hg.load_training_file('trump_train.txt')\n speech_text = hg.prep_training(raw_speech_text)\n tweet_data = load_tweets('trump_tweets.json')\n raw_tweets = \"\"\n for dct in tweet_data:\n raw_tweets += \"{} \".format(dct['text'])\n tweets = hg.prep_training(raw_tweets)\n corpus = speech_text + tweets\n corpus = strip_punctuation(corpus)\n dict_1 = hg.map_one_to_one(corpus)\n dict_2 = hg.map_two_to_one(corpus)\n text = []\n \n # Introduction\n print(\"\\nTrump Speech Generator\\n\")\n print(\"Select words to add to speech\")\n print(\"\\'x\\' to exit\")\n print(\"\\'p\\' to add punctuation\")\n print(\"Select \\'p\\' before selecting the word you want to punctuate\")\n\n # Select first word\n options = corpus\n print ()\n selection = select_word(corpus)\n text.append(selection)\n \n # Select second word\n last = text[0]\n options = word_after_one(last, dict_1)\n print_text(text)\n selection = select_word(options)\n text.append(selection)\n \n # Select subsequent word\n while True:\n last = \"{} {}\".format(text[-2].strip(punctuation),\n text[-1].strip(punctuation))\n options = word_after_two(last, dict_2)\n if options == []:\n last = last.split()[1]\n options = word_after_one(last, dict_1)\n while options == []:\n last = random.choice(corpus)\n options = word_after_one(last, dict_1)\n print_text(text)\n selection = select_word(options)\n text.append(selection)\n \n print_text(text)", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n if i <= 3:\n print(\"i={}, line={}\".format(i, line))\n guid = \"%s-%s\" % (set_type, i)\n if set_type == \"test\":\n text_a = tokenization.convert_to_unicode(line[1])\n label = \"0\"\n else:\n text_a = tokenization.convert_to_unicode(line[1])\n label = tokenization.convert_to_unicode(line[0])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for idx, line in enumerate(lines):\n item = json.loads(line.strip())\n question_id = \"%s-%s\" % (set_type, idx)\n context = item[\"context\"]\n question = item[\"question\"]\n endings = [item[\"answerA\"],item[\"answerB\"],item[\"answerC\"] ]\n label = item[\"correct\"]\n #race_id = \"%s-%s\" % (set_type, data_raw[\"race_id\"])\n #article = data_raw[\"article\"]\n #for i in range(len(data_raw[\"answers\"])):\n #truth = str(ord(data_raw[\"answers\"][i]) - ord(\"A\"))\n #question = data_raw[\"questions\"][i]\n #options = data_raw[\"options\"][i]\n\n examples.append(\n InputExample(\n example_id=question_id,\n question=question,\n contexts=[context,context,context],\n endings=[endings[0], endings[1], endings[2]],#, options[3]\n label=label,\n )\n )\n return examples", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n guid = \"%s-%s\" % (set_type, line[0])\r\n text_a = line[1]\r\n text_b = line[2]\r\n if set_type != 'test_matched':\r\n label = line[-1]\r\n else:\r\n label = self.get_labels()[0]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (_, data) in enumerate(lines):\n passage = data[\"passage\"]\n ents_pos = [(pos[\"start\"], pos[\"end\"])\n for pos in passage[\"entities\"]]\n ents = [passage[\"text\"][start:end+1] for start, end in ents_pos]\n for qa in data[\"qas\"]:\n 
qa_id = f\"{set_type}-{data['idx']}-{qa['idx']}\"\n answers = set([ans[\"text\"] for ans in qa[\"answers\"]])\n for ent_idx, ent in enumerate(ents):\n is_answer = ent in answers\n guid = f\"{qa_id}-{ent_idx}\"\n examples.append(\n InputExample(\n guid=guid,\n text_a=passage[\"text\"],\n # Insert entity in query\n text_b=qa[\"query\"].replace(\"@placeholder\", ent),\n label=\"1\" if is_answer else \"0\",\n )\n )\n return examples", "def run_prediction(question_texts, context_text):\r\n examples = []\r\n\r\n for i, question_text in enumerate(question_texts):\r\n example = SquadExample(\r\n qas_id=str(i),\r\n question_text=question_text,\r\n context_text=context_text,\r\n answer_text=None,\r\n start_position_character=None,\r\n title=\"Predict\",\r\n is_impossible=False,\r\n answers=None,\r\n )\r\n\r\n examples.append(example)\r\n\r\n features, dataset = squad_convert_examples_to_features(\r\n examples=examples,\r\n tokenizer=tokenizer,\r\n max_seq_length=384,\r\n doc_stride=128,\r\n max_query_length=64,\r\n is_training=False,\r\n return_dataset=\"pt\",\r\n threads=1,\r\n )\r\n\r\n eval_sampler = SequentialSampler(dataset)\r\n eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=10)\r\n\r\n all_results = []\r\n\r\n for batch in eval_dataloader:\r\n model.eval()\r\n batch = tuple(t.to(device) for t in batch)\r\n\r\n with torch.no_grad():\r\n inputs = {\r\n \"input_ids\": batch[0],\r\n \"attention_mask\": batch[1],\r\n \"token_type_ids\": batch[2],\r\n }\r\n\r\n example_indices = batch[3]\r\n\r\n outputs = model(**inputs)\r\n\r\n for i, example_index in enumerate(example_indices):\r\n eval_feature = features[example_index.item()]\r\n unique_id = int(eval_feature.unique_id)\r\n\r\n output = [to_list(output[i]) for output in outputs]\r\n\r\n start_logits, end_logits = output\r\n result = SquadResult(unique_id, start_logits, end_logits)\r\n all_results.append(result)\r\n\r\n output_prediction_file = \"predictions.json\"\r\n output_nbest_file = \"nbest_predictions.json\"\r\n output_null_log_odds_file = \"null_predictions.json\"\r\n\r\n predictions = compute_predictions_logits(\r\n examples,\r\n features,\r\n all_results,\r\n n_best_size,\r\n max_answer_length,\r\n do_lower_case,\r\n output_prediction_file,\r\n output_nbest_file,\r\n output_null_log_odds_file,\r\n False, # verbose_logging\r\n True, # version_2_with_negative\r\n null_score_diff_threshold,\r\n tokenizer,\r\n )\r\n\r\n return predictions", "def get_transformed_io(data_path, data_dir):\n sents, labels = read_line_examples_from_file(data_path)\n\n # the input is just the raw sentence\n inputs = [s.copy() for s in sents]\n\n task = 'asqp'\n if task == 'aste':\n targets = get_para_aste_targets(sents, labels)\n elif task == 'tasd':\n targets = get_para_tasd_targets(sents, labels)\n elif task == 'asqp':\n targets = get_para_asqp_targets(sents, labels)\n else:\n raise NotImplementedError\n\n return inputs, targets", "def _create_examples(self, lines, set_type, dom=-1):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[3]\n text_b = line[4]\n label = line[0]\n if dom != -1:\n label = [label, str(dom)]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type, dom=-1):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[7]\n text_b = line[8]\n label = line[-1]\n if dom != -1:\n 
label = [label, str(dom)]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, type):\n examples = []\n\n for i in range(0, len(lines), self.interval):\n text_a = lines[i]\n label = lines[i + 2]\n\n examples.append(\n InputExample(guid=len(examples), text_a=text_a, pos=None, label=label))\n return examples", "def _create_examples(self, df, set_type):\n examples = []\n for i, row in df.iterrows():\n guid = \"%s-%s\" % (set_type, i)\n text_a = row[4]\n label = row[2]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self, lines, set_type, dom=-1):\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[3]\n label = line[1]\n if dom != -1:\n label = [label, str(dom)]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, ids) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, ids )\n text_a = lines[ids]['term']\n text_b = lines[ids]['sentence']\n label = lines[ids]['polarity']\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n label_map = {label: i for i, label in enumerate(label_list)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n return features", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n # Only the test set has a header\n if set_type == \"test\" and i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n if set_type == \"test\":\n text_a = tokenization.convert_to_unicode(line[1])\n label = \"0\"\n else:\n text_a = tokenization.convert_to_unicode(line[1])\n label = tokenization.convert_to_unicode(line[0])\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples" ]
[ "0.61118716", "0.592422", "0.58838516", "0.585979", "0.5801184", "0.5781311", "0.57746387", "0.57520574", "0.57209575", "0.56961864", "0.5692046", "0.5686498", "0.5684553", "0.5684334", "0.56032795", "0.5577988", "0.55708474", "0.55665946", "0.55582416", "0.5552899", "0.5534642", "0.55299616", "0.55197054", "0.5517937", "0.55169827", "0.55169827", "0.5511616", "0.550572", "0.5499984", "0.5494332", "0.5491262", "0.54877734", "0.54781", "0.547139", "0.5471115", "0.547109", "0.54651093", "0.5456408", "0.5456408", "0.54546094", "0.54541314", "0.54510176", "0.545045", "0.5447743", "0.5443042", "0.5443042", "0.5442952", "0.54371274", "0.5437034", "0.5434161", "0.5423708", "0.5423645", "0.54224443", "0.54152596", "0.54126334", "0.5410348", "0.5401643", "0.53947794", "0.53685164", "0.5364609", "0.53529596", "0.5346267", "0.5344079", "0.5342889", "0.53388065", "0.5338763", "0.5334941", "0.53341085", "0.53306425", "0.5329905", "0.5326312", "0.53261626", "0.5323165", "0.5318985", "0.5316931", "0.53124577", "0.5288165", "0.528546", "0.5284152", "0.5281828", "0.52811784", "0.5279969", "0.527894", "0.527894", "0.527894", "0.52783775", "0.52757883", "0.5275729", "0.5273562", "0.52716976", "0.52700806", "0.5265037", "0.526453", "0.52609104", "0.52589476", "0.52557975", "0.5252407", "0.5246993", "0.52448076", "0.52409756" ]
0.56218064
14
Converts examples into Text-to-Text batches to be used with a model like T5. Inputs are prefixed with a text prompt that indicates the task to perform.
def convert_xnli_examples_to_features(self):
        features = self.features
        lang_filtered_features = []
        for ex_index, example in enumerate(self.examples):
            language = example.guid.split('-')[1]
            if language in self.lang_list:
                lang_filtered_features.append(features[ex_index] + [language])
        return lang_filtered_features
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batch_tokenize_fn(examples):\n sources = examples[config.source_lang]\n targets = examples[config.target_lang]\n model_inputs = config.tokenizer(sources, max_length=config.max_source_length, truncation=True)\n\n # setup the tokenizer for targets,\n # huggingface expects the target tokenized ids to be stored in the labels field\n with config.tokenizer.as_target_tokenizer():\n labels = config.tokenizer(targets, max_length=config.max_target_length, truncation=True)\n\n model_inputs[\"labels\"] = labels[\"input_ids\"]\n return model_inputs", "def create_InputExamples(self, data, labels):\n examples = []\n for (i, u_tips) in enumerate(data):\n for text in u_tips:\n examples.append(\n run_classifier.InputExample(\n guid=None,\n text_a=text,\n text_b=None,\n label=np.argmax(labels[i])\n )\n )\n return examples", "def create_examples(topics, sentences):\n input_examples = []\n \n for i in range(len(sentences)):\n input_examples.append(InputExample(text_a=topics[i], text_b=sentences[i], label='NoArgument'))\n return input_examples", "def read_examples_string(input_text):#(input_file, input_text):\n examples = []\n unique_id = 0\n \n with io.StringIO(input_text) as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline())\n if not line:\n break\n\n line = line.strip()\n text_a = None\n text_b = None\n m = re.match(r\"^(.*) \\|\\|\\| (.*)$\", line)\n\n if m is None:\n text_a = line\n else:\n text_a = m.group(1)\n text_b = m.group(2)\n\n examples.append(InputExample(unique_id=unique_id,\n text_a=text_a, \n text_b=text_b))\n unique_id += 1\n return examples", "def test_text_task(self):\n args = BASE_ARGS.copy()\n args.update(TEXT_ARGS)\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 1.5, 'failed to train image_seq2seq on text task'\n )", "def batchify(self, observations):\n # valid examples\n exs = [ex for ex in observations if 'text' in ex]\n # the indices of the valid (non-empty) tensors\n valid_inds = [i for i, ex in enumerate(observations) if 'text' in ex]\n\n # set up the input tensors\n batchsize = len(exs)\n if batchsize == 0:\n return None, None, None\n # tokenize the text\n parsed_x = [deque(maxlen=self.truncate) for _ in exs]\n for dq, ex in zip(parsed_x, exs):\n dq += self.parse(ex['text'])\n # parsed = [self.parse(ex['text']) for ex in exs]\n max_x_len = max((len(x) for x in parsed_x))\n for x in parsed_x:\n # left pad with zeros\n x.extendleft([self.fairseq_dict.pad()] * (max_x_len - len(x)))\n xs = torch.LongTensor(parsed_x)\n\n # set up the target tensors\n ys = None\n if 'labels' in exs[0]:\n # randomly select one of the labels to update on, if multiple\n labels = [random.choice(ex.get('labels', [''])) for ex in exs]\n parsed_y = [deque(maxlen=self.truncate) for _ in labels]\n for dq, y in zip(parsed_y, labels):\n dq.extendleft(reversed(self.parse(y)))\n for y in parsed_y:\n y.append(self.fairseq_dict.eos())\n # append EOS to each label\n max_y_len = max(len(y) for y in parsed_y)\n for y in parsed_y:\n y += [self.fairseq_dict.pad()] * (max_y_len - len(y))\n ys = torch.LongTensor(parsed_y)\n return xs, ys, valid_inds", "def args_batch_to_text(args_batch: ArgsBatch) -> Text:\n lines = []\n for args in args_batch:\n lines.append('; '.join(str(a) for a in args))\n return '\\n'.join(lines)", "def _create_examples(self, lines, kb_data, set_type):\n examples = []\n for idx, line in enumerate(lines):\n item = json.loads(line.strip())\n question_id = \"%s-%s\" % (set_type, idx)\n \n context_a_list = kb_data[idx]['answerA']\n 
context_b_list = kb_data[idx]['answerB']\n context_c_list = kb_data[idx]['answerC']\n\n context_a = \"\"\n for l in context_a_list[:1]:\n context_a += l.replace(\"\\n\",\". \")\n context_a = context_a[:-1]\n\n context_b = \"\"\n for l in context_b_list[:1]:\n context_b += l.replace(\"\\n\",\". \")\n context_b = context_b[:-1]\n\n context_c = \"\"\n for l in context_c_list[:1]:\n context_c += l.replace(\"\\n\",\". \")\n context_c = context_c[:-1]\n \n \n question = item[\"context\"] + item[\"question\"]\n endings = [item[\"answerA\"],item[\"answerB\"],item[\"answerC\"] ]\n label = item[\"correct\"]\n #race_id = \"%s-%s\" % (set_type, data_raw[\"race_id\"])\n #article = data_raw[\"article\"]\n #for i in range(len(data_raw[\"answers\"])):\n #truth = str(ord(data_raw[\"answers\"][i]) - ord(\"A\"))\n #question = data_raw[\"questions\"][i]\n #options = data_raw[\"options\"][i]\n\n examples.append(\n InputExample(\n example_id=question_id,\n question=question,\n contexts=[context_a,context_b,context_c],\n endings=[endings[0], endings[1], endings[2]],#, options[3]\n label=label,\n )\n )\n return examples", "def _create_examples(self, lines):\n examples = []\n for (i, line) in enumerate(lines):\n logger.info(line)\n guid = int(line[0])\n label = int(line[1])\n text = \" \".join(clean_tokens(line[3].split()))\n if guid < 1000:\n args_char_offset = find_char_offsets(text, line[2].split(\"-\"))\n else:\n args_char_offset = [int(i) for i in line[2].split('-')]\n examples.append(\n InputExample(guid=guid, text=text, args_char_offset=args_char_offset, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n guid = line[0]\n text_a = line[1] + \" . \" + line[2]\n text_b = line[-1]\n label = 0\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def convert_examples_to_features(self):\n features = []\n max_label_len = 0\n # find ou the max label length\n labels_list = []\n for ex_index, example in enumerate(self.examples):\n processor = example.processor\n label_ids = self.tokenizer.text_to_ids(processor.label2string(example.label)) + [self.tokenizer.eos_id]\n max_label_len = max(len(label_ids), max_label_len)\n labels_list.append(label_ids)\n if self.max_seq_length_decoder is None:\n self.max_seq_length_decoder = max_label_len\n else:\n self.max_seq_length_decoder = max(\n self.max_seq_length_decoder, max_label_len\n ) # take the max of the two to be conservative\n for ex_index, example in enumerate(self.examples):\n taskname = example.taskname\n taskname_ids = self.tokenizer.text_to_ids(taskname)\n processor = example.processor\n if ex_index % 10000 == 0:\n logging.info(f\"Writing example {ex_index} of {len(self.examples)}\")\n label_ids = labels_list[ex_index]\n enc_query = processor.get_ptune_query(\n example.content,\n self.pseudo_token_id,\n self.max_seq_length - self.max_seq_length_decoder + 1,\n self.templates,\n self.tokenizer,\n )\n input_ids = enc_query + label_ids[:-1]\n labels = [SMALL_NUM for i in range(len(enc_query) - 1)] + label_ids\n features.append([input_ids, labels, enc_query, taskname_ids])\n return features", "def qkgnn_convert_examples_to_features(\r\n examples,\r\n tokenizer,\r\n max_length=512,\r\n task=None,\r\n label_list=None,\r\n output_mode=None,\r\n pad_on_left=False,\r\n pad_token=0,\r\n pad_token_segment_id=0,\r\n mask_padding_with_zero=True,\r\n):\r\n\r\n if task is not None:\r\n processor = gnn_processors[task]()\r\n if label_list is 
None:\r\n label_list = processor.get_labels()\r\n logger.info(\"Using label list %s for task %s\" % (label_list, task))\r\n if output_mode is None:\r\n output_mode = gnn_output_modes[task]\r\n logger.info(\"Using output mode %s for task %s\" % (output_mode, task))\r\n label_map = {label: i for i, label in enumerate(label_list)}\r\n\r\n\r\n # TODO : optimize this part if out of memory error occurs\r\n def convert_inputs_to_processed(inputs):\r\n input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"]\r\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\r\n # tokens are attended to.\r\n attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\r\n\r\n # Zero-pad up to the sequence length.\r\n padding_length = max_length - len(input_ids)\r\n if pad_on_left:\r\n input_ids = ([pad_token] * padding_length) + input_ids\r\n attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask\r\n token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids\r\n else:\r\n input_ids = input_ids + ([pad_token] * padding_length)\r\n attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)\r\n token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)\r\n assert len(input_ids) == max_length, \"Error with input length {} vs {}\".format(len(input_ids), max_length)\r\n assert len(attention_mask) == max_length, \"Error with input length {} vs {}\".format(\r\n len(attention_mask), max_length\r\n )\r\n assert len(token_type_ids) == max_length, \"Error with input length {} vs {}\".format(\r\n len(token_type_ids), max_length\r\n )\r\n\r\n return input_ids, attention_mask, token_type_ids\r\n\r\n # TODO : optimize this part if out of memory error occurs\r\n def qkgnn_convert_single_exampl_to_feature(example, ex_index=10):\r\n inputs_q = tokenizer.encode_plus(example.text_a, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_k = tokenizer.encode_plus(example.text_b, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_qk = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_qk1 = tokenizer.encode_plus(example.text_a, example.k1, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_qk2 = tokenizer.encode_plus(example.text_a, example.k2, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_qk3 = tokenizer.encode_plus(example.text_a, example.k3, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_kq1 = tokenizer.encode_plus(example.text_b, example.q1, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_kq2 = tokenizer.encode_plus(example.text_b, example.q2, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_kq3 = tokenizer.encode_plus(example.text_b, example.q3, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_kk1 = tokenizer.encode_plus(example.text_b, example.k1, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_kk2 = tokenizer.encode_plus(example.text_b, example.k2, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_kk3 = tokenizer.encode_plus(example.text_b, example.k3, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_qq1 = tokenizer.encode_plus(example.text_a, example.q1, add_special_tokens=True, max_length=max_length, 
truncation=True)\r\n inputs_qq2 = tokenizer.encode_plus(example.text_a, example.q2, add_special_tokens=True, max_length=max_length, truncation=True)\r\n inputs_qq3 = tokenizer.encode_plus(example.text_a, example.q3, add_special_tokens=True, max_length=max_length, truncation=True)\r\n # generate [input_ids, input_mask, segment_ids]\r\n # for q self\r\n input_ids_q, input_mask_q, segment_ids_q = convert_inputs_to_processed(inputs_q)\r\n # for k self\r\n input_ids_k, input_mask_k, segment_ids_k = convert_inputs_to_processed(inputs_k)\r\n # for qk\r\n input_ids_qk, input_mask_qk, segment_ids_qk = convert_inputs_to_processed(inputs_qk)\r\n # for qk1\r\n input_ids_qk1, input_mask_qk1, segment_ids_qk1 = convert_inputs_to_processed(inputs_qk1)\r\n # for qk2\r\n input_ids_qk2, input_mask_qk2, segment_ids_qk2 = convert_inputs_to_processed(inputs_qk2)\r\n # for qk3\r\n input_ids_qk3, input_mask_qk3, segment_ids_qk3 = convert_inputs_to_processed(inputs_qk3)\r\n # for kq1\r\n input_ids_kq1, input_mask_kq1, segment_ids_kq1 = convert_inputs_to_processed(inputs_kq1)\r\n # for kq2\r\n input_ids_kq2, input_mask_kq2, segment_ids_kq2 = convert_inputs_to_processed(inputs_kq2)\r\n # for kq3\r\n input_ids_kq3, input_mask_kq3, segment_ids_kq3 = convert_inputs_to_processed(inputs_kq3)\r\n # for kk1\r\n input_ids_kk1, input_mask_kk1, segment_ids_kk1 = convert_inputs_to_processed(inputs_kk1)\r\n # for kk2\r\n input_ids_kk2, input_mask_kk2, segment_ids_kk2 = convert_inputs_to_processed(inputs_kk2)\r\n # for kk3\r\n input_ids_kk3, input_mask_kk3, segment_ids_kk3 = convert_inputs_to_processed(inputs_kk3)\r\n # for qq1\r\n input_ids_qq1, input_mask_qq1, segment_ids_qq1 = convert_inputs_to_processed(inputs_qq1)\r\n # for qq2\r\n input_ids_qq2, input_mask_qq2, segment_ids_qq2 = convert_inputs_to_processed(inputs_qq2)\r\n # for qq3\r\n input_ids_qq3, input_mask_qq3, segment_ids_qq3 = convert_inputs_to_processed(inputs_qq3)\r\n\r\n # generate label\r\n if output_mode == \"classification\" or output_mode == \"classification2\":\r\n label = label_map[example.label]\r\n elif output_mode == \"regression\":\r\n label = float(example.label)\r\n else:\r\n raise KeyError(output_mode)\r\n\r\n # log info\r\n if ex_index < 5:\r\n logger.info(\"*** Example ***\")\r\n logger.info(\"guid: %s\" % (example.guid))\r\n logger.info(\"text_a: %s\" % (example.text_a))\r\n logger.info(\"text_b: %s\" % (example.text_b))\r\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids_qk]))\r\n logger.info(\"attention_mask: %s\" % \" \".join([str(x) for x in input_mask_qk]))\r\n logger.info(\"token_type_ids: %s\" % \" \".join([str(x) for x in segment_ids_qk]))\r\n logger.info(\"label: %s (id = %d)\" % (example.label, label))\r\n # generate the feature for single example\r\n feature = InputFeatures_GNN(\r\n input_ids_q=input_ids_q,\r\n input_mask_q=input_mask_q,\r\n segment_ids_q=segment_ids_q,\r\n\r\n input_ids_k=input_ids_k,\r\n input_mask_k=input_mask_k,\r\n segment_ids_k=segment_ids_k,\r\n\r\n input_ids_qk=input_ids_qk,\r\n input_mask_qk=input_mask_qk,\r\n segment_ids_qk=segment_ids_qk,\r\n\r\n input_ids_qk1=input_ids_qk1,\r\n input_mask_qk1=input_mask_qk1,\r\n segment_ids_qk1=segment_ids_qk1,\r\n input_ids_qk2=input_ids_qk2,\r\n input_mask_qk2=input_mask_qk2,\r\n segment_ids_qk2=segment_ids_qk2,\r\n input_ids_qk3=input_ids_qk3,\r\n input_mask_qk3=input_mask_qk3,\r\n segment_ids_qk3=segment_ids_qk3,\r\n\r\n input_ids_kq1=input_ids_kq1,\r\n input_mask_kq1=input_mask_kq1,\r\n segment_ids_kq1=segment_ids_kq1,\r\n 
input_ids_kq2=input_ids_kq2,\r\n input_mask_kq2=input_mask_kq2,\r\n segment_ids_kq2=segment_ids_kq2,\r\n input_ids_kq3=input_ids_kq3,\r\n input_mask_kq3=input_mask_kq3,\r\n segment_ids_kq3=segment_ids_kq3,\r\n\r\n input_ids_qq1=input_ids_qq1,\r\n input_mask_qq1=input_mask_qq1,\r\n segment_ids_qq1=segment_ids_qq1,\r\n input_ids_qq2=input_ids_qq2,\r\n input_mask_qq2=input_mask_qq2,\r\n segment_ids_qq2=segment_ids_qq2,\r\n input_ids_qq3=input_ids_qq3,\r\n input_mask_qq3=input_mask_qq3,\r\n segment_ids_qq3=segment_ids_qq3,\r\n\r\n input_ids_kk1=input_ids_kk1,\r\n input_mask_kk1=input_mask_kk1,\r\n segment_ids_kk1=segment_ids_kk1,\r\n input_ids_kk2=input_ids_kk2,\r\n input_mask_kk2=input_mask_kk2,\r\n segment_ids_kk2=segment_ids_kk2,\r\n input_ids_kk3=input_ids_kk3,\r\n input_mask_kk3=input_mask_kk3,\r\n segment_ids_kk3=segment_ids_kk3,\r\n\r\n row_id=int(example.guid),\r\n label_id=label,\r\n task_id=task,\r\n is_real_example=True)\r\n return feature\r\n\r\n features = []\r\n for (ex_index, example) in tqdm(enumerate(examples)):\r\n features.append(qkgnn_convert_single_exampl_to_feature(example, ex_index=ex_index))\r\n\r\n return features", "def _create_examples(self, lines: List[str], mode: Split):\n examples = []\n text_index = 1 if mode == Split.test else 0\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (mode.value, i)\n text_a = line[text_index]\n if len(line) > text_index + 1:\n label = line[text_index + 1]\n else:\n label = None\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self,lines, set_type):\n examples = []\n if len(lines[1]) == 2 and len(lines[1][0]) == 1: # label text_a\n for (i, line) in enumerate(lines):\n guid = f\"{set_type}-{i}\"\n text_a = tx.utils.compat_as_text(line[1])\n label = tx.utils.compat_as_text(line[0])\n examples.append(InputExample(guid=guid, text_a=text_a,\n text_b=\"\", label=label))\n\n elif len(lines[1]) == 2 and len(lines[1][0]) > 1: # text_a text_b (when test file has no label)\n for (i, line) in enumerate(lines):\n guid = f\"{set_type}-{i}\"\n text_a = tx.utils.compat_as_text(line[0])\n text_b = tx.utils.compat_as_text(line[1])\n if set_type == \"test\":\n label = \"0\"\n else:\n print(\"the file is not for testing, yet contains no labels\")\n exit()\n examples.append(InputExample(guid=guid, text_a=text_a,\n text_b=text_b, label=label))\n elif len(lines[1]) == 1: # text_a (when test file has no label)\n for (i, line) in enumerate(lines):\n guid = f\"{set_type}-{i}\"\n text_a = tx.utils.compat_as_text(line[0])\n if set_type == \"test\":\n label = \"0\"\n else:\n print(\"the file is not for testing, yet contains no labels\")\n exit()\n examples.append(InputExample(guid=guid, text_a=text_a,\n text_b=\"\", label=label))\n elif len(lines[1]) == 3: # label text_a text_b\n for (i, line) in enumerate(lines):\n guid = f\"{set_type}-{i}\"\n text_a = tx.utils.compat_as_text(line[1])\n text_b = tx.utils.compat_as_text(line[2])\n\n label = tx.utils.compat_as_text(line[0])\n examples.append(InputExample(guid=guid, text_a=text_a,\n text_b=text_b, label=label))\n return examples", "def convert_examples_to_features(self):\n features = []\n for ex_index, example in enumerate(self.examples):\n if ex_index % 10000 == 0:\n logging.info(f\"Writing example {ex_index} of {len(self.examples)}\")\n\n text_to_text_query = self.processor.get_t5_prompted_query(example.text_a, example.text_b)\n enc_query = self.tokenizer.text_to_ids(text_to_text_query)\n if len(enc_query) 
> self.max_seq_length:\n enc_query = enc_query[: self.max_seq_length]\n dec_query = (\n [self.tokenizer.bos_id]\n + self.tokenizer.text_to_ids(self.processor.label2string(example.label))\n + [self.tokenizer.eos_id]\n )\n\n dec_input = dec_query[:-1]\n labels = dec_query[1:]\n\n features.append([enc_query, dec_input, labels])\n\n return features", "def create_examples(lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, str(i))\n text_a_id = line[0]\n text_a = tokenization.convert_to_unicode(line[1])\n text_b_id = line[2]\n text_b = tokenization.convert_to_unicode(line[3])\n label = tokenization.convert_to_unicode(line[-1])\n examples.append(InputExample(guid=guid, text_a_id=text_a_id, text_a=text_a, text_b_id=text_b_id, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[0])\n text_b = tokenization.convert_to_unicode(line[1])\n label = tokenization.convert_to_unicode(line[2])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines: List[str], mode: Split):\n # id,title,content,label\n test_mode = mode == Split.test\n title_index = 1\n content_index = 2\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (mode.value, line[0])\n try:\n text_a = line[title_index]\n text_b = line[content_index]\n if test_mode:\n label = None\n else:\n label = line[3]\n except IndexError:\n continue\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[1])\n text_b = tokenization.convert_to_unicode(line[2])\n label = tokenization.convert_to_unicode(line[3])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def batch_inference(question,context): \n inputs = tokenizer(question, context, \n return_tensors='pt', \n truncation=True, \n padding=True)\n \n # Move data to GPU\n inputs = inputs.to(device)\n \n # Feed data through the model\n with torch.no_grad():\n outputs = model(**inputs)\n\n # Q&A model outputs the two logit scores for each word.\n # One for its chance of being the start of the answer\n # and one for its chance of being the end\n start_logits = outputs.start_logits\n end_logits = outputs.end_logits\n \n # Find the words with the highest score\n # argmax(dim=1) means argmax with each sample\n start = start_logits.argmax(dim=1)\n end = end_logits.argmax(dim=1)\n \n # Return the answers\n # This is the point where we move the prediction back to main memory with .cpu()\n tokens = [tokenizer.convert_ids_to_tokens(x) for x in inputs[\"input_ids\"].cpu().numpy()]\n return [tokenizer.convert_tokens_to_string(x[start[i]:end[i]+1]) for i,x in enumerate(tokens)]", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0]))\n text_a = tokenization.convert_to_unicode(line[8])\n text_b = tokenization.convert_to_unicode(line[9])\n label = tokenization.convert_to_unicode(line[-1])\n examples.append(\n InputExample(guid=guid, 
text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[3])\n text_b = tokenization.convert_to_unicode(line[4])\n label = tokenization.convert_to_unicode(line[0])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines: List[str], mode: Split):\n test_mode = mode == Split.test\n q1_index = 1 if test_mode else 3\n q2_index = 2 if test_mode else 4\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (mode.value, line[0])\n try:\n text_a = line[q1_index]\n text_b = line[q2_index]\n label = None if test_mode else line[5]\n except IndexError:\n continue\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def convert_questions_to_features(examples, tokenizer, max_query_length=None):\n\n unique_id = 1000000000\n question_features = []\n\n for (example_index, example) in enumerate(tqdm(examples, desc='Converting questions')):\n\n query_tokens = tokenizer.tokenize(example.question_text)\n if max_query_length is None:\n max_query_length = len(query_tokens)\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n for _ in enumerate(range(1)):\n tokens_ = []\n tokens_.append(\"[CLS]\")\n for token in query_tokens:\n tokens_.append(token)\n tokens_.append(\"[SEP]\")\n\n input_ids_ = tokenizer.convert_tokens_to_ids(tokens_)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask_ = [1] * len(input_ids_)\n\n # Zero-pad up to the sequence length.\n while len(input_ids_) < max_query_length + 2:\n input_ids_.append(0)\n input_mask_.append(0)\n\n assert len(input_ids_) == max_query_length + 2\n assert len(input_mask_) == max_query_length + 2\n\n if example_index < 1:\n # logger.info(\"*** Example ***\")\n # logger.info(\"unique_id: %s\" % (unique_id))\n # logger.info(\"example_index: %s\" % (example_index))\n logger.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in query_tokens]))\n # logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids_]))\n # logger.info(\n # \"input_mask: %s\" % \" \".join([str(x) for x in input_mask_]))\n\n question_features.append(\n QuestionFeatures(\n unique_id=unique_id,\n example_index=example_index,\n tokens_=tokens_,\n input_ids=input_ids_,\n input_mask=input_mask_))\n unique_id += 1\n\n return question_features", "def _create_examples(self, lines, set_type):\n test_mode = set_type == \"test\"\n if test_mode:\n lines = lines[1:]\n text_index = 1 if test_mode else 3\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[text_index]\n label = None if test_mode else line[1]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[1])\n label = tokenization.convert_to_unicode(line[2])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in 
enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[1])\n label = tokenization.convert_to_unicode(line[2])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[3])\n label = tokenization.convert_to_unicode(line[1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0]))\n text_a = tokenization.convert_to_unicode(line[8])\n text_b = tokenization.convert_to_unicode(line[9])\n if set_type == \"test\":\n label = \"contradiction\"\n else:\n label = tokenization.convert_to_unicode(line[-1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for i, line in lines.iterrows():\n guid = \"%s-%s\" % (set_type, i)\n text_a = line['sentence1']\n text_b = line['sentence2']\n label = line['label']\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def print_examples(example_iter, model, num=0, max_len=100,\n bos_index=1,\n src_eos_index = None,\n trg_eos_index = None,\n src_vocab=None, trg_vocab=None):\n model.eval()\n count=0\n\n BOS_TOKEN = \"<s>\"\n EOS_TOKEN = \"</s>\"\n UNK_TOKEN = \"<unk>\"\n\n if src_vocab is not None and trg_vocab is not None:\n src_bos_index = src_vocab.stoi[BOS_TOKEN]\n src_eos_index = src_vocab.stoi[EOS_TOKEN]\n trg_unk_index = trg_vocab.stoi[UNK_TOKEN]\n # trg_bos_index = trg_vocab.stoi[BOS_TOKEN]\n # trg_eos_index = trg_vocab.stoi[EOS_TOKEN]\n else:\n src_bos_index = 0\n src_eos_index = 1\n trg_unk_index = 2\n # trg_bos_index = 1\n # trg_eos_index = None\n\n for i, batch in enumerate(example_iter, 1):\n src = batch.src.cpu().numpy()[0, :]\n trg_idx = batch.trg_idx.cpu().numpy()[0, :]\n\n # remove </s>\n src = src[1:] if src[0]==src_bos_index else src\n src = src[:-1] if src[-1]==src_eos_index else src\n # trg = trg[:-1] if trg[-1]==trg_eos_index else trg\n\n result = greedy_decode(model, batch.src_idx, batch.src_mask, batch.src_lengths)\n print()\n print(\"Example %d\" % i)\n print(\"Source: \", \" \".join(lookup_words(src, vocab=src_vocab)))\n print()\n print(\"Target: \", set(lookup_words(trg_idx, vocab=trg_vocab)))\n print()\n print(\"Prediction: \", \" \".join(lookup_words(result[0], vocab=trg_vocab)))\n\n count += 1\n if count == num:\n break", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[8]\n text_b = line[9]\n label = line[-1]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def convert_examples_to_features(examples, max_seq_length, tokenizer):\n\n features = []\n for (ex_index, example) in enumerate(examples):\n print(example.text_a)\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n if len(tokens_a) > max_seq_length - 2:\n 
tokens_a = tokens_a[:(max_seq_length - 2)]\n\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n input_mask = [1] * len(input_ids)\n\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n \n labels_ids = []\n for label in example.labels:\n labels_ids.append(int(label))\n \n if ex_index < 0:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label: %s (id = %s)\" % (example.labels, labels_ids))\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_ids=labels_ids))\n return features", "def _create_examples(self, data, set_type):\n examples = []\n for (i, elem) in enumerate(data):\n guid = \"%s-%s\" % (set_type, i)\n text = elem[0]\n label = elem[1]\n examples.append(\n InputExample(guid=guid, text=text, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n try:\n text_a = line[3]\n text_b = line[4]\n label = line[5]\n except IndexError:\n continue\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def generate_text(pmodel, num_generate, temperature, start_string):\n\n # Converting the start string to numbers (vectorizing)\n input_eval = [char2idx[s] for s in start_string]\n input_eval = tf.expand_dims(input_eval, 0)\n\n # Empty string to store the results\n text_generated = np.empty(1)\n\n # Here batch size = 1\n pmodel.reset_states()\n for i in range(num_generate):\n \n predictions = pmodel(input_eval)\n \n # remove the batch dimension\n predictions = tf.squeeze(predictions, 0)\n \n # using a multinomial distribution to predict the word returned by the model\n predictions = predictions / temperature\n predicted_id = tf.random.categorical(predictions, num_samples=1)[-1,0].numpy()\n \n # We pass the predicted word as the next input to the model\n # along with the previous hidden state\n input_eval = tf.expand_dims([predicted_id], 0)\n \n text_generated = np.vstack((text_generated, idx2char[predicted_id].tolist()))\n \n return text_generated", "def convert_examples_to_features(\n examples: List[InputExample],\n label_list: List[str],\n max_length: int,\n tokenizer: PreTrainedTokenizer,\n pad_token_segment_id=0,\n pad_on_left=False,\n pad_token=0,\n mask_padding_with_zero=True,\n) -> List[InputFeatures]:\n\n label_map = {label: i for i, label in enumerate(label_list)}\n\n features = []\n for (ex_index, example) in tqdm.tqdm(enumerate(examples), desc=\"convert examples to features\"):\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n choices_features = []\n for ending_idx, (context, ending) in enumerate(zip(example.contexts, example.endings)):\n text_a = 
context\n if example.question.find(\"_\") != -1:\n # this is for cloze question\n text_b = example.question.replace(\"_\", ending)\n else:\n text_b = example.question + \" \" + ending\n if len(text_a) == 0:\n logger.info(\"context of example %d have length 0\" % (ex_index))\n text_a = \" \"\n\n inputs = tokenizer.encode_plus(text_a, text_b, add_special_tokens=True, max_length=max_length,)\n if \"num_truncated_tokens\" in inputs and inputs[\"num_truncated_tokens\"] > 0:\n logger.info(\n \"Attention! you are cropping tokens (swag task is ok). \"\n \"If you are training ARC and RACE and you are poping question + options,\"\n \"you need to try to use a bigger max seq length!\"\n )\n\n input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"]\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding_length = max_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask\n token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids\n else:\n input_ids = input_ids + ([pad_token] * padding_length)\n attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)\n token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)\n\n assert len(input_ids) == max_length\n assert len(attention_mask) == max_length\n assert len(token_type_ids) == max_length\n choices_features.append((input_ids, attention_mask, token_type_ids))\n\n label = label_map[example.label]\n\n if ex_index < 2:\n logger.info(\"*** Example ***\")\n logger.info(\"race_id: {}\".format(example.example_id))\n for choice_idx, (input_ids, attention_mask, token_type_ids) in enumerate(choices_features):\n logger.info(\"choice: {}\".format(choice_idx))\n logger.info(\"input_ids: {}\".format(\" \".join(map(str, input_ids))))\n logger.info(\"attention_mask: {}\".format(\" \".join(map(str, attention_mask))))\n logger.info(\"token_type_ids: {}\".format(\" \".join(map(str, token_type_ids))))\n logger.info(\"label: {}\".format(label))\n\n features.append(InputFeatures(example_id=example.example_id, choices_features=choices_features, label=label,))\n\n return features", "def _create_examples(self, lines, set_type):\n examples = []\n for i, line in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[5]\n text_b = line[6]\n pairID = line[7][2:] if line[7].startswith(\"ex\") else line[7]\n label = line[0]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[3]\n label = line[1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[3]\n label = line[1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = 
line[3]\n text_b = line[4]\n label = line[0]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0]) if set_type != \"test\" else line[0]\n text_a = line[1]\n text_b = line[2]\n label = line[-1] if set_type != \"test\" else \"entailment\"\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n # label_map = {label : i for i, label in enumerate(label_list)}\n\n features = []\n exindex = {}\n passagelens = []\n\n sum_of_labels = 0\n\n for (ex_index, example) in tqdm(enumerate(examples), desc=\"Tokenizing:\"):\n if example.text_a not in tokenmap.keys():\n tokens_a = tokenizer.tokenize(example.text_a)\n tokenmap[example.text_a] = tokens_a\n else:\n tokens_a = tokenmap[example.text_a]\n\n tokens_b = None\n if example.text_b:\n if example.text_b not in tokenmap.keys():\n tokens_b = tokenizer.tokenize(example.text_b)\n tokenmap[example.text_b] = tokens_b\n else:\n tokens_b = tokenmap[example.text_b]\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n\n passagelens.append(len(tokens_a) + len(tokens_b) + 3)\n\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n # label_id = label_map[example.label]\n label_id = example.label\n\n sum_of_labels += label_id\n\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label: %s (id = %d)\" % (str(example.label), 0))\n\n exindex[ex_index] = example.guid\n features.append(\n InputFeatures(uuid=ex_index,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n\n print(\"Passage Token Lengths Distribution\", passagelens[-1], np.percentile(passagelens, 50),\n np.percentile(passagelens, 90), np.percentile(passagelens, 95), np.percentile(passagelens, 99))\n return features, exindex", "def convert_examples_to_features(\n examples,\n tokenizer,\n max_length=512,\n task=None,\n label_list=None,\n output_mode=None,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n mask_padding_with_zero=True,\n multilabel=False,\n):\n\n if task is not None:\n processor = glue_processors[task]()\n if label_list is None:\n label_list = processor.get_labels()\n logger.info(\"Using label list %s for task %s\" % (label_list, task))\n if output_mode is None:\n output_mode = glue_output_modes[task]\n logger.info(\"Using output mode %s for task %s\" % (output_mode, task))\n\n label_map = {label: i for i, label in enumerate(label_list)}\n if multilabel:\n domain_label_map = {label: i for i, label in enumerate([\"0\", \"1\"])}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n len_examples = len(examples)\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d/%d\" % (ex_index, len_examples))\n\n inputs = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, )\n input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"]\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding_length = max_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask\n token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids\n else:\n input_ids = input_ids + ([pad_token] * padding_length)\n attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)\n token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)\n\n assert len(input_ids) == max_length, \"Error with input length {} vs {}\".format(len(input_ids), max_length)\n assert len(attention_mask) == max_length, \"Error with input length {} vs {}\".format(\n len(attention_mask), max_length\n )\n assert len(token_type_ids) == max_length, \"Error with input length {} vs {}\".format(\n len(token_type_ids), max_length\n )\n\n if output_mode == \"classification\":\n label = label_map[example.label] if not multilabel else label_map[example.label[0]]\n elif output_mode == \"regression\":\n label = float(example.label) if not multilabel else float(example.label[0])\n else:\n raise KeyError(output_mode)\n if multilabel:\n label = [label, domain_label_map[example.label[1]]]\n\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"attention_mask: %s\" % \" \".join([str(x) for x in attention_mask]))\n logger.info(\"token_type_ids: %s\" % \" \".join([str(x) for x in token_type_ids]))\n if multilabel:\n logger.info(\"label: %s (id = %d)\" % (example.label[0], label[0]))\n logger.info(\"domain label: %s (id = %d)\" % (example.label[1], label[1]))\n else:\n logger.info(\"label: %s (id = %d)\" % (example.label, label))\n\n features.append(\n InputFeatures(\n input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label\n )\n )\n\n return features", "def glue_convert_examples_to_features(\n examples,\n tokenizer,\n max_length=512,\n task=None,\n label_list=None,\n output_mode=None,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n mask_padding_with_zero=True,\n):\n is_tf_dataset = False\n if is_tf_available() and isinstance(examples, tf.data.Dataset):\n is_tf_dataset = True\n\n if task is not None:\n processor = glue_processors[task]()\n if label_list is None:\n label_list = processor.get_labels()\n logger.info(\"Using label list %s for task %s\" % (label_list, task))\n if output_mode is None:\n output_mode = glue_output_modes[task]\n logger.info(\"Using output mode %s for task %s\" % (output_mode, task))\n\n label_map = {label: i for i, label in enumerate(label_list)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n len_examples = 0\n if is_tf_dataset:\n example = processor.get_example_from_tensor_dict(example)\n example = processor.tfds_map(example)\n len_examples = tf.data.experimental.cardinality(examples)\n else:\n len_examples = len(examples)\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d/%d\" % (ex_index, len_examples))\n\n inputs = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length,)\n input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"]\n\n # The mask has 1 for real tokens and 0 for padding 
tokens. Only real\n # tokens are attended to.\n attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding_length = max_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask\n token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids\n else:\n input_ids = input_ids + ([pad_token] * padding_length)\n attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)\n token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)\n\n assert len(input_ids) == max_length, \"Error with input length {} vs {}\".format(len(input_ids), max_length)\n assert len(attention_mask) == max_length, \"Error with input length {} vs {}\".format(\n len(attention_mask), max_length\n )\n assert len(token_type_ids) == max_length, \"Error with input length {} vs {}\".format(\n len(token_type_ids), max_length\n )\n\n if output_mode == \"classification\":\n label_id = label_map[example.label]\n elif output_mode == \"regression\":\n label_id = float(example.label)\n else:\n raise KeyError(output_mode)\n\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"attention_mask: %s\" % \" \".join([str(x) for x in attention_mask]))\n logger.info(\"token_type_ids: %s\" % \" \".join([str(x) for x in token_type_ids]))\n logger.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n features.append(\n InputFeatures(\n input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label_id=label_id\n )\n )\n return features", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, ids) in enumerate(lines):\n text_a = lines[ids]['sentence']\n examples.append(\n InputExample(text_a=text_a) )\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n # if i == 0:\n # continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[1]\n text_b = None\n label = line[2]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n # if i == 0:\n # continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[1]\n text_b = None\n label = line[2]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n guid = \"%s-%s\" % (set_type, i)\r\n text_a = line[3]\r\n text_b = line[4]\r\n# if set_type == 'test':\r\n# label = self.get_labels()[0]\r\n# else:\r\n# label = line[0]\r\n label = line[0]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[7]\n text_b = line[8]\n label = None if set_type == \"test\" else line[-1]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, 
line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n guid = \"%s-%s\" % (set_type, i)\r\n text_a = line[0]\r\n label = line[1]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\r\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0]) if set_type != \"test\" else line[0]\n try:\n text_a = line[3] if set_type != \"test\" else line[1]\n text_b = line[4] if set_type != \"test\" else line[2]\n label = line[5] if set_type != \"test\" else \"0\"\n except IndexError:\n continue\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[3])\n text_b = tokenization.convert_to_unicode(line[4])\n if set_type == \"test\":\n label = \"0\"\n else:\n label = tokenization.convert_to_unicode(line[0])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n guid = \"%s-%s\" % (set_type, line[0])\r\n text_a = line[1]\r\n text_b = line[2]\r\n label = line[-1]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n return examples", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n label_map = {label : i for i, label in enumerate(label_list)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens = example.text\n\n# # Account for [CLS] and [SEP] with \"- 2\"\n# if len(tokens) > max_seq_length - 2:\n# tokens = tokens[:(max_seq_length - 2)]\n\n bert_tokens = []\n orig_to_tok_map = []\n\n bert_tokens.append(\"[CLS]\")\n for token in tokens:\n new_tokens = tokenizer.tokenize(token)\n if len(bert_tokens) + len(new_tokens) > max_seq_length - 1:\n # print(\"You shouldn't see this since the test set is already pre-separated.\")\n break\n else:\n orig_to_tok_map.append(len(bert_tokens))\n bert_tokens.extend(new_tokens)\n bert_tokens.append(\"[SEP]\")\n\n if len(bert_tokens) == 2: # edge case\n continue\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n\n input_ids = tokenizer.convert_tokens_to_ids(bert_tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n\n segment_ids = [0] * max_seq_length # no use for our problem\n\n labels = example.label\n label_ids = [0] * max_seq_length\n label_mask = [0] * max_seq_length\n\n for label, target_index in zip(labels, orig_to_tok_map):\n label_ids[target_index] = label_map[label]\n label_mask[target_index] = 1\n\n assert len(segment_ids) == max_seq_length\n assert len(label_ids) == max_seq_length\n assert len(label_mask) == max_seq_length\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_ids=label_ids,\n label_mask=label_mask))\n return features", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n guid = \"%s-%s\" % (set_type, i)\r\n text_a = line[3]\r\n text_b = line[4]\r\n label = line[0]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n return examples", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n guid = \"%s-%s\" % (set_type, i)\r\n text_a = line[3]\r\n label = line[1]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\r\n return examples", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n guid = \"%s-%s\" % (set_type, line[0])\r\n text_a = line[8]\r\n text_b = line[9]\r\n label = line[-1]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n return examples", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n guid = \"%s-%s\" % (set_type, line[0])\r\n text_a = line[7]\r\n text_b = line[8]\r\n label = line[-1]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n return examples", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n guid = \"%s-%s\" % (set_type, line[0])\r\n text_a = line[1]\r\n text_b = line[2]\r\n if set_type != 'test':\r\n label = line[-1]\r\n else:\r\n label = self.get_labels()[0]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n return examples", "def read_examples(input_file):\n examples = []\n unique_id = 0\n with tf.gfile.GFile(input_file, \"r\") as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline())\n if not line:\n break\n \n line = line.strip()\n text_a = None\n text_b = None\n m = re.match(r\"^(.*) \\|\\|\\| (.*)$\", line)\n \n if m is None:\n text_a = line\n else:\n text_a = m.group(1)\n text_b = m.group(2)\n examples.append(InputExample(unique_id=unique_id,\n text_a=text_a, \n text_b=text_b))\n unique_id += 1\n return examples", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n if set_type != 'test':\r\n guid = \"%s-%s\" % (set_type, i)\r\n text_a = line[0]\r\n label = line[1]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\r\n else:\r\n guid = 
\"%s-%s\" % (set_type, i)\r\n text_a = line[1]\r\n label = self.get_labels()[0]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\r\n\r\n return examples", "def convert_examples_to_features(examples,label_list, max_seq_length,tokenizer):\r\n label_map = {}\r\n for (i, label) in enumerate(label_list):\r\n label_map[label] = i\r\n\r\n input_data=[]\r\n for (ex_index, example) in enumerate(examples):\r\n tokens_a = tokenizer.tokenize(example.text_a)\r\n tokens_b = None\r\n if example.text_b:\r\n tokens_b = tokenizer.tokenize(example.text_b)\r\n if tokens_b:\r\n # Modifies `tokens_a` and `tokens_b` in place so that the total\r\n # length is less than the specified length.\r\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\r\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\r\n else:\r\n # Account for [CLS] and [SEP] with \"- 2\"\r\n if len(tokens_a) > max_seq_length - 2:\r\n tokens_a = tokens_a[0:(max_seq_length - 2)]\r\n\r\n if ex_index % 10000 == 0:\r\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\r\n\r\n # The convention in BERT is:\r\n # (a) For sequence pairs:\r\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\r\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\r\n # (b) For single sequences:\r\n # tokens: [CLS] the dog is hairy . [SEP]\r\n # type_ids: 0 0 0 0 0 0 0\r\n #\r\n # Where \"type_ids\" are used to indicate whether this is the first\r\n # sequence or the second sequence. The embedding vectors for `type=0` and\r\n # `type=1` were learned during pre-training and are added to the wordpiece\r\n # embedding vector (and position vector). This is not *strictly* necessary\r\n # since the [SEP] token unambigiously separates the sequences, but it makes\r\n # it easier for the model to learn the concept of sequences.\r\n #\r\n # For classification tasks, the first vector (corresponding to [CLS]) is\r\n # used as as the \"sentence vector\". 
Note that this only makes sense because\r\n # the entire model is fine-tuned.\r\n tokens = []\r\n segment_ids = []\r\n tokens.append(\"[CLS]\")\r\n segment_ids.append(0)\r\n for token in tokens_a:\r\n tokens.append(token)\r\n segment_ids.append(0)\r\n tokens.append(\"[SEP]\")\r\n segment_ids.append(0)\r\n\r\n if tokens_b:\r\n for token in tokens_b:\r\n tokens.append(token)\r\n segment_ids.append(1)\r\n tokens.append(\"[SEP]\")\r\n segment_ids.append(1)\r\n\r\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\r\n\r\n input_mask = [1] * len(input_ids)\r\n\r\n while len(input_ids) < max_seq_length:\r\n input_ids.append(0)\r\n input_mask.append(0)\r\n segment_ids.append(0)\r\n assert len(input_ids) == max_seq_length\r\n assert len(input_mask) == max_seq_length\r\n assert len(segment_ids) == max_seq_length\r\n\r\n label_id = label_map[example.label]\r\n if ex_index < 3:\r\n tf.logging.info(\"*** Example ***\")\r\n tf.logging.info(\"guid: %s\" % (example.guid))\r\n tf.logging.info(\"tokens: %s\" % \" \".join([tokenization.printable_text(x) for x in tokens]))\r\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\r\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\r\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\r\n tf.logging.info(\"label: %s (id = %d)\" % (example.label, label_id))\r\n\r\n features = collections.OrderedDict()\r\n features[\"input_ids\"] = input_ids\r\n features[\"input_mask\"] = input_mask\r\n features[\"segment_ids\"] = segment_ids\r\n features[\"label_ids\"] =label_id\r\n input_data.append(features)\r\n\r\n return input_data", "def _create_examples(self, lines, set_type):\n examples = []\n for (_, data) in enumerate(lines):\n passage = data[\"passage\"][\"text\"]\n for Q in data[\"passage\"][\"questions\"]:\n question = Q[\"question\"]\n for A in Q[\"answers\"]:\n guid = f\"{set_type}-{data['idx']-Q['idx']-A['idx']}\"\n examples.append(\n InputExample(\n guid=guid,\n text_a=passage,\n text_b=question + \" \" + A[\"text\"],\n label=str(A[\"label\"]),\n )\n )\n return examples", "def _create_examples(self, data_dir, set_type):\n\t\texamples = []\n\t\tinput_file_data = os.path.join(data_dir, \"data.tsv\")\n\t\twith open(input_file_data, \"r\", encoding=\"utf-8-sig\") as f:\n\t\t\tfor i, inp in enumerate(f):\n\t\t\t\tinps = inp.split('\\t') \n\t\t\t\tguid = \"%s-%s\" % (set_type, i)\n\t\t\t\ttext_inp = inps[1].strip()\n\t\t\t\ttext_out = inps[2].strip()\n\t\t\t\texamples.append(InputExample(guid=guid, text_inp=text_inp, text_out=text_out))\n\t\t\t\t\n\t\t\t# Sort these out before returning\n\t\t\texamples = sorted(examples, key=sort_inp_len)\n\t\t\treturn examples", "def _convert_single_example(self, text_a, text_b):\n tokens = []\n input_ids = []\n segment_ids = []\n input_mask = []\n try:\n text_a = self.tokenizer.tokenize(text_a)\n if text_b:\n text_b = self.tokenizer.tokenize(text_b)\n self._truncate_seq_pair(text_a, text_b)\n\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n\n for token in text_a:\n tokens.append(token)\n segment_ids.append(0)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n\n if text_b:\n for token in text_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append('[SEP]')\n segment_ids.append(1)\n\n input_ids = self.tokenizer.convert_tokens_to_ids(tokens)\n\n input_mask = [1] * len(input_ids)\n\n while len(input_ids) < 50:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n except:\n self.logger.error()\n\n finally:\n return 
input_ids, input_mask, segment_ids", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n guid = \"%s-%s\" % (set_type, line[0])\r\n text_a = line[8]\r\n text_b = line[9]\r\n if set_type != 'test':\r\n label = line[-1]\r\n else:\r\n label = self.get_labels()[0]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n return examples", "def print_examples(example_iter, model, n=2, max_len=100, \n sos_index=1, \n src_eos_index=None, \n trg_eos_index=None, \n src_vocab=None, trg_vocab=None):\n\n model.eval()\n count = 0\n print()\n \n if src_vocab is not None and trg_vocab is not None:\n src_eos_index = src_vocab.stoi[EOS_TOKEN]\n trg_sos_index = trg_vocab.stoi[SOS_TOKEN]\n trg_eos_index = trg_vocab.stoi[EOS_TOKEN]\n else:\n src_eos_index = None\n trg_sos_index = 1\n trg_eos_index = None\n \n for i, batch in enumerate(example_iter):\n \n src = batch.src.cpu().numpy()[0, :]\n trg = batch.trg_y.cpu().numpy()[0, :]\n\n # remove </s> (if it is there)\n src = src[:-1] if src[-1] == src_eos_index else src\n trg = trg[:-1] if trg[-1] == trg_eos_index else trg \n \n result, _ = beam_decode(\n model, batch.src, batch.src_mask, batch.src_lengths,\n max_len=max_len, sos_index=trg_sos_index, eos_index=trg_eos_index)\n print(\"Example #%d\" % (i+1))\n print(\"Src : \", \" \".join(lookup_words(src, vocab=src_vocab)))\n print(\"Trg : \", \" \".join(lookup_words(trg, vocab=trg_vocab)))\n print(\"Pred: \", \" \".join(lookup_words(result, vocab=trg_vocab)))\n print()\n \n count += 1\n if count == n:\n break", "def _create_examples(self, lines, set_type):\n examples = []\n \n for (i, line) in enumerate(lines):\n sentence_number = 0\n premise_text = line[\"premise\"]\n modified_premise_text = re.sub(self.stage_name_pattern,\"\",premise_text)\n modified_premise_text = re.sub(self.w_patterns,\"\",modified_premise_text)\n hypothesis_text = line[\"hypothesis\"]\n hypothesis_text = re.sub(self.w_patterns,\"\",hypothesis_text)\n a_label = int(line[\"label\"])\n\n sentences = modified_premise_text.split('.')\n\n for j, sentence in enumerate(sentences):\n guid = \"\" + str(sentence_number) + \"\\t\" + str(i) + \"\\t\" + str(len(sentences)) + \"\\t\" + str(a_label)\n text_a = sentence\n text_b = hypothesis_text\n label = a_label\n sentence_number += 1\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n #print(\"16th sentence::\",sentences[16])\n\n return examples", "def text_generator(self, example_generator):\n\t\tif self._data_as_tf_example:\n\t\t\tquery_text = None\n\t\t\tquery_edge_list = None\n\t\t\tword_edge_list = None\n\t\t\t\n\t\t\twhile True:\n\t\t\t\te, epoch_num = example_generator.next() # e is a tf.Example\n\t\t\t\ttry:\n\t\t\t\t\tarticle_text = e.features.feature['article'].bytes_list.value[0] # document text\n\t\t\t\t\tabstract_text = e.features.feature['abstract'].bytes_list.value[0] # response text\n\t\t\t\t\tif self._hps.query_encoder.value:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tquery_text = e.features.feature['query'].bytes_list.value[0] # context text\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tquery_text = ''\n\t\t\t\t\tif self._hps.word_gcn.value:\n\t\t\t\t\t\tword_edge_list = []\n\t\t\t\t\t\tif self._hps.use_default_graph.value:\n\t\t\t\t\t\t\tword_edge_list = word_edge_list + ast.literal_eval(e.features.feature['word_edge_list'].bytes_list.value[0])\n\t\t\t\t\t\t#tf.logging.info((word_edge_list[0]))\n\t\t\t\t\t\tif 
self._hps.use_coref_graph.value:\n\t\t\t\t\t\t\tword_edge_list = word_edge_list + ast.literal_eval(e.features.feature['word_coref_edge_list'].bytes_list.value[0])\n\t\t\t\t\t\tif self._hps.use_entity_graph.value:\n\t\t\t\t\t\t\tword_edge_list = word_edge_list + ast.literal_eval(e.features.feature['word_entity_edge_list'].bytes_list.value[0])\n\t\t\t\t\t\tif self._hps.use_lexical_graph.value:\n\t\t\t\t\t\t\tword_edge_list = word_edge_list + ast.literal_eval(e.features.feature['word_lexical_edge_list'].bytes_list.value[0])\n\t\t\t\t\t#\tprint(word_edge_list)\n\t\t\t\t\t\n\n\t\t\t\t\tif self._hps.query_gcn.value:\n\t\t\t\t\t\tquery_edge_list = []\n\t\t\t\t\t\tif self._hps.use_default_graph.value:\n\t\t\t\t\t\t\tquery_edge_list = query_edge_list + ast.literal_eval(e.features.feature['query_edge_list'].bytes_list.value[0])\n\t\t\t\t\t\t\n\t\t\t\t\t\t'''\n\t\t\t\t\t\tThese are inter-sentence graph and may not be applicable\n\t\t\t\t\t\tif self._hps.use_coref_graph.value:\n\t\t\t\t\t\t\tquery_edge_list = query_edge_list + ast.literal_eval(e.features.feature['query_coref_edge_list'].bytes_list.value[0])\n\t\t\t\t\t\tif self._hps.use_entity_graph.value:\n\t\t\t\t\t\t\tquery_edge_list = query_edge_list + ast.literal_eval(e.features.feature['query_entity_edge_list'].bytes_list.value[0])\n\t\t\t\t\t\tif self._hps.use_lexical_graph.value:\n\t\t\t\t\t\t\tquery_edge_list = query_edge_list + ast.literal_eval(e.features.feature['query_lexical_edge_list'].bytes_list.value[0])\n\t\t\t\t\t\t'''\n\n\n\n\t\t\t\texcept ValueError:\n\t\t\t\t\ttf.logging.error('Failed to get article or abstract from example')\n\t\t\t\t\tcontinue\n\t\t\t\tif len(article_text)==0: # See https://github.com/abisee/pointer-generator/issues/1\n\t\t\t\t\ttf.logging.warning('Found an example with empty article text. 
Skipping it.')\n\t\t\t\telse:\n\t\t\t\t\t#tf.logging.info(abstract_text)\n\t\t\t\t\tyield (article_text, abstract_text, word_edge_list, query_text, query_edge_list, epoch_num)\n\t\t\t\n\t\telse:\n\n\t\t\twhile True:\n\t\t\t\te = example_generator.next()\n\t\t\t\tyield e", "def _create_examples(self, lines, set_type):\n examples = []\n for (_, data) in enumerate(lines):\n examples.append(\n InputExample(\n guid=f\"{set_type}-{data['idx']}\",\n text_a=data[\"passage\"],\n text_b=data[\"question\"],\n label=str(data[\"label\"]),\n )\n )\n return examples", "def translate_interactive(estimator, tokenizer_):\n \n predictor = ContinuePredict(estimator, continue_input_fn)\n while True:\n tf.logging.info(\"Enter the English sentence end with ENTER.\")\n raw_text = input().strip()\n if raw_text == r'\\q':\n predictor.close()\n break\n encoded_txt = _encode_and_add_eos(raw_text, tokenizer_)\n target = predictor.predict(encoded_txt)\n target = next(target)['outputs']\n target = _trim_and_decode(target, tokenizer_)\n tf.logging.info('\\t{}'.format(target))", "def generate_tpu(self, prompts: List[str]):\n from flax.training.common_utils import shard # pylint:disable=g-import-not-at-top,g-importing-member\n import jax # pylint:disable=g-import-not-at-top\n import time # pylint:disable=g-import-not-at-top\n import numpy as np # pylint:disable=g-import-not-at-top\n\n rng = jax.random.PRNGKey(0)\n rng = jax.random.split(rng, jax.device_count())\n\n assert prompts, \"prompt parameter cannot be empty\"\n print(\"Prompts: \", prompts)\n prompt_ids = self._pipeline.prepare_inputs(prompts)\n prompt_ids = shard(prompt_ids)\n print(\"Sharded prompt ids has shape:\", prompt_ids.shape)\n if self._run_with_profiler:\n jax.profiler.start_trace(self._profiler_dir)\n\n time_start = time.time()\n images = self._p_generate(prompt_ids, self._p_params, rng)\n images = images.block_until_ready()\n elapsed = time.time() - time_start\n if self._run_with_profiler:\n jax.profiler.stop_trace()\n\n print(\"Inference time (in seconds): \", elapsed)\n print(\"Shape of the predictions: \", images.shape)\n images = images.reshape(\n (images.shape[0] * images.shape[1],) + images.shape[-3:])\n print(\"Shape of images afterwards: \", images.shape)\n return self._pipeline.numpy_to_pil(np.array(images))", "def generate_text(session, model, config, starting_text='<eos>',\n stop_length=100, stop_tokens=None, temp=1.0):\n state = model.initial_state.eval()\n # Imagine tokens as a batch size of one, length of len(tokens[0])\n tokens = [model.vocab.encode(word) for word in starting_text.split()]\n for i in xrange(stop_length):\n ### YOUR CODE HERE\n #print tokens\n feed = {}\n #x = np.array([tokens[-1]])\n #x.reshape(1,1)\n feed[model.input_placeholder] = [[tokens[-1]]]\n feed[model.dropout_placeholder] = 1\n feed[model.initial_state] = state\n y_pred, state = session.run([model.predictions[-1], model.final_state], feed_dict=feed)\n ### END YOUR CODE\n next_word_idx = sample(y_pred[0], temperature=temp)\n tokens.append(next_word_idx)\n if stop_tokens and model.vocab.decode(tokens[-1]) in stop_tokens:\n break\n output = [model.vocab.decode(word_idx) for word_idx in tokens]\n return output", "def batchify(TEXT, data, batch_size, device):\r\n data = TEXT.numericalize([data.examples[0].text])\r\n num_batches = data.size(0)//batch_size\r\n data = data.narrow(0, 0, num_batches * batch_size)\r\n data = data.view(batch_size, -1).t().contiguous()\r\n\r\n return data.to(device)", "def convert_example(example,\n tokenizer,\n label_list,\n 
max_seq_length=512,\n is_test=False):\n\n def _truncate_seqs(seqs, max_seq_length):\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n tokens_a, tokens_b = seqs\n max_seq_length -= 3\n while True: # truncate with longest_first strategy\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_seq_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n return seqs\n\n def _concat_seqs(seqs, separators, seq_mask=0, separator_mask=1):\n concat = sum((seq + sep for sep, seq in zip(separators, seqs)), [])\n segment_ids = sum(\n ([i] * (len(seq) + len(sep))\n for i, (sep, seq) in enumerate(zip(separators, seqs))), [])\n if isinstance(seq_mask, int):\n seq_mask = [[seq_mask] * len(seq) for seq in seqs]\n if isinstance(separator_mask, int):\n separator_mask = [[separator_mask] * len(sep) for sep in separators]\n p_mask = sum((s_mask + mask\n for sep, seq, s_mask, mask in zip(\n separators, seqs, seq_mask, separator_mask)), [])\n return concat, segment_ids, p_mask\n\n if not is_test:\n # `label_list == None` is for regression task\n label_dtype = \"int64\" if label_list else \"float32\"\n # get the label\n label = example[-2]\n example = example[:-2]\n #create label maps if classification task\n if label_list:\n label_map = {}\n for (i, l) in enumerate(label_list):\n label_map[l] = i\n label = label_map[label]\n label = np.array([label], dtype=label_dtype)\n else:\n qas_id = example[-1]\n example = example[:-2]\n # tokenize raw text\n tokens_raw = [tokenizer(l) for l in example]\n # truncate to the truncate_length,\n tokens_trun = _truncate_seqs(tokens_raw, max_seq_length)\n # concate the sequences with special tokens\n tokens_trun[0] = [tokenizer.cls_token] + tokens_trun[0]\n tokens, segment_ids, _ = _concat_seqs(tokens_trun, [[tokenizer.sep_token]] *\n len(tokens_trun))\n # convert the token to ids\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n valid_length = len(input_ids)\n\n if not is_test:\n return input_ids, segment_ids, valid_length, label\n else:\n return input_ids, segment_ids, valid_length, qas_id", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n if set_type != 'test':\r\n guid = \"%s-%s\" % (set_type, line[0])\r\n try:\r\n text_a = line[3]\r\n text_b = line[4]\r\n label = line[5]\r\n except IndexError:\r\n continue\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n else:\r\n guid = \"%s-%s\" % (set_type, line[0])\r\n try:\r\n text_a = line[1]\r\n text_b = line[2]\r\n label = self.get_labels()[0]\r\n except IndexError:\r\n continue\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n\r\n return examples", "def _create_examples(self, lines, set_type, dom=-1):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[0]\n label = line[1]\n if dom != -1:\n label = [label, str(dom)]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n guid = \"%s-%s\" % (set_type, i)\r\n if set_type != 'test':\r\n text_a = line[3]\r\n label = line[1]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\r\n else:\r\n if i == 0:\r\n continue\r\n text_a = line[1]\r\n label = self.get_labels()[0]\r\n 
examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\r\n return examples", "def query(self,text_input,prefix='answer:',convert_to_string=True):\n predictions, raw_outputs = self.model.predict([text_input])\n raw_outputs = [np.max(softmax([s[1][0] for s in v.items()][0])) for v in raw_outputs[0]]\n preds = [[(i[0],i[1],raw_outputs[k]) for i in p.items()][0] for k,p in enumerate(predictions[0])]\n return self._post_process_output(preds,convert_to_string=convert_to_string)", "def sample_to_features_text(\n sample, tasks, max_seq_len, tokenizer\n):\n\n if tokenizer.is_fast:\n text = sample.clear_text[\"text\"]\n # Here, we tokenize the sample for the second time to get all relevant ids\n # This should change once we git rid of FARM's tokenize_with_metadata()\n inputs = tokenizer(text,\n return_token_type_ids=True,\n truncation=True,\n truncation_strategy=\"longest_first\",\n max_length=max_seq_len,\n return_special_tokens_mask=True)\n\n if (len(inputs[\"input_ids\"]) - inputs[\"special_tokens_mask\"].count(1)) != len(sample.tokenized[\"tokens\"]):\n logger.error(f\"FastTokenizer encoded sample {sample.clear_text['text']} to \"\n f\"{len(inputs['input_ids']) - inputs['special_tokens_mask'].count(1)} tokens, which differs \"\n f\"from number of tokens produced in tokenize_with_metadata(). \\n\"\n f\"Further processing is likely to be wrong.\")\n else:\n # TODO It might be cleaner to adjust the data structure in sample.tokenized\n tokens_a = sample.tokenized[\"tokens\"]\n tokens_b = sample.tokenized.get(\"tokens_b\", None)\n\n inputs = tokenizer.encode_plus(\n tokens_a,\n tokens_b,\n add_special_tokens=True,\n truncation=False, # truncation_strategy is deprecated\n return_token_type_ids=True,\n is_split_into_words=False,\n )\n\n input_ids, segment_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"]\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n padding_mask = [1] * len(input_ids)\n\n # Padding up to the sequence length.\n # Normal case: adding multiple 0 to the right\n # Special cases:\n # a) xlnet pads on the left and uses \"4\" for padding token_type_ids\n if tokenizer.__class__.__name__ == \"XLNetTokenizer\":\n pad_on_left = True\n segment_ids = pad(segment_ids, max_seq_len, 4, pad_on_left=pad_on_left)\n else:\n pad_on_left = False\n segment_ids = pad(segment_ids, max_seq_len, 0, pad_on_left=pad_on_left)\n\n input_ids = pad(input_ids, max_seq_len, tokenizer.pad_token_id, pad_on_left=pad_on_left)\n padding_mask = pad(padding_mask, max_seq_len, 0, pad_on_left=pad_on_left)\n\n assert len(input_ids) == max_seq_len\n assert len(padding_mask) == max_seq_len\n assert len(segment_ids) == max_seq_len\n\n feat_dict = {\n \"input_ids\": input_ids,\n \"padding_mask\": padding_mask,\n \"segment_ids\": segment_ids,\n }\n\n # Add Labels for different tasks\n for task_name, task in tasks.items():\n try:\n label_name = task[\"label_name\"]\n label_raw = sample.clear_text[label_name]\n label_list = task[\"label_list\"]\n if task[\"task_type\"] == \"classification\":\n # id of label\n try:\n label_ids = [label_list.index(label_raw)]\n except ValueError as e:\n raise ValueError(f'[Task: {task_name}] Observed label {label_raw} not in defined label_list')\n elif task[\"task_type\"] == \"multilabel_classification\":\n # multi-hot-format\n label_ids = [0] * len(label_list)\n for l in label_raw.split(\",\"):\n if l != \"\":\n label_ids[label_list.index(l)] = 1\n elif task[\"task_type\"] == \"regression\":\n label_ids = [float(label_raw)]\n else:\n raise ValueError(task[\"task_type\"])\n except KeyError:\n # For inference mode we don't expect labels\n label_ids = None\n if label_ids is not None:\n feat_dict[task[\"label_tensor_name\"]] = label_ids\n return [feat_dict]", "def tokenize_nmt(text, num_examples=None):\n source, target = [], []\n for i, line in enumerate(text.split('\\n')):\n if num_examples and i > num_examples:\n break\n parts = line.split('\\t')\n if len(parts) == 2:\n source.append(parts[0].split(' '))\n target.append(parts[1].split(' '))\n return source, target", "def process(self, example: str) -> List[torch.Tensor]:\n return self._tokenizer.batch_encode_plus([example], return_tensors=\"pt\", output_past=True, max_length=self.max_seq_len)['input_ids'][0]", "def _create_examples(self, lines, set_type, dom=-1):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[1]\n text_b = line[2]\n label = line[-1]\n if dom != -1:\n label = [label, str(dom)]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type, dom=-1):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[1]\n text_b = line[2]\n label = line[-1]\n if dom != -1:\n label = [label, str(dom)]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type, dom=-1):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[1]\n text_b = line[2]\n label = line[-1]\n if dom != -1:\n label = [label, str(dom)]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def main():\n # Load and 
prep training files\n raw_speech_text = hg.load_training_file('trump_train.txt')\n speech_text = hg.prep_training(raw_speech_text)\n tweet_data = load_tweets('trump_tweets.json')\n raw_tweets = \"\"\n for dct in tweet_data:\n raw_tweets += \"{} \".format(dct['text'])\n tweets = hg.prep_training(raw_tweets)\n corpus = speech_text + tweets\n corpus = strip_punctuation(corpus)\n dict_1 = hg.map_one_to_one(corpus)\n dict_2 = hg.map_two_to_one(corpus)\n text = []\n \n # Introduction\n print(\"\\nTrump Speech Generator\\n\")\n print(\"Select words to add to speech\")\n print(\"\\'x\\' to exit\")\n print(\"\\'p\\' to add punctuation\")\n print(\"Select \\'p\\' before selecting the word you want to punctuate\")\n\n # Select first word\n options = corpus\n print ()\n selection = select_word(corpus)\n text.append(selection)\n \n # Select second word\n last = text[0]\n options = word_after_one(last, dict_1)\n print_text(text)\n selection = select_word(options)\n text.append(selection)\n \n # Select subsequent word\n while True:\n last = \"{} {}\".format(text[-2].strip(punctuation),\n text[-1].strip(punctuation))\n options = word_after_two(last, dict_2)\n if options == []:\n last = last.split()[1]\n options = word_after_one(last, dict_1)\n while options == []:\n last = random.choice(corpus)\n options = word_after_one(last, dict_1)\n print_text(text)\n selection = select_word(options)\n text.append(selection)\n \n print_text(text)", "def _create_examples(self, lines, set_type):\n examples = []\n for idx, line in enumerate(lines):\n item = json.loads(line.strip())\n question_id = \"%s-%s\" % (set_type, idx)\n context = item[\"context\"]\n question = item[\"question\"]\n endings = [item[\"answerA\"],item[\"answerB\"],item[\"answerC\"] ]\n label = item[\"correct\"]\n #race_id = \"%s-%s\" % (set_type, data_raw[\"race_id\"])\n #article = data_raw[\"article\"]\n #for i in range(len(data_raw[\"answers\"])):\n #truth = str(ord(data_raw[\"answers\"][i]) - ord(\"A\"))\n #question = data_raw[\"questions\"][i]\n #options = data_raw[\"options\"][i]\n\n examples.append(\n InputExample(\n example_id=question_id,\n question=question,\n contexts=[context,context,context],\n endings=[endings[0], endings[1], endings[2]],#, options[3]\n label=label,\n )\n )\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n if i <= 3:\n print(\"i={}, line={}\".format(i, line))\n guid = \"%s-%s\" % (set_type, i)\n if set_type == \"test\":\n text_a = tokenization.convert_to_unicode(line[1])\n label = \"0\"\n else:\n text_a = tokenization.convert_to_unicode(line[1])\n label = tokenization.convert_to_unicode(line[0])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n if i == 0:\r\n continue\r\n guid = \"%s-%s\" % (set_type, line[0])\r\n text_a = line[1]\r\n text_b = line[2]\r\n if set_type != 'test_matched':\r\n label = line[-1]\r\n else:\r\n label = self.get_labels()[0]\r\n examples.append(\r\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\r\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (_, data) in enumerate(lines):\n passage = data[\"passage\"]\n ents_pos = [(pos[\"start\"], pos[\"end\"])\n for pos in passage[\"entities\"]]\n ents = [passage[\"text\"][start:end+1] for start, end in ents_pos]\n for qa in data[\"qas\"]:\n 
qa_id = f\"{set_type}-{data['idx']}-{qa['idx']}\"\n answers = set([ans[\"text\"] for ans in qa[\"answers\"]])\n for ent_idx, ent in enumerate(ents):\n is_answer = ent in answers\n guid = f\"{qa_id}-{ent_idx}\"\n examples.append(\n InputExample(\n guid=guid,\n text_a=passage[\"text\"],\n # Insert entity in query\n text_b=qa[\"query\"].replace(\"@placeholder\", ent),\n label=\"1\" if is_answer else \"0\",\n )\n )\n return examples", "def run_prediction(question_texts, context_text):\r\n examples = []\r\n\r\n for i, question_text in enumerate(question_texts):\r\n example = SquadExample(\r\n qas_id=str(i),\r\n question_text=question_text,\r\n context_text=context_text,\r\n answer_text=None,\r\n start_position_character=None,\r\n title=\"Predict\",\r\n is_impossible=False,\r\n answers=None,\r\n )\r\n\r\n examples.append(example)\r\n\r\n features, dataset = squad_convert_examples_to_features(\r\n examples=examples,\r\n tokenizer=tokenizer,\r\n max_seq_length=384,\r\n doc_stride=128,\r\n max_query_length=64,\r\n is_training=False,\r\n return_dataset=\"pt\",\r\n threads=1,\r\n )\r\n\r\n eval_sampler = SequentialSampler(dataset)\r\n eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=10)\r\n\r\n all_results = []\r\n\r\n for batch in eval_dataloader:\r\n model.eval()\r\n batch = tuple(t.to(device) for t in batch)\r\n\r\n with torch.no_grad():\r\n inputs = {\r\n \"input_ids\": batch[0],\r\n \"attention_mask\": batch[1],\r\n \"token_type_ids\": batch[2],\r\n }\r\n\r\n example_indices = batch[3]\r\n\r\n outputs = model(**inputs)\r\n\r\n for i, example_index in enumerate(example_indices):\r\n eval_feature = features[example_index.item()]\r\n unique_id = int(eval_feature.unique_id)\r\n\r\n output = [to_list(output[i]) for output in outputs]\r\n\r\n start_logits, end_logits = output\r\n result = SquadResult(unique_id, start_logits, end_logits)\r\n all_results.append(result)\r\n\r\n output_prediction_file = \"predictions.json\"\r\n output_nbest_file = \"nbest_predictions.json\"\r\n output_null_log_odds_file = \"null_predictions.json\"\r\n\r\n predictions = compute_predictions_logits(\r\n examples,\r\n features,\r\n all_results,\r\n n_best_size,\r\n max_answer_length,\r\n do_lower_case,\r\n output_prediction_file,\r\n output_nbest_file,\r\n output_null_log_odds_file,\r\n False, # verbose_logging\r\n True, # version_2_with_negative\r\n null_score_diff_threshold,\r\n tokenizer,\r\n )\r\n\r\n return predictions", "def get_transformed_io(data_path, data_dir):\n sents, labels = read_line_examples_from_file(data_path)\n\n # the input is just the raw sentence\n inputs = [s.copy() for s in sents]\n\n task = 'asqp'\n if task == 'aste':\n targets = get_para_aste_targets(sents, labels)\n elif task == 'tasd':\n targets = get_para_tasd_targets(sents, labels)\n elif task == 'asqp':\n targets = get_para_asqp_targets(sents, labels)\n else:\n raise NotImplementedError\n\n return inputs, targets", "def _create_examples(self, lines, set_type, dom=-1):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[3]\n text_b = line[4]\n label = line[0]\n if dom != -1:\n label = [label, str(dom)]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, set_type, dom=-1):\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[7]\n text_b = line[8]\n label = line[-1]\n if dom != -1:\n 
label = [label, str(dom)]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_examples(self, lines, type):\n examples = []\n\n for i in range(0, len(lines), self.interval):\n text_a = lines[i]\n label = lines[i + 2]\n\n examples.append(\n InputExample(guid=len(examples), text_a=text_a, pos=None, label=label))\n return examples", "def _create_examples(self, df, set_type):\n examples = []\n for i, row in df.iterrows():\n guid = \"%s-%s\" % (set_type, i)\n text_a = row[4]\n label = row[2]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self, lines, set_type, dom=-1):\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[3]\n label = line[1]\n if dom != -1:\n label = [label, str(dom)]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, ids) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, ids )\n text_a = lines[ids]['term']\n text_b = lines[ids]['sentence']\n label = lines[ids]['polarity']\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n label_map = {label: i for i, label in enumerate(label_list)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n return features", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n # Only the test set has a header\n if set_type == \"test\" and i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n if set_type == \"test\":\n text_a = tokenization.convert_to_unicode(line[1])\n label = \"0\"\n else:\n text_a = tokenization.convert_to_unicode(line[1])\n label = tokenization.convert_to_unicode(line[0])\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples" ]
[ "0.6111713", "0.5924071", "0.58839375", "0.5859931", "0.5800309", "0.5781618", "0.5774486", "0.57522273", "0.57205606", "0.56961256", "0.56917894", "0.5685472", "0.5684877", "0.5684489", "0.5622114", "0.56032103", "0.5577944", "0.55711997", "0.5566548", "0.555741", "0.55527824", "0.55345577", "0.5530338", "0.5518854", "0.5518089", "0.55169445", "0.55169445", "0.5511586", "0.55056286", "0.5500359", "0.54940194", "0.54912585", "0.5487304", "0.5478574", "0.54711366", "0.5471066", "0.5470501", "0.5465104", "0.54564923", "0.54564923", "0.5454603", "0.5454121", "0.5450557", "0.54496026", "0.54470515", "0.5443643", "0.5443049", "0.5443049", "0.5437053", "0.54369944", "0.5434196", "0.5423586", "0.54234666", "0.5422409", "0.54148626", "0.54126024", "0.5410403", "0.540156", "0.53946924", "0.53684497", "0.5364544", "0.535289", "0.53457034", "0.5344018", "0.5342755", "0.5338888", "0.5338693", "0.53342134", "0.5334035", "0.5331439", "0.53299665", "0.53270173", "0.5324919", "0.5322709", "0.5319762", "0.5315595", "0.53123057", "0.5288355", "0.5285381", "0.52848047", "0.52809787", "0.5280526", "0.5279603", "0.52790946", "0.52790946", "0.52790946", "0.5277419", "0.52757096", "0.5275489", "0.5273472", "0.5271849", "0.52700144", "0.52647907", "0.52646774", "0.52610165", "0.5258706", "0.5256669", "0.5252593", "0.52471566", "0.52442247", "0.52408445" ]
0.0
-1
Export the settings as a DOM node.
def _exportNode(self):
        node = ZCatalogXMLAdapter._exportNode(self)
        self._logger.info('Person Catalog settings exported.')
        return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _exportNode(self):\n node = self._extractProperties()\n self._logger.info('settings exported.')\n return node", "def saveToXml(self) -> org.jdom.Element:\n ...", "def to_xml(self):\r\n element = ET.Element(\"node\")\r\n\r\n element.attrib['name'] = self.name\r\n element.attrib['description'] = self.description\r\n\r\n return element", "def get_dom(self) -> str:\n\n if self.is_running:\n return self.dumps()\n\n if self.dom is not None:\n return self.dom\n\n dom = self.dumps()\n self.dom = dom\n return dom", "def getXML(self):\n\n def _getElementForMappingEntry(entry, mappingStyle):\n xmlDocTmp = Document()\n element = xmlDocTmp.createElement(mappingStyle)\n for k, v in viewitems(entry):\n # ignore empty, None or compiled regexp items into output\n if not v or (k == \"path-match-expr\"):\n continue\n element.setAttribute(k, str(v))\n return element\n\n xmlDoc = Document()\n root = xmlDoc.createElement(\"storage-mapping\") # root element name\n for mappingStyle, mappings in viewitems(self):\n for mapping in mappings:\n mapElem = _getElementForMappingEntry(mapping, mappingStyle)\n root.appendChild(mapElem)\n return root.toprettyxml()", "def quick_set_html_conversion_settings(self):\n self.logger.debug(\"HTML conversion settings\")\n self.export_format = 'html'\n self.quick_setting = 'html'\n self.front_matter_format = 'yaml'\n self.metadata_schema = []\n if self.conversion_input == 'nsx':\n self.metadata_schema = ['title', 'ctime', 'mtime', 'tag']\n self.spaces_in_tags = False\n self.split_tags = False\n self.first_row_as_header = True\n self.first_column_as_header = True\n self.chart_image = True\n self.chart_csv = True\n self.chart_data_table = True", "def to_xml(self) -> str:\n # default name and stuff setup\n element_root, xml_tree = super()._add_basics()\n element_root = element_root.find('elementProp')\n element_root = element_root.find('collectionProp')\n for element in list(element_root):\n try:\n if element.attrib['name'] == 'influxdbUrl':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.influx_db_url:\n elem.text = self.influx_db_url\n elif element.attrib['name'] == 'application':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.application:\n elem.text = self.application\n elif element.attrib['name'] == 'measurement':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.measurement:\n elem.text = self.application\n elif element.attrib['name'] == 'summaryOnly':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value':\n elem.text = str(self.summary_only).lower()\n elif element.attrib['name'] == 'samplersRegex':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.samplers_regexp:\n elem.text = self.samplers_regexp\n elif element.attrib['name'] == 'percentiles':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.percentiles:\n elem.text = self.percentiles\n elif element.attrib['name'] == 'testTitle':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.test_title:\n elem.text = self.test_title\n elif element.attrib['name'] == 'eventTags':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.event_tags:\n elem.text = self.event_tags\n except Exception:\n raise Exception(f'Unable to render xml from {type(self).__class__}')\n return tree_to_str(xml_tree, hashtree=True)", "def write(self):\n temp_string = 
minidom.parseString(ET.tostring(self.root)).toprettyxml(encoding=\"UTF-8\")\n with open(self.xml_file, 'w') as f:\n f.write(temp_string)\n # f = open(self.xml_file, \"w\")\n # f.write(temp_string)\n # f.close()", "def exportElements(self):\n fname = os.path.join(\".\", \"atoms-exported.IN\")\n fname = QtWidgets.QFileDialog.getSaveFileName(self, \"Atoman - Export element properties\", fname,\n \"IN files (*.IN)\",\n options=QtWidgets.QFileDialog.DontUseNativeDialog)[0]\n\n if fname:\n if \".\" not in fname or fname[-3:] != \".IN\":\n fname += \".IN\"\n\n self.logger.info(\"Exporting elements settings to '%s'\", fname)\n elements.write(fname)", "def toxml(self) :\n\t\treturn self.doc.toxml()", "def config_html(output_file=''):\n if output_file:\n f = open(output_file, 'w')\n else:\n f = sys.stdout\n create_config_html(f)", "def dump(self, path, mode='standalone'):\n if mode == 'standalone':\n with open(path+\"/export_grid_standalone\"+str(self._id)+\".html\", 'w+') as f:\n f.write(self.export_html(build=True))\n elif mode == 'all':\n widget_export = self.export_html(build=False)\n with open(path+\"/export_scripts.html\", \"w+\") as f:\n f.write(widget_export['script_tags'])\n with open(path+\"/export_html_state.html\", \"w+\") as f:\n f.write(widget_export['html_state'])\n with open(path+\"/export_state_\"+str(self._id)+\".json\", \"w+\") as f:\n f.write(json.dumps(widget_export['manager_state']))\n with open(path+\"/export_grid_\"+str(self._id)+\".html\", \"w+\") as f:\n f.write(widget_export['grid_div'])", "def get_xml(self):\n with io.StringIO() as string:\n string.write(ET.tostring(self.root, encoding=\"unicode\"))\n return string.getvalue()", "def settings_view():\n return template('settings.html')", "def save(self):\n with self.open(self.filename, 'wt') as fd:\n for node in self.elements:\n fd.write(node.text)", "def to_etree(self):\n\n # Format connections\n text = []\n for i in range(self.width):\n if i in self.connections:\n text.append(str(self.connections[i]))\n else:\n text.append(\"open\")\n\n elem = ET.Element(\"port\", attrib={\"name\": self.name})\n elem.text = \" \".join(text)\n return elem", "def xml(self):\n rough_string = ElementTree.tostring(self.dom, \"utf-8\")\n reparsed = minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent=\" \")", "def xml(self, indent):\n if self.__commentOut:\n prefix = \"<!--\"\n suffix = \" -->\"\n else:\n prefix = \"\"\n suffix = \"\"\n hubs = self.__stringMap.keys()\n if self.OMIT_HUB_NUMBER or len(hubs) != 1:\n nStr = \"\"\n else:\n nStr = \" hub=\\\"%d\\\"\" % hubs[0]\n return \"%s%s<domConfigList%s>%s</domConfigList>%s\" % \\\n (prefix, indent, nStr, self.__fileName, suffix)", "def __init__(self, settings_xml):\n # The list of setting ids.\n #\n # XXX This is redundant. We could just get the ids from\n # getting the values of any of our dicts.\n #\n self.ids = []\n self.values = { }\n self.types = { }\n self.defaults = { }\n self.labels = { }\n\n if settings_xml:\n dom = parseString(settings_xml)\n s = dom.firstChild\n\n setting = first_child(s, \"setting\")\n while setting:\n setting_id = setting.getAttribute(\"id\")\n\n # I know the 'sep' setting has no id. I am not sure what it is\n # used for so I am just going to skip it.\n #\n if setting_id != \"\":\n self.ids.append(setting_id)\n self.labels[setting_id] = setting.getAttribute(\"label\")\n self.types[setting_id] = setting.getAttribute(\"type\")\n\n # For bool's actually set the default value to True or\n # False. 
otherwise it is all strings to us.\n #\n default = setting.getAttribute(\"default\")\n if self.types[setting_id] == \"bool\":\n self.defaults[setting_id] = (default.lower() == 'true')\n else:\n self.defaults[setting_id] = default\n\n # Settings start out with their default value.\n #\n self.values[setting_id] = self.defaults[setting_id]\n setting = next_sibling(setting, \"setting\")\n\n dom.unlink()\n dom = None\n\n # There is always an 'override' setting - \"override\", which is\n # set based on the Language Override setting in the scraper.\n #\n if 'override' not in self.ids:\n self.ids.append(\"override\")\n self.values[\"override\"] = False\n self.types[\"override\"] = \"bool\"\n self.defaults[\"override\"] = False\n self.labels[\"override\"] = \"Language Override\"\n\n # The default language for now is english!\n #\n if 'language' not in self.ids:\n self.ids.append(\"language\")\n self.values[\"language\"] = \"en\"\n self.types[\"language\"] = \"string\"\n self.defaults[\"language\"] = \"en\"\n self.labels[\"language\"] = \"Language\"\n\n return", "def __repr__(self) -> str:\n view = {\n \"server\": self.server,\n \"access-token\": 'yes' if self.token is not None else 'no',\n \"insecure\": self.insecure,\n \"output\": self.output,\n \"verbose\": self.verbose,\n }\n\n return \"<Configuration({})\".format(view)", "def create_dd_settings(xml_document, parent_element):\n dd_properties_element = xml_document.createElement(\"dd_properties\")\n parent_element.appendChild(dd_properties_element)\n\n option_element = xml_document.createElement(\"Option\")\n option_element.setAttribute('type', 'Map')\n dd_properties_element.appendChild(option_element)\n\n option_child1_element = xml_document.createElement(\"Option\")\n option_child1_element.setAttribute('type', 'QString')\n option_child1_element.setAttribute('name', 'name')\n option_child1_element.setAttribute('value', '')\n option_element.appendChild(option_child1_element)\n\n option_child2_element = xml_document.createElement(\"Option\")\n option_child2_element.setAttribute('name', 'properties')\n option_element.appendChild(option_child2_element)\n\n option_child3_element = xml_document.createElement(\"Option\")\n option_child3_element.setAttribute('type', 'QString')\n option_child3_element.setAttribute('name', 'type')\n option_child3_element.setAttribute('value', 'collection')\n option_element.appendChild(option_child3_element)", "def export(self, package):\n self.style = package.style\n self.copyFiles(package)\n self.html = self.renderHeader(package.name)\n self.html += u\"<body>\\n\"\n self.html += u\"<div id=\\\"content\\\">\\n\"\n self.html += u\"<div id=\\\"header\\\">\\n\"\n self.html += escape(package.title)\n self.html += u\"</div>\\n\"\n self.html += u\"<div id=\\\"main\\\">\\n\"\n self.renderNode(package.root)\n self.html += u\"</div>\\n\"\n self.html += u\"</div>\\n\"\n self.html += u\"</body></html>\\n\"\n self.save(self.outputDir/\"index.html\")", "def setup_render(\n self, options: Dict[str, Any], env: MutableMapping[str, Any]\n ) -> None:\n self.md_env = env\n self.config: Dict[str, Any] = options\n self.document: nodes.document = self.config.get(\"document\", make_document())\n self.current_node: nodes.Element = self.config.get(\n \"current_node\", self.document\n )\n self.reporter: Reporter = self.document.reporter\n # note there are actually two possible language modules:\n # one from docutils.languages, and one from docutils.parsers.rst.languages\n self.language_module_rst: ModuleType = get_language_rst(\n 
self.document.settings.language_code\n )\n self._level_to_elem: Dict[int, nodes.Element] = {0: self.document}", "def get_html(self):\r\n\r\n # these 3 will be used in class methods\r\n self.html_id = self.location.html_id()\r\n self.html_class = self.location.category\r\n\r\n self.configuration_json = self.build_configuration_json()\r\n params = {\r\n 'gst_html': self.substitute_controls(self.render),\r\n 'element_id': self.html_id,\r\n 'element_class': self.html_class,\r\n 'configuration_json': self.configuration_json\r\n }\r\n content = self.system.render_template(\r\n 'graphical_slider_tool.html', params\r\n )\r\n return content", "def to_dom_element(self, doc):\n string = \"This should never be printed. - EGCryptoSystem.py\"\n return self.to_stub(string, string).to_dom_element(doc)", "def build_configuration_json(self):\r\n # <root> added for interface compatibility with xmltodict.parse\r\n # class added for javascript's part purposes\r\n root = '<root class=\"{}\">{}</root>'.format(\r\n self.html_class,\r\n self.configuration)\r\n return json.dumps(xmltodict.parse(root))", "def save_settings(self):\r\n self.QtSettings.beginGroup(\"MainWindow\")\r\n self.QtSettings.setValue(\"geometry\",self.saveGeometry())\r\n self.QtSettings.setValue(\"state\",self.saveState())\r\n self.QtSettings.endGroup()\r\n \r\n #save element content\r\n self.QtSettings.beginGroup(\"Settings\")\r\n pyguitools.gui_save(self.ui,self.QtSettings)\r\n self.QtSettings.endGroup()", "def build_configuration_json(self):\n # <root> added for interface compatibility with xmltodict.parse\n # class added for javascript's part purposes\n return json.dumps(xmltodict.parse('<root class=\"' + self.html_class +\n '\">' + self.configuration + '</root>'))", "def toXMLNode(self):\n return _libsbml.SBase_toXMLNode(self)", "def serialize_settings(node_settings, current_user):\n user_settings = node_settings.user_settings\n user_is_owner = user_settings is not None and (\n user_settings.owner._primary_key == current_user._primary_key\n )\n current_user_settings = current_user.get_addon('menbib')\n rv = {\n 'nodeHasAuth': node_settings.has_auth,\n 'userIsOwner': user_is_owner,\n 'userHasAuth': current_user_settings is not None and current_user_settings.has_auth,\n 'urls': serialize_urls(node_settings)\n }\n if node_settings.has_auth:\n # Add owner's profile URL\n rv['urls']['owner'] = web_url_for('profile_view_id',\n uid=user_settings.owner._primary_key)\n rv['ownerName'] = user_settings.owner.fullname\n path = node_settings.folder\n if path is None:\n rv['folder'] = {'name': None, 'path': None}\n else:\n rv['folder'] = {\n 'name': 'Menbib' + path,\n 'path': path\n }\n return rv", "def write_config():\n\n e = Element(\"Configuration\")\n r = SubElement(e, \"RepositoryList\")\n r = SubElement(r, \"Repository\", name = \"default\")\n SubElement(r, \"Module\").text = args.driver\n SubElement(r, \"TokenLabel\").text = args.token_label\n SubElement(r, \"PIN\").text = args.pin\n ElementTree(e).write(args.write_config)\n args.write_config.flush()", "def xml(self):\n raise NotImplementedError('must be implemented by all subclasses')", "def addOutputsNode():\n return render_template(\"addOutputsNode.html\")", "def write(self, fd):\n indent = \" \"\n in2 = indent + indent\n print >>fd, \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\"\n if self.__topComment is not None:\n print >>fd, \"<!--%s-->\" % self.__topComment\n print >>fd, \"<runConfig>\"\n for d in self.__domCfgList:\n print >>fd, d.xml(indent)\n for n in self.__domCfgNames:\n print 
>>fd, n.xml(indent)\n if self.__replayBaseDir is not None:\n print >>fd, \"%s<hubFiles baseDir=\\\"%s\\\">\" % \\\n (indent, self.__replayBaseDir)\n for r in self.__replayHubList:\n print >>fd, r.xml(in2)\n print >>fd, \"%s</hubFiles>\" % indent\n print >>fd, \"%s<triggerConfig>%s</triggerConfig>\" % \\\n (indent, self.__trigCfg)\n for c in self.__comps:\n if not c.isHub():\n print >>fd, \"%s<runComponent name=\\\"%s\\\"/>\" % \\\n (indent, c.name())\n\n if self.__strayStream is not None:\n (name, prescale) = self.__strayStream\n in3 = in2 + indent\n\n print >>fd, \"%s<stream name=\\\"%s\\\">\" % (in2, name)\n print >>fd, \"%s<prescale>%d</prescale>\" % (in3, prescale)\n print >>fd, \"%s</stream>\" % in2\n\n if self.__senderOption is not None:\n (hub, fwdIsolatedHits) = self.__senderOption\n fwdName = \"forwardIsolatedHitsToTrigger\"\n if fwdIsolatedHits:\n fwdVal = \"true\"\n else:\n fwdVal = \"false\"\n\n in3 = in2 + indent\n in4 = in3 + indent\n\n print >>fd, \"%s<stringHub hubId=\\\"%d\\\">\" % (in2, hub)\n print >>fd, \"%s<sender>\" % in3\n print >>fd, \"%s<%s>%s</%s>\" % (in4, fwdName, fwdVal, fwdName)\n print >>fd, \"%s</sender>\" % in3\n print >>fd, \"%s</stringHub>\" % in2\n\n print >>fd, \"</runConfig>\"", "def write_settings(f, settings, name, embedded_flag):\n f.write(\"// Define settings structure\\n\")\n f.write(\"OSQPSettings %s = {\" % name)\n f.write(\"(c_float)%.20f, \" % settings['rho'])\n f.write(\"(c_float)%.20f, \" % settings['sigma'])\n f.write(\"%d, \" % settings['scaling'])\n\n # EMBEDDED == 2\n if embedded_flag != 1:\n f.write(\"%d, \" % settings['scaling_iter'])\n\n f.write(\"%d, \" % settings['max_iter'])\n f.write(\"(c_float)%.20f, \" % settings['eps_abs'])\n f.write(\"(c_float)%.20f, \" % settings['eps_rel'])\n f.write(\"(c_float)%.20f, \" % settings['eps_prim_inf'])\n f.write(\"(c_float)%.20f, \" % settings['eps_dual_inf'])\n f.write(\"(c_float)%.20f, \" % settings['alpha'])\n\n f.write(\"%d, \" % settings['scaled_termination'])\n f.write(\"%d, \" % settings['early_terminate'])\n f.write(\"%d, \" %\n settings['early_terminate_interval'])\n f.write(\"%d\" % settings['warm_start'])\n\n f.write(\"};\\n\\n\")", "def to_dom_element(self, doc): # pragma: no cover\n cs_scheme_element = doc.createElement(\"CryptoSystemScheme\")\n \n nbits_element = doc.createElement(\"nbits\")\n nbits_element.appendChild(doc.createTextNode(str(self.nbits)))\n cs_scheme_element.appendChild(nbits_element)\n \n prime_element = doc.createElement(\"prime\")\n prime_str = hex(self.prime)[2:] # Remove leading '0x'\n if(prime_str[-1] == 'L'): \n prime_str = prime_str[0:-1] # Remove trailing 'L'\n prime_element.appendChild(doc.createTextNode(prime_str))\n cs_scheme_element.appendChild(prime_element)\n \n generator_element = doc.createElement(\"generator\")\n generator_str = hex(self.generator)[2:] # Remove leading '0x'\n if(generator_str[-1] == 'L'): \n generator_str = generator_str[0:-1] # Remove trailing 'L'\n generator_element.appendChild(doc.createTextNode(generator_str))\n cs_scheme_element.appendChild(generator_element)\n \n return cs_scheme_element", "def export_to_file(self, filename):\n if len(filename.split(\".\")) == 1:\n filename += \".xml\"\n xmlstring = self._dommodel.toprettyxml(\" \", \"\\n\")\n with open(filename, \"w\") as f:\n f.write(xmlstring)", "def showSettings():\n cq = dz()\n cq.abag()", "def render_settings_view():\n return render_template('settings_screen.html', realsense_device_status=realsense_enabled, detector_enabled=enabled_detector)", "def get_xml(self):\n 
return etree.tostring(self.get_etree())", "def node():\n return render_template('nodes.html')", "def get_settings(self):\n return self.request({\n \"path\": \"/\" + UUID + \"/setting\"\n })", "def exportHtmlSingle(self, filePath=''):\n if not filePath:\n filePath = self.getFileName(_('TreeLine - Export HTML'), 'html')\n if not filePath:\n return False\n QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n if ExportDialog.exportWhat == ExportDialog.entireTree:\n self.selectedNodes = [self.rootNode]\n outputGroup = treeoutput.OutputGroup(self.selectedNodes,\n ExportDialog.includeRoot,\n ExportDialog.exportWhat !=\n ExportDialog.selectNode,\n ExportDialog.openOnly, True)\n outputGroup.addBlanksBetween()\n outputGroup.addIndents()\n outputGroup.addSiblingPrefixes()\n outGroups = outputGroup.splitColumns(ExportDialog.numColumns)\n htmlTitle = os.path.splitext(os.path.basename(filePath))[0]\n indent = globalref.genOptions.getValue('IndentOffset')\n lines = ['<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 '\n 'Transitional//EN\">', '<html>', '<head>',\n '<meta http-equiv=\"Content-Type\" content=\"text/html; '\n 'charset=utf-8\">', '<title>{0}</title>'.format(htmlTitle),\n '<style type=\"text/css\"><!--',\n 'div {{margin-left: {0}em}}'.format(indent),\n 'td {padding: 10px}', 'tr {vertical-align: top}',\n '--></style>', '</head>', '<body>']\n if ExportDialog.addHeader:\n headerText = (globalref.mainControl.activeControl.printData.\n formatHeaderFooter(True))\n if headerText:\n lines.append(headerText)\n lines.extend(['<table>', '<tr><td>'])\n lines.extend(outGroups[0].getLines())\n for group in outGroups[1:]:\n lines.append('</td><td>')\n lines.extend(group.getLines())\n lines.extend(['</td></tr>', '</table>'])\n if ExportDialog.addHeader:\n footerText = (globalref.mainControl.activeControl.printData.\n formatHeaderFooter(False))\n if footerText:\n lines.append(footerText)\n lines.extend(['</body>', '</html>'])\n with open(filePath, 'w', encoding='utf-8') as f:\n f.writelines([(line + '\\n') for line in lines])\n return True", "def to_xml(self, scene_dir: str) -> Tuple[Et.Element, bool]:\n raise NotImplementedError", "def _save_settings(self):\n # data to be save :\n # -----------------\n # futurePivot node\n\n # create attributes\n self._create_data_attribute()\n\n # connect futurePivot node\n pm.connectAttr(\n \"%s%s\" % (self._futurePivot.name(), \".message\"),\n self._object.attr(\"pivotData.futurePivot\"),\n f=True,\n )", "def view_to_config(self):\n raise NotImplementedError", "def node_config():\r\n paragraph = document.add_paragraph('')\r\n document.add_heading('Server node details', 1)\r\n node_metrics = ['host name','central','purpose', 'engine', 'proxy', 'printing', 'scheduler']\r\n nodes = get_qlik_sense.get_nodeconfig()\r\n num_of_nodes = len(nodes)\r\n num_of_node_metrics = len(node_metrics)\r\n table = document.add_table(rows=num_of_node_metrics+1, cols=num_of_nodes+1)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'Metric'\r\n for item in range(0, num_of_nodes):\r\n row.cells[item+1].text = nodes[item][7]\r\n for item in range(num_of_node_metrics):\r\n row = table.rows[item+1]\r\n row.cells[0].text = str(node_metrics[item])\r\n\r\n for node in range(num_of_nodes):\r\n row.cells[node+1].text = str(nodes[node][item])\r\n document.add_page_break()", "def generateXML(self):\n return self.formatEval(\n self.TEMPLATES[self.attrs['name']]['XML'],\n self.attrs\n )", "def dom(self):\n raise NotImplementedError(\"base class called\")", 
"def build_settings(self, settings):\n \n settings.add_json_panel(\"Network\", self.config, data=network_json)\n settings.add_json_panel(\"Camera\", self.config, data=camera_json)\n settings.add_json_panel(\"CV\", self.config, data=cv_json)\n settings.add_json_panel(\"Admin\", self.config, data=admin_json)", "def load_settings(self):\n\n self.std = settings.settings", "def jssettings(self):\n self.update()\n return \"var %s = %s\" % (self.js_var_settings_name,\n json.dumps(self.settings))", "def writeFile(self, filename):\n s = ET.tostring(self._root)\n\n #Remove all formatting\n s = s.replace('\\n','')\n s = s.replace('\\t','')\n s = s.replace('\\r','')\n\n f = open(filename, 'w')\n f.write(minidom.parseString(s).toprettyxml())\n f.close()", "def serialize(self):\n return self.xmlnode.serialize(encoding=\"utf-8\")", "def setting():\n return render_template('setting.html', year=datetime.now().year)", "def build_settings(self, settings):\n settings.add_json_panel('Makesmith Settings', self.config, data=self.json)", "def saveSessionToXML(self, filename):\r\n xmlStr = self.createXMLStr()\r\n \r\n #Write to the file\r\n #xml.dom.ext.PrettyPrint(doc, open(filename, 'w'))\r\n xmlFile = open(filename, 'w')\r\n xmlFile.write(xmlStr)\r\n xmlFile.close()", "def export_html(self, build=False):\n if build:\n html = export_html_code(self)\n return (html['script_tags'] +\n (html['html_state']).format(manager_state=json.dumps(html['manager_state'])) +\n html['grid_div'])\n return export_html_code(self)", "def print_settings(self, title=None):\n if title:\n print(title)\n print('Contents of imagenode.yaml:')\n pprint.pprint(self.config)\n print()", "def exportHtmlPages(self, filePath=''):\n if not filePath:\n filePath = QtGui.QFileDialog.getExistingDirectory(QtGui.\n QApplication.activeWindow(),\n _('TreeLine - Export HTML'),\n self.defaultFilePath)\n if not filePath:\n return False\n QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n oldDir = os.getcwd()\n os.chdir(filePath)\n indent = globalref.genOptions.getValue('IndentOffset')\n cssLines = ['#sidebar {',\n ' width: 16em;',\n ' float: left;',\n ' border-right: 1px solid black;',\n '}',\n '#sidebar div {{margin-left: {0}em;}}'.format(indent),\n '#content {',\n ' margin-left: 16em;',\n ' border-left: 1px solid black;',\n ' padding-left: 6px;',\n '}']\n with open('default.css', 'w', encoding='utf-8') as f:\n f.writelines([(line + '\\n') for line in cssLines])\n if ExportDialog.exportWhat == ExportDialog.entireTree:\n self.selectedNodes = [self.rootNode]\n if len(self.selectedNodes) > 1:\n modelRef = self.selectedNodes[0].modelRef\n dummyFormat = modelRef.formats.addDummyRootType()\n root = treenode.TreeNode(None, dummyFormat.name, modelRef)\n name = os.path.basename(self.defaultFilePath)\n if not name:\n name = treemodel.defaultRootName\n root.setTitle(name)\n for node in self.selectedNodes:\n root.childList.append(copy.copy(node))\n root.childList[-1].parent = root\n else:\n root = self.selectedNodes[0]\n root.exportHtmlPage()\n root.modelRef.formats.removeDummyRootType()\n os.chdir(oldDir)\n return True", "def writeXml(self):\n text = u' type=\"%s\"' % self.typeName\n if self.format:\n text += u' format=\"%s\"' % escape(self.format, treedoc.escDict)\n if self.prefix:\n text += u' prefix=\"%s\"' % escape(self.prefix, treedoc.escDict)\n if self.suffix:\n text += u' suffix=\"%s\"' % escape(self.suffix, treedoc.escDict)\n if self.html:\n text += u' html=\"y\"'\n if self.isRequired:\n text += u' required=\"y\"'\n if self.hidden:\n text += u' 
hidden=\"y\"'\n if self.numLines > 1:\n text += u' lines=\"%d\"' % self.numLines\n if self.initDefault:\n text += u' init=\"%s\"' % escape(self.initDefault, treedoc.escDict)\n if self.linkAltField:\n text += u' linkalt=\"%s\"' % escape(self.linkAltField,\n treedoc.escDict)\n return text", "def get(self) :\n self.generate('export.html', {\n 'xml' : export(),\n 'title' : \"Admin Export\"})", "def save_output_node(out):\n out_wc = out.clone()\n return out_wc", "def writeDoc(self, out, displayOptions=None):\n if displayOptions is None:\n displayOptions = {}\n tw = textwrap.TextWrapper(initial_indent='# ',\n subsequent_indent='# ', width=70)\n out.write('# %s (Default: %s)\\n' % (self.name,\n ', '.join(self.valueType.toStrings(self.getDefault(), displayOptions))))\n if self.doc:\n out.write('\\n'.join(tw.wrap(self.doc)))\n out.write('\\n')", "def get_html(self):\n\n # these 3 will be used in class methods\n self.html_id = self.location.html_id()\n self.html_class = self.location.category\n self.configuration_json = self.build_configuration_json()\n params = {\n 'gst_html': self.substitute_controls(self.render),\n 'element_id': self.html_id,\n 'element_class': self.html_class,\n 'configuration_json': self.configuration_json\n }\n content = self.system.render_template(\n 'graphical_slider_tool.html', params)\n return content", "def write_to(self, fp):\n fp.write('<')\n fp.write(self.tag)\n for k, v in self.attrs.iteritems():\n fp.write(' ')\n fp.write(k),\n fp.write('=\"')\n fp.write(xml_escape(v))\n fp.write('\"')\n if len(self.contents) == 0:\n fp.write(' />')\n else:\n fp.write('>')\n for item in self.contents:\n if isinstance(item, basestring):\n item = xml_escape(item)\n fp.write(item)\n elif isinstance(item, Markup) or isinstance(item, Element):\n item.write_to(fp)\n else:\n raise TypeError('Item %r must be either a string, '\n '``Element``, or ``Markup``' % item)\n fp.write('</')\n fp.write(self.tag)\n fp.write('>')", "def get_xml(self):\n xml = svgwrite.etree.etree.Element(self.elementname)\n if self.debug:\n self.validator.check_all_svg_attribute_values(self.elementname, self.attribs)\n for attribute, value in self.attribs.items():\n # filter 'None' values\n if value is not None:\n value = self.value_to_string(value)\n if value: # just add not empty attributes\n xml.set(attribute, value)\n \n for element in self.elements:\n xml.append(element)\n return xml", "def create_html(self):\n # Add html content to the self.doc\n self.doc.asis('<!DOCTYPE html>')\n with self.tag('html'):\n self.design_header()\n self.design_body()\n # Write html content from self.doc\n with codecs.open(self.filestream.name, 'w', 'utf-8') as f:\n html_content = indent(\n self.doc.getvalue(),\n indentation=' ',\n newline='\\r\\n'\n )\n f.write(html_content)", "def SaveSettings(self, settingsFile):\n with codecs.open(settingsFile, encoding='utf-8-sig', mode='w+') as f:\n json.dump(self.__dict__, f, encoding='utf-8-sig')\n with codecs.open(settingsFile.replace(\"json\", \"js\"), encoding='utf-8-sig', mode='w+') as f:\n f.write(\"var settings = {0};\".format(json.dumps(self.__dict__, encoding='utf-8-sig')))\n return", "def XMLWriter(\n fd,\n encoding=\"utf-8\",\n pretty=True,\n compactempty=True,\n indentation=_DEFAULT_INDENTATION\n):\n return _document(fd, encoding, pretty, compactempty, indentation)", "def writeSettings(self):\n settings = QtCore.QSettings()\n output_directory = self.ui.outputDirLineEdit.text()\n settings.setValue(\"output_directory\", output_directory)", "def prettify(self):\n re_parsed = 
minidom.parseString(tostring(self.dom))\n return re_parsed.toprettyxml()", "def export_toml(self): # type: () -> str\n return self._config_to_toml(self._config, self._toml)", "def toxml(self) -> ET.Element:\n # Dummy element that ElementTree extend() will strip\n root = ET.Element('root')\n\n connection = ET.SubElement(root, 'Connection')\n\n origin = ET.SubElement(connection, 'Origin')\n origin.set('ToolID', self.origin_tool.tool_id)\n origin.set('Connection', self.origin_output)\n\n destination = ET.SubElement(connection, 'Destination')\n destination.set('ToolID', self.destination_tool.tool_id)\n destination.set('Connection', self.destination_input)\n\n return root", "def htmlNodeDumpFile(self, out, cur):\n if cur is None: cur__o = None\n else: cur__o = cur._o\n libxml2mod.htmlNodeDumpFile(out, self._o, cur__o)", "def exportZendoc(self,ofile):\n value = self.getZendoc()\n if not value: return\n ofile.write(\"<property id='zendoc' type='string'>\\n\")\n if not isinstance(value, basestring):\n value = unicode(value)\n elif isinstance(value, str):\n value = value.decode('latin-1')\n ofile.write(saxutils.escape(value).encode('utf-8')+\"\\n\")\n ofile.write(\"</property>\\n\")", "def config_to_view(self):\n raise NotImplementedError", "def exportHtmlNavSingle(self, filePath=''):\n if not filePath:\n filePath = self.getFileName(_('TreeLine - Export HTML'), 'html')\n if not filePath:\n return False\n QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n if ExportDialog.exportWhat == ExportDialog.entireTree:\n self.selectedNodes = [self.rootNode]\n outputGroup = treeoutput.OutputGroup(self.selectedNodes,\n ExportDialog.includeRoot, True,\n ExportDialog.openOnly, True,\n ExportDialog.navPaneLevels)\n outputGroup.addBlanksBetween()\n outputGroup.addIndents()\n outputGroup.addSiblingPrefixes()\n htmlTitle = os.path.splitext(os.path.basename(filePath))[0]\n indent = globalref.genOptions.getValue('IndentOffset')\n lines = ['<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 '\n 'Transitional//EN\">', '<html>', '<head>',\n '<meta http-equiv=\"Content-Type\" content=\"text/html; '\n 'charset=utf-8\">', '<title>{0}</title>'.format(htmlTitle),\n '<style type=\"text/css\"><!--',\n ' #sidebar {',\n ' width: 16em;',\n ' float: left;',\n ' border-right: 1px solid black;',\n ' }',\n ' #sidebar div {{margin-left: {0}em;}}'.format(indent),\n ' #content {',\n ' margin-left: 16em;',\n ' border-left: 1px solid black;',\n ' padding-left: 6px;',\n ' }',\n ' #content div {{margin-left: {0}em;}}'.format(indent),\n '--></style>',\n '</head>', '<body>', '<div id=\"sidebar\">']\n prevLevel = 0\n for parent in self.selectedNodes:\n for node, level in parent.levelDescendantGen(ExportDialog.\n includeRoot,\n ExportDialog.\n navPaneLevels,\n ExportDialog.\n openOnly):\n if level > prevLevel:\n lines.append('<div>')\n while level < prevLevel:\n lines.append('</div>')\n prevLevel -= 1\n lines.append('&bull; <a href=\"#{0}\">{1}</a><br />'.\n format(node.uniqueId, node.title()))\n prevLevel = level\n while level > 0:\n lines.append('</div>')\n level -= 1\n lines.extend(['</div>', '<div id=\"content\">'])\n if ExportDialog.addHeader:\n headerText = (globalref.mainControl.activeControl.printData.\n formatHeaderFooter(True))\n if headerText:\n lines.append(headerText)\n lines.extend(outputGroup.getLines())\n if ExportDialog.addHeader:\n footerText = (globalref.mainControl.activeControl.printData.\n formatHeaderFooter(False))\n if footerText:\n lines.append(footerText)\n lines.extend(['</div>', '</body>', '</html>'])\n with 
open(filePath, 'w', encoding='utf-8') as f:\n f.writelines([(line + '\\n') for line in lines])\n return True", "def get_xml(self):\n profile = self.profile\n version = self.version\n #self.attribs['xmlns'] = \"http://www.w3.org/2000/svg\"\n self.attribs['xmlns:xlink'] = \"http://www.w3.org/1999/xlink\"\n self.attribs['xmlns:ev'] = \"http://www.w3.org/2001/xml-events\"\n\n self.attribs['baseProfile'] = profile\n self.attribs['version'] = version\n return super(Drawing, self).get_xml()", "def serialize(self, root):", "def settings(self):\r\n return settings.Settings(self)", "def export_html(self, model_view='gapd'):\n '''\n <?xml version=\"1.0\" ?>\n <ROWSET>\n <ROW>\n <SURVEYID>921</SURVEYID>\n <SURVEYNAME>Goomalling, WA, 1996</SURVEYNAME>\n <STATE>WA</STATE>\n <OPERATOR>Stockdale Prospecting Ltd.</OPERATOR>\n <CONTRACTOR>Kevron Geophysics Pty Ltd</CONTRACTOR>\n <PROCESSOR>Kevron Geophysics Pty Ltd</PROCESSOR>\n <SURVEY_TYPE>Detailed</SURVEY_TYPE>\n <DATATYPES>MAG,RAL,ELE</DATATYPES>\n <VESSEL>Aero Commander</VESSEL>\n <VESSEL_TYPE>Plane</VESSEL_TYPE>\n <RELEASEDATE/>\n <ONSHORE_OFFSHORE>Onshore</ONSHORE_OFFSHORE>\n <STARTDATE>05-DEC-96</STARTDATE>\n <ENDDATE>22-DEC-96</ENDDATE>\n <WLONG>116.366662</WLONG>\n <ELONG>117.749996</ELONG>\n <SLAT>-31.483336</SLAT>\n <NLAT>-30.566668</NLAT>\n <LINE_KM>35665</LINE_KM>\n <TOTAL_KM/>\n <LINE_SPACING>250</LINE_SPACING>\n <LINE_DIRECTION>180</LINE_DIRECTION>\n <TIE_SPACING/>\n <SQUARE_KM/>\n <CRYSTAL_VOLUME>33.6</CRYSTAL_VOLUME>\n <UP_CRYSTAL_VOLUME>4.2</UP_CRYSTAL_VOLUME>\n <DIGITAL_DATA>MAG,RAL,ELE</DIGITAL_DATA>\n <GEODETIC_DATUM>WGS84</GEODETIC_DATUM>\n <ASL/>\n <AGL>60</AGL>\n <MAG_INSTRUMENT>Scintrex CS2</MAG_INSTRUMENT>\n <RAD_INSTRUMENT>Exploranium GR820</RAD_INSTRUMENT>\n </ROW>\n </ROWSET>\n '''\n if model_view == 'prov':\n prov_turtle = self.export_rdf('prov', 'text/turtle')\n g = Graph().parse(data=prov_turtle, format='turtle')\n\n view_html = render_template(\n 'survey_prov.html',\n visjs=self._make_vsjs(g),\n prov_turtle=prov_turtle,\n )\n else: # model_view == 'gapd':\n view_html = render_template(\n 'survey_gapd.html',\n survey_no=self.survey_no,\n survey_name=self.survey_name,\n state=self.state,\n operator=self.operator,\n contractor=self.contractor,\n processor=self.processor,\n survey_type=self.survey_type,\n data_types=self.data_types,\n vessel=self.vessel,\n vessel_type=self.vessel_type,\n release_date=self.release_date,\n onshore_offshore=self.onshore_offshore,\n start_date=self.start_date,\n end_date=self.end_date,\n line_km=self.line_km,\n total_km=self.total_km,\n line_spacing=self.line_spacing,\n line_direction=self.line_direction,\n tie_spacing=self.tie_spacing,\n area=self.square_km,\n crystal_volume=self.crystal_volume,\n up_crystal_volume=self.up_crystal_volume,\n digital_data=self.digital_data,\n geodetic_datum=self.geodetic_datum,\n asl=self.asl,\n agl=self.agl,\n mag_instrument=self.mag_instrument,\n rad_instrument=self.rad_instrument,\n wkt_polygon=self.wkt_polygon\n )\n\n return render_template(\n 'page_survey.html',\n view_html=view_html,\n survey_no=self.survey_no,\n end_date=self.end_date,\n survey_type=self.survey_type,\n date_now=datetime.now().strftime('%Y-%m-%d'),\n centroid_lat=self.centroid_lat,\n centroid_lon=self.centroid_lon,\n n_lat=self.n_lat,\n s_lat=self.s_lat,\n w_long=self.w_long,\n e_long=self.e_long,\n gm_key=config.GOOGLE_MAPS_API_KEY\n )", "def __str__(self):\n result = xml.dom.minidom.parseString(\n xml.etree.ElementTree.tostring(\n self.ToXMLElement(), encoding='utf-8')).toprettyxml(indent=' 
')\n\n return result", "def project_settings(request):\n webnode_settings = kakocase_settings(request)\n webnode_settings['settings']['IS_WEBNODE'] = True\n return webnode_settings", "def build(self):\n root = ET.Element(\"html\", xmlns=self.xmlns)\n self.build_head(root)\n self.build_body(root)\n return root", "def save_settings(self, plugin_settings, instance_settings):\n instance_settings.set_value(\"output_directory\", self.output_directory)\n instance_settings.set_value(\"labels\", self.labels)\n if self._sub:\n instance_settings.set_value(\"topic_name\", self._sub.name)", "def pretty_print_content(self):\n\n return lxml.etree.tostring(self.get_content(),\n pretty_print = True,\n encoding = self.encoding,\n xml_declaration = True)", "def settings() -> Settings:\n return Settings()", "def settings(self) -> Any:\n self.ensure_initialized()\n return SettingsItem(self._data, self, FragmentPath())", "def saveSettings():\t\n\tglobal settings\n\tfout = open(config_file,'w')\n\tfout.write(json.dumps(settings, sort_keys=True, indent=4))\n\tfout.close()", "def exportMasterLayerSettings(self):\n\t\tmaster = rlayer.RenderLayer( 'defaultRenderLayer' )\n\t\tmaster.makeCurrent()\n\t\tmasterData = {}\n\t\tnodes = ['defaultArnoldRenderOptions','defaultResolution','defaultRenderGlobals']\n\t\tmnNodes =[ mn.Node( n ) for n in nodes ]\n\t\tfor n in mnNodes:\n\t\t\tfor a in n.listAttr( se = True, v = True, w = True ):\n\t\t\t\ttry:\n\t\t\t\t\tmasterData[a] = a.v\n\t\t\t\texcept:\n\t\t\t\t\tcontinue\n\t\tpickle.dump( masterData, open( self.masterPath.path, \"wb\" ) )", "def __repr__(self):\n string = ''\n for key, val in self.setting().items():\n string += '{}({})\\n'.format(key, val)\n return string", "def _ds(elem):\n _indent(elem)\n return ElementTree.tostring(elem)", "def AddElement(self):\n if not self.exportStructureViewer:\n raise TypeError(\"Please connect the shot processor's export \"\n \"structure viewer to this header instance.\")\n \n # Don't discard double slashes at end to allow for unnamed nodes.\n segments = [seg for seg in self.nodePathEdit.text().split('/')]\n hostname = SplitHostname(segments[0])[0] # drop \"IFFFS\"\n \n # Require a clip name\n # NOTE: The actual path requirements for uploading to a Wiretap IFFFS\n # server are more strict, but path validation happens elsewhere.\n clipName = self.clipNameEdit.text().strip()\n if clipName:\n segments.append(clipName)\n elementPath = Path.Join(hostname, *segments[1:])\n \n # Populate export structure\n root = self.exportTemplate.rootElement()\n if clipName:\n root.createChild(elementPath, True)\n leaf = root[elementPath]\n \n # Add preset to leaf element\n # NOTE: Be sure to update the string representation of the Stonify\n # task if the module or class name changes.\n preset = FnStonify.StonifyPreset(name='FnStonify.StonifyTask',\n properties={})\n if leaf is not None:\n leaf.setPreset(preset)\n else:\n print(\"WARNING: Unable to set Stonify content on element \" +\n elementPath)\n elif elementPath:\n root.createChild(elementPath, False)\n\n self.exportStructureViewer.refresh()", "def toXML(self):\n return _libsbml.Layout_toXML(self)", "def createxmlmall():\r\n\r\n root = ET.Element(\"state\")\r\n model = ET.SubElement(root, \"model\")\r\n model.text = r\"\"\r\n\r\n dataid = ET.SubElement(root, \"dataids\")\r\n application = ET.SubElement(root, \"application\")\r\n\r\n application.text = \"SIBS Configurator\"\r\n safecookie = ET.SubElement(root, \"safecookie\")\r\n steps = ET.SubElement(root, \"steps\")\r\n prev = ET.SubElement(steps, 
\"prev\")\r\n\r\n lastproxy = ET.SubElement(root, \"last-proxy\").text = \"tcserver0\"\r\n\r\n tree = ET.ElementTree(root) # saves tree in variable \"tree\"\r\n return tree, safecookie, steps, prev", "def write(self):\r\n for prop in self.prpnames:\r\n elem = SubElement(self._root, prop)\r\n data = self.__getattribute__(prop)\r\n if self.prpnames[prop]['type'] == \"text\":\r\n elem.text = data\r\n elif self.prpnames[prop]['type'] == 'list':\r\n for x in data:\r\n SubElement(elem, 'regel').text = x\r\n elif self.prpnames[prop]['type'] == 'attr':\r\n elem.set(self.prpnames[prop]['naam'], data)\r\n tree = ElementTree(self._root)\r\n tree.write(self._fn)\r\n if not self.exists:\r\n self.exists = True", "def htmlNodeDumpFileFormat(self, out, cur, encoding, format):\n if cur is None: cur__o = None\n else: cur__o = cur._o\n ret = libxml2mod.htmlNodeDumpFileFormat(out, self._o, cur__o, encoding, format)\n return ret", "def toXMLString(self):\n return _libsbml.XMLNode_toXMLString(self)", "def write_view_settings(self, key, settings=None):\n logger.debug(\"Writing view settings for: {}\".format(key))" ]
[ "0.7224327", "0.56396407", "0.5610303", "0.55762726", "0.5313067", "0.5309712", "0.5301959", "0.5281795", "0.52664566", "0.5236637", "0.52181643", "0.5208334", "0.51855683", "0.517911", "0.51764274", "0.51666445", "0.5144142", "0.5129619", "0.5117044", "0.51037055", "0.5100118", "0.5091269", "0.5077694", "0.5074275", "0.50737214", "0.5066217", "0.5065415", "0.5061878", "0.50473577", "0.50441843", "0.5041363", "0.50257874", "0.50112456", "0.50078577", "0.5005025", "0.49848586", "0.49807614", "0.4978994", "0.49580222", "0.4951547", "0.4947395", "0.49447188", "0.49374518", "0.4933343", "0.49292153", "0.4924695", "0.4919242", "0.49148557", "0.49104267", "0.4905973", "0.48888797", "0.48855475", "0.4885398", "0.48804998", "0.487947", "0.48719457", "0.48709732", "0.48666906", "0.4863784", "0.48635322", "0.4850537", "0.48403504", "0.4833306", "0.4831462", "0.48283774", "0.48257557", "0.48239073", "0.48214415", "0.4818914", "0.48106736", "0.47947487", "0.47908393", "0.4781297", "0.47804484", "0.47796544", "0.47791895", "0.47755042", "0.4771167", "0.47553036", "0.474816", "0.47373566", "0.47362173", "0.47340444", "0.4718602", "0.47069892", "0.47052392", "0.4702123", "0.4696956", "0.4691864", "0.46890047", "0.4684087", "0.46839866", "0.4680165", "0.4678766", "0.46738762", "0.46735504", "0.46687403", "0.4667095", "0.4666293", "0.46644348" ]
0.63378537
1
Import the settings from the DOM node.
def _importNode(self, node):
        ZCatalogXMLAdapter._importNode(self, node)
        self._logger.info('Person Catalog settings imported.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _importNode(self, node):\n if self.environ.shouldPurge():\n self._purgeProperties()\n self._initProperties(node)\n self._logger.info('settings imported.')", "def load_settings(self):\n\n self.std = settings.settings", "def __init__(self, settings_xml):\n # The list of setting ids.\n #\n # XXX This is redundant. We could just get the ids from\n # getting the values of any of our dicts.\n #\n self.ids = []\n self.values = { }\n self.types = { }\n self.defaults = { }\n self.labels = { }\n\n if settings_xml:\n dom = parseString(settings_xml)\n s = dom.firstChild\n\n setting = first_child(s, \"setting\")\n while setting:\n setting_id = setting.getAttribute(\"id\")\n\n # I know the 'sep' setting has no id. I am not sure what it is\n # used for so I am just going to skip it.\n #\n if setting_id != \"\":\n self.ids.append(setting_id)\n self.labels[setting_id] = setting.getAttribute(\"label\")\n self.types[setting_id] = setting.getAttribute(\"type\")\n\n # For bool's actually set the default value to True or\n # False. otherwise it is all strings to us.\n #\n default = setting.getAttribute(\"default\")\n if self.types[setting_id] == \"bool\":\n self.defaults[setting_id] = (default.lower() == 'true')\n else:\n self.defaults[setting_id] = default\n\n # Settings start out with their default value.\n #\n self.values[setting_id] = self.defaults[setting_id]\n setting = next_sibling(setting, \"setting\")\n\n dom.unlink()\n dom = None\n\n # There is always an 'override' setting - \"override\", which is\n # set based on the Language Override setting in the scraper.\n #\n if 'override' not in self.ids:\n self.ids.append(\"override\")\n self.values[\"override\"] = False\n self.types[\"override\"] = \"bool\"\n self.defaults[\"override\"] = False\n self.labels[\"override\"] = \"Language Override\"\n\n # The default language for now is english!\n #\n if 'language' not in self.ids:\n self.ids.append(\"language\")\n self.values[\"language\"] = \"en\"\n self.types[\"language\"] = \"string\"\n self.defaults[\"language\"] = \"en\"\n self.labels[\"language\"] = \"Language\"\n\n return", "def loadSettings(self, e):\n if e.tag == self.type:\n c = e\n else:\n c = e.find(\".//\" + self.type)\n if c is not None:\n g = c.get(\"enabled\")\n self.enabled = (g == 'true')\n g = c.get(\"mode\")\n self.setMode((GRANT if g == 'grant' else LIMIT))\n g = c.get(\"priority\")\n try:\n self.setPriority(int(g))\n except:\n self.setPriority(0)\n self.name = c.get(\"name\")\n else:\n print \"Error: no settings found for constrain %s\" % self.name\n return c", "def load_from_settings(self):\n for param, value in self.settings['swan'].items():\n # Some settings do not have a GUI element, continue if encountered\n if param not in self.input_elements.keys():\n continue\n\n # Check if parameter is not empty before filling in\n if self.validate_parameter(value):\n self.input_elements[param].set_value(value)\n\n # Validate\n self.validate(check_empty=False)", "def load_from_settings(self):\n for group_name, group in self.settings['pharos'].items():\n # Some settings do not have a GUI element, continue if encountered\n if group_name not in self.input_elements.keys():\n continue\n\n for param, value in group.items():\n # Some settings do not have a GUI element, continue if encountered\n if param not in self.input_elements[group_name].keys():\n continue\n # Check if parameter is not empty before filling in\n if self.validate_parameter(value):\n self.input_elements[group_name][param].set_value(value)\n\n # Validate\n 
self.validate(check_empty=False)", "def load_from_settings(self):\n for param, value in self.settings['hares'].items():\n # Some settings do not have a GUI element, continue if encountered\n if param not in self.input_elements.keys():\n continue\n\n # Check if parameter is not empty before filling in\n if self.validate_parameter(value):\n self.input_elements[param].set_value(value)\n\n # Validate\n self.validate(check_empty=False)", "def from_settings(settings):", "def loadSettings(home_dir,pd_dir):\n\n settingsXML = os.path.join(pd_dir,\"settings.xml\")\n\n #print(\"Loading settings from {0}\".format(settingsXML))\n\n global installationTree\n global installationSettings\n global domainPath\n global userEmail\n global userToken\n\n if os.path.isfile(settingsXML):\n installationTree = etree.parse(settingsXML)\n installationSettings = installationTree.getroot()\n\n for child in installationSettings:\n if child.tag == \"domain_path\":\n domainPath = child.text\n\n if not os.path.isdir(domainPath):\n fetchPlanningDomains(domainPath)\n\n if child.tag == \"email\":\n userEmail = child.text\n\n if child.tag == \"token\":\n userToken = child.text\n\n return\n\n if installationSettings is None:\n installationSettings = etree.Element(\"{http://settings.planning.domains}settings\")\n installationTree = etree.ElementTree(installationSettings)\n\n domainPath = input(\"Enter path for installing files (or hit enter to use {0}): \".format(os.path.join(home_dir,\"planning.domains\")))\n\n domainPath = domainPath.lstrip()\n domainpath = domainPath.rstrip()\n\n if domainPath == \"\":\n domainPath = os.path.join(home_dir,\"planning.domains\")\n\n if os.path.isfile(domainPath):\n print(\"Fatal error: there is already a file called {0}\".format(domainPath))\n exit(1)\n\n if not os.path.isdir(domainPath):\n fetchPlanningDomains(domainPath)\n\n etree.SubElement(installationSettings,\"domain_path\").text = domainPath\n\n userEmail = input(\"Enter email for API updates: \")\n userToken = input(\"Enter token for API updates (leave blank if none provided): \")\n\n etree.SubElement(installationSettings,\"email\").text = userEmail\n etree.SubElement(installationSettings,\"token\").text = userToken\n\n saveSettings()", "def read_node_settings(node_xml):\n workflow_template_information = node_xml.find(xmlns + \"config[@key='workflow_template_information']\")\n if workflow_template_information is not None:\n workflow_template_information = elementtree_to_dict(workflow_template_information)\n return { 'workflow_template_information': workflow_template_information}", "def step_3(self, browser):\n xml_file = '../../src/imio.project.pst/src/imio/project/pst/model/demo_import_pst_from_ecomptes_201805V1.xsd'\n mytree = ET.parse(xml_file)\n myroot = mytree.getroot()\n # Update ElementId\n self.assertEqual(myroot[1][1][0][10][1][10][1][13][1].get('ElementId'), 'b07ec94c0e804690a9ef971db84e12b1')\n myroot[1][1][0][10][1][10][1][13][1].set('ElementId', self.sa17.UID())\n mytree.write('ecomptes_pst.xml')\n # select xml file\n file_field = browser.find(u'Document XML exporté depuis eComptes')\n with open('ecomptes_pst.xml', 'r') as f:\n file_field.set('value', (f.read(), 'ecomptes_pst.xml'))\n # import xml file\n browser.find_button_by_label('Importer').click()\n # write browser contents\n # with open('browser_contents', 'w') as f:\n # f.write(browser.contents)", "def loadFromDom(self, root):\n if hasattr(root, \"documentElement\"):\n self.xml = root\n else:\n # Encase the given tree fragment in a Document\n self.xml = 
createRootNode()\n self.xml.appendChild(self.xml.importNode(root, True))\n self.preprocess()", "def load_frontend_xml(xml_path):\n frontend_setting = open(xml_path, \"r\").read()\n return frontend_setting", "def fromElement(self, element):\n from comoonics.ComProperties import Properties\n props=element.getElementsByTagName(Properties.TAGNAME)\n #log.debug(\"fromElement: %s, %u\" %(element, len(props)))\n if len(props)>=1:\n self.properties=Properties(props[0])\n for propertyname in self.properties.keys():\n self.log.debug(\"fromElement: Setting attribute %s, %s\" %(propertyname, self.properties[propertyname].getAttribute(\"value\")))\n setattr(self, propertyname, self.properties[propertyname].getAttribute(\"value\"))\n for attribute in element.attributes:\n self.__dict__[attribute.name]=attribute.value", "def importElements(self):\n msg = \"This will overwrite the current element properties file. You should create a backup first!\\n\\n\"\n msg += \"Do you wish to continue?\"\n reply = QtWidgets.QMessageBox.question(self, \"Message\", msg,\n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,\n QtWidgets.QMessageBox.No)\n\n if reply == QtWidgets.QMessageBox.Yes:\n # open file dialog\n title = \"Atoman - Import element properties\"\n fname = QtWidgets.QFileDialog.getOpenFileName(self, title, \".\", \"IN files (*.IN)\")[0]\n\n if fname:\n self.logger.info(\"Importing elements settings from '%s'\", fname)\n\n # read in new file\n elements.read(fname)\n\n # overwrite current file\n elements.write(dataPath(\"atoms.IN\"))\n\n # set on Lattice objects too\n self.inputState.refreshElementProperties()\n self.refState.refreshElementProperties()", "def load_inasafe_settings():\n # Load default settings\n from safe.definitions import default_settings\n from safe.utilities.settings import set_setting\n\n for key, value in default_settings.inasafe_default_settings.iteritems():\n set_setting(key, value)\n\n # Override settings from INASAFE_SETTINGS_PATH\n if headless_settings.INASAFE_SETTINGS_PATH:\n # Load from custom headless settings\n import_setting(headless_settings.INASAFE_SETTINGS_PATH)", "def parseFromFile(self, filePath):\n\t\ttry:\n\t\t\twith open(filePath) as fhd:\n\t\t\t\tpsXMLStr = reduce(lambda x,y: x+' '+y, fhd.xreadlines())\n\t\texcept IOError as e:\n\t\t\traise ImproperlyConfigured, \"Can not find setting file '%s', please check your setting of ``ANAFORA_PROJECT_FILE_ROOT'' and ``ANAFORA_PROJECT_SETTING_FILENAME'' in your setting file\" % (filePath)\n\t\tpsDOM = parseString(psXMLStr).childNodes[0]\n\t\tif psDOM.tagName != \"setting\":\n\t\t\traise Exception(\"Project Setting XML Dom parse error: \" + psDOM.toxml())\n\n\t\tfor childNode in [tNode for tNode in psDOM.childNodes if tNode.nodeType == tNode.ELEMENT_NODE]:\n\t\t\tif childNode.tagName == \"projects\":\n\t\t\t\tfor projectNode in [tNode for tNode in childNode.childNodes if tNode.nodeType == tNode.ELEMENT_NODE]:\n\t\t\t\t\tproject = Project.parseFromXMLDOM(projectNode)\n\t\t\t\t\tif os.path.isdir(os.path.join(settings.ANAFORA_PROJECT_FILE_ROOT, project.name)):\n\t\t\t\t\t\tself.projectList[project.name] = project\n\t\t\telif childNode.tagName == \"schemas\":\n\t\t\t\tfor schemaNode in [tNode for tNode in childNode.childNodes if tNode.nodeType == tNode.ELEMENT_NODE]:\n\t\t\t\t\tschema = Schema.parseFromXMLDOM(schemaNode)\n\t\t\t\t\tself.schemaList[str(schema.name)] = schema\n\t\t\telse:\n\t\t\t\traise Exception(\"unhandle tag: \" + childNode.tagName)\n\n\t\t#link schema in project\n\t\tfor projectName in 
self.projectList:\n\t\t\tproject = self.projectList[projectName]\n\t\t\tfor (idx, schemaName) in enumerate(project.allowedSchemas):\n\t\t\t\tif isinstance(schemaName, Schema):\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tif schemaName not in self.schemaList:\n\t\t\t\t\t\traise Exception('schema name \"' + str(schemaName) + '\" in project \"' + projectName + '\" is not exist')\n\t\t\t\t\tproject.allowedSchemas[idx] = self.schemaList[schemaName]\n\n\t\t#link preannotationFromMode in schema\n\t\tfor schemaName in self.schemaList:\n\t\t\tschema = self.schemaList[schemaName]\n\t\t\tfor modeName in schema.modes:\n\t\t\t\tmode = schema.modes[modeName]\n\n\t\t\t\tif mode.needPreannotation:\n\t\t\t\t\t#if mode.preannotationFromMode not in schema.modes:\n\t\t\t\t\t#\traise Exception('Preannotation mode name \"' + str(mode.preannotationFromMode) + '\" from mode \"' + mode.name + '\" of schema \"' + schemaName + '\" is not exists')\n\t\t\t\t\tif mode.preannotationFromMode.strip() == \"\":\n\t\t\t\t\t\tmode.preannotationFromMode = None\n\t\t\t\t\telse:\n\t\t\t\t\t\tmode.preannotationFromMode = schema.getMode(mode.preannotationFromMode)", "def load_settings(self):\r\n #create a QSettings object to store the settings\r\n self.QtSettings=QtCore.QSettings(\"OncoRay\",\"EBT Evaluation\")\r\n #self.QtSettings=QtCore.QSettings(\"settings.ini\",QtCore.QSettings.IniFormat)\r\n\r\n #load window settings \r\n self.QtSettings.beginGroup(\"MainWindow\")\r\n self.restoreGeometry(self.QtSettings.value(\"geometry\",QtCore.QByteArray(),type=QtCore.QByteArray))\r\n self.restoreState(self.QtSettings.value(\"state\",QtCore.QByteArray(),type=QtCore.QByteArray))\r\n# self.resize(self.QtSettings.value(\"windowSize\",QtCore.QSize(1024,1280),\r\n# type=QtCore.QSize))\r\n self.QtSettings.endGroup() \r\n\r\n #load values for various elements \r\n self.QtSettings.beginGroup(\"Settings\")\r\n pyguitools.gui_restore(self.ui,self.QtSettings)\r\n self.QtSettings.endGroup()", "def read_settings(self):\n config = ConfigParser.SafeConfigParser()\n config.read(os.path.dirname(os.path.realpath(__file__)) + '/linode.ini')\n\n # Cache related\n cache_path = config.get('linode', 'cache_path')\n self.cache_path_cache = cache_path + \"/ansible-linode.cache\"\n self.cache_path_index = cache_path + \"/ansible-linode.index\"\n self.cache_max_age = config.getint('linode', 'cache_max_age')", "def _exportNode(self):\n node = self._extractProperties()\n self._logger.info('settings exported.')\n return node", "def read_settings(self):\n self.settings = read_settings(self.settings_path)", "def upgrade_markup_controlpanel_settings(context):\n # get the old site properties\n portal_properties = getToolByName(context, \"portal_properties\")\n site_properties = portal_properties.site_properties\n # get the new registry\n registry = getUtility(IRegistry)\n # XXX: Somehow this code is executed for old migration steps as well\n # ( < Plone 4 ) and breaks because there is no registry. 
Looking up the\n # registry interfaces with 'check=False' will not work, because it will\n # return a settings object and then fail when we try to access the\n # attributes.\n try:\n settings = registry.forInterface(\n IMarkupSchema,\n prefix='plone',\n )\n except KeyError:\n settings = False\n if settings:\n settings.default_type = site_properties.default_contenttype\n\n forbidden_types = site_properties.getProperty('forbidden_contenttypes')\n forbidden_types = list(forbidden_types) if forbidden_types else []\n\n portal_transforms = getToolByName(context, 'portal_transforms')\n allowable_types = portal_transforms.listAvailableTextInputs()\n\n settings.allowed_types = tuple([\n _type for _type in allowable_types\n if _type not in forbidden_types\n and _type not in 'text/x-plone-outputfilters-html' # removed, as in plone.app.vocabularies.types # noqa\n ])", "def load_settings(self):\n raise NotImplemented(\"load_settings method should be implemented.\")", "def load_init(self):\n\t\tself.init_et = ET.parse(self.init_in_fn)", "def import_settings(path_to_settings=None):\n\n file_path = 'settings.json' if path_to_settings is None else path_to_settings\n\n if not os.path.isfile(file_path):\n # settings file doesn't exist\n raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), 'settings.json')\n\n with open(file_path) as in_file:\n data = json.load(in_file)\n settings = Settings()\n\n # required attributes, fail if missing\n try:\n settings.input_file_path = os.path.join(os.path.dirname(sys.argv[0]), data['input_folder'], data['input_file'])\n settings.output_file_path = os.path.join(os.path.dirname(sys.argv[0]), data['output_folder'], data['output_file'])\n settings.default_timezone = data['default_timezone']\n settings.output_timezone = data['output_timezone']\n settings.custom_column_headers = data.get('custom_column_headers', [])\n settings.app_id = data['app_id']\n except KeyError as e:\n print(\"Key not found in {}: \".format(file_path) + str(e))\n sys.exit(1)\n\n return settings", "def restoreFromXml(self, root: org.jdom.Element) -> None:\n ...", "def install(*args, **kwds):\n ns = utils.ns_prepare(args + (kwds,), install=True)\n settings = utils.settings_from_ns(ns, update=True)\n return settings", "def initialize(self):\n my_setting = self.settings.get('my_setting')", "def prepare_node_attrs(self):", "def load_theme_values(self): \n pass", "def load_settings(self):\n # Set the default settings. 
In case in a later version of this script the settings change, new default variables will be added automatically\n self.settings = {\n # Connection settings to OBS Studio websockets plugin\n \"host\": \"localhost\",\n \"port\": 4444,\n \"password\": \"\",\n \"update_frequency\": 1, # seconds, how often the script loads the SC2 UI location\n }\n if os.path.isfile(self.settings_path):\n with open(self.settings_path) as f:\n self.settings.update(json.load(f))", "def test_existing_attribute(self):\n self.assertEqual(import_from_setting('TEST_SETTING'), 1)", "def _load_settings_to_jinja_env(self) :\n\t\t# Load filters if exists\n\t\tif hasattr(self.settings, 'FILTERS') :\n\t\t\tfor name, cls in utils.load_module(self.settings.FILTERS).__dict__.items() :\n\t\t\t\tself.jinja_env.filters[name] = cls\n\n\n\t\t# Load globals if exists\n\t\tif hasattr(self.settings, 'GLOBALS') :\n\t\t\tfor name, cls in utils.load_module(self.settings.GLOBALS).__dict__.items() :\n\t\t\t\tself.jinja_env.globals[name] = cls", "def merge_onto(cls, settings):\r\n for key, value in cls.SETTINGS.iteritems():\r\n setattr(settings, key, value)", "def importWebSocketClientSettings(context):\n site = context.getSite()\n utility = queryUtility(IWebSocketConnectionConfig, context=site)\n if utility is None:\n logger = context.getLogger('collective.websocketclient')\n logger.info('Nothing to import.')\n return\n if IPersistent.providedBy(utility):\n importObjects(utility, '', context)", "def settings(self, settings):\n\n self._settings = settings", "async def load(cls, document: str) -> Dict[str, Any]:\n return await cls.from_html(document)", "def __init__(self, settings):\n self._read_config(settings)", "def import_(self, node):\n yamal_name = os.path.join(self._root, self.construct_scalar(node))\n\n with open(yamal_name, 'r') as yamal_file:\n return yaml.load(yamal_file, ImportLoader)", "def __load_settings(self):\n\n self.app_settings = sublime.load_settings(self.SETTINGS_FILE)\n self.__refresh_settings(True)\n\n # The settings may change during execution so we need to listen for changes\n self.app_settings.add_on_change(self.SETTINGS_CALLBACK_KEY, self.__refresh_settings)", "def patch(cls):\n cls._original_element = xml.dom.minidom.Element\n xml.dom.minidom.Element = KmlElement", "def load_into_settings_module(self, settings_module):\n\n if self._api_token:\n settings_module.API_TOKEN = self._api_token\n\n if self._bot_emoji:\n settings_module.BOT_EMOJI = self._bot_emoji\n\n if self._bot_icon:\n settings_module.BOT_ICON = self._bot_icon\n\n if self._errors_to:\n settings_module.ERRORS_TO = self._errors_to\n\n # TODO: Perhaps figure out a better way to do this...\n settings_module.PLUGINS = set(self._slackbot_plugins)", "def load(self):\n if not path.isfile(self.SETTINGS_FILE):\n return\n data = load_json_from_disk(self.SETTINGS_FILE)\n for (key, value) in data.items():\n self.__dict__[key] = value", "def _load_settings(self):\n self._dll.LS_LoadSettings(self._serial_number)\n return None", "async def settings(self, ctx: BBContext):\n pass", "def updateSettings(self):\n self.parser.read(self.file)\n self.showTicker = self.parser.getboolean('Settings', 'showTicker')\n self.verbose = self.parser.getboolean('Settings', 'verbose')\n self.sleepTime = self.parser.getint('Settings', 'sleeptime')\n self.saveGraph = self.parser.getboolean('Settings', 'saveGraph')\n self.graphDPI = self.parser.getint('Settings', 'graphDPI')", "def settings_load(self):\n self.ui.spinBox_ATSP.setValue(self.default['ATSP'])\n\n if 
self.default['serialLabel'] == 'bt':\n self.ui.btRadio.setChecked(True)\n try:\n os.system(\"blueman-manager\")\n except:\n print \"Please install 'blueman' package\"\n elif self.default['serialLabel'] == 'usb':\n self.ui.usbRadio.setChecked(True)\n else:\n self.ui.devRadio.setChecked(True)\n\n if self.default['units'] == 'metric':\n self.ui.units_metric_radio.setChecked(True)\n else:\n self.ui.units_US_radio.setChecked(True)\n\n return", "def __init__(self) -> None:\n self._settings = {}\n\n # Load values from global_settings (only uppercase)\n self.filter_and_set(global_settings)\n\n settings_env_value: str = os.environ.get(SETTINGS_ENV)\n if settings_env_value:\n # Load values from custom settings\n try:\n module = importlib.import_module(settings_env_value)\n except ModuleNotFoundError:\n msg = \"Can't import custom settings. Is it under PYTHONPATH?\"\n raise ModuleError(msg)\n self.filter_and_set(module)", "def parse(cls, dom, configDir, fileName, strict=True):\n topComment = None\n rcNode = None\n for kid in dom.childNodes:\n if kid.nodeType == Node.TEXT_NODE:\n continue\n\n if kid.nodeType == Node.COMMENT_NODE:\n topComment = kid.nodeValue\n continue\n\n if kid.nodeType == Node.ELEMENT_NODE:\n if kid.nodeName == \"runConfig\":\n if rcNode is None:\n rcNode = kid\n else:\n msg = \"Found multiple <runConfig> tags in %s\" % \\\n fileName\n raise ProcessError(msg)\n\n if rcNode is None:\n raise ProcessError(\"No <runConfig> tag found in %s\" % fileName)\n\n domcfgDir = os.path.join(configDir, \"domconfigs\")\n\n runCfg = DAQConfig(fileName)\n if topComment is not None:\n runCfg.setTopComment(topComment)\n\n for kid in rcNode.childNodes:\n if kid.nodeType == Node.TEXT_NODE:\n continue\n\n if kid.nodeType == Node.COMMENT_NODE:\n continue\n\n if kid.nodeType == Node.ELEMENT_NODE:\n if kid.nodeName == \"domConfigList\":\n if kid.attributes is None or len(kid.attributes) == 0:\n hub = None\n else:\n if len(kid.attributes) != 1:\n raise ProcessError((\"<%s> node has extra\" +\n \" attributes\") % kid.nodeName)\n attrName = \"hub\"\n if not kid.attributes.has_key(attrName):\n raise ProcessError((\"<%s> node should have\" +\n \" \\\"%s\\\" attribute, not\" +\n \" \\\"%s\\\"\") %\n (kid.nodeName, attrName,\n kid.attributes.keys()[0]))\n\n hub = int(kid.attributes[attrName].value)\n\n dcName = cls.getChildText(kid).strip()\n if hub is None or cls.PARSE_DOM_CONFIG:\n domCfg = DomConfigParser.load(dcName, domcfgDir,\n strict)\n runCfg.addDomConfig(domCfg, hub)\n else:\n runCfg.addDomConfigName(dcName, hub)\n elif kid.nodeName == \"triggerConfig\":\n trigCfg = cls.getChildText(kid)\n cls.__parseTriggerConfig(configDir, trigCfg)\n runCfg.setTriggerConfig(trigCfg)\n elif kid.nodeName == \"hubFiles\":\n if kid.attributes is None or len(kid.attributes) == 0:\n raise ProcessError(\"<%s> node has no attributes\" %\n kid.nodeName)\n if len(kid.attributes) != 1:\n raise ProcessError(\"<%s> node has extra attributes\" %\n kid.nodeName)\n attrName = \"baseDir\"\n if not kid.attributes.has_key(attrName):\n raise ProcessError((\"<%s> node should have \\\"%s\\\"\" +\n \" attribute, not \\\"%s\\\"\") %\n (kid.nodeName, attrName,\n kid.attributes.keys()[0]))\n\n runCfg.setReplayBaseDir(kid.attributes[attrName].value)\n\n cls.__parseHubFiles(kid, runCfg)\n elif kid.nodeName == \"stringHub\":\n cls.__parseSenderOption(kid, runCfg)\n elif kid.nodeName == \"runComponent\":\n if kid.attributes is None or len(kid.attributes) == 0:\n raise ProcessError(\"<%s> node has no attributes\" %\n kid.nodeName)\n if 
len(kid.attributes) != 1:\n raise ProcessError(\"<%s> node has extra attributes\" %\n kid.nodeName)\n if not kid.attributes.has_key(\"name\"):\n raise ProcessError((\"<%s> node should have \\\"name\\\"\" +\n \" attribute, not \\\"%s\\\"\") %\n (kid.nodeName,\n kid.attributes.keys()[0]))\n\n runCfg.addComponent(kid.attributes[\"name\"].value, strict)\n elif kid.nodeName == \"watchdog\":\n valStr = kid.attributes[\"period\"].value\n try:\n period = int(valStr)\n except:\n raise ProcessError((\"<%s> node has invalid\" +\n \" \\\"period\\\" value \\\"%s\\\"\") %\n (kid.nodeName, valStr))\n runCfg.setWatchdogPeriod(period)\n elif kid.nodeName == \"monitor\":\n valStr = kid.attributes[\"period\"].value\n try:\n period = int(valStr)\n except:\n raise ProcessError((\"<%s> node has invalid\" +\n \" \\\"period\\\" value \\\"%s\\\"\") %\n (kid.nodeName, valStr))\n runCfg.setMonitorPeriod(period)\n elif kid.nodeName == \"defaultLogLevel\":\n pass\n elif kid.nodeName == \"stream\":\n if cls.STRAY_STREAM_HACK:\n cls.__parseStrayStream(kid, runCfg)\n else:\n print >>sys.stderr, \"Ignoring stray <stream> in %s\" % \\\n fileName\n else:\n raise ProcessError(\"Unknown runConfig node <%s> in %s\" %\n (kid.nodeName, fileName))\n continue\n\n raise ProcessError(\"Found unknown runConfig node <%s>\" %\n kid.nodeName)\n\n if strict:\n runCfg.validate()\n\n return runCfg", "def __init__(self, element):\n self._element = element\n self._element.SetTemplateValue('importManager', self)\n self._class_name_to_qualified_name = {}\n self._google_imports = set()\n self._other_imports = set()\n self._java_imports = set()", "def setUp(self):\n self.scraped = SimpleContent(Selector.from_text(SIMPLE_HTML))", "def project_settings(request):\n webnode_settings = kakocase_settings(request)\n webnode_settings['settings']['IS_WEBNODE'] = True\n return webnode_settings", "def load(self):\n for field in self.fields:\n value = self.siteconfig.get(field)\n self.fields[field].initial = value\n\n if field in self.disabled_fields:\n self.fields[field].widget.attrs['disabled'] = 'disabled'", "def load_settings(self, outfile='settings.p'):\n settings = pickle.load(open(path,'rb'))\n self.__dict__.update(settings)", "def _import():\n global webbrowser, contextlib, yaml, psutil, snakemake, logger, setup_logger, xdg, datetime, _logging\n import yaml\n import psutil\n import webbrowser\n import contextlib\n import datetime\n\n import snakemake\n from snakemake.logging import logger, setup_logger, _logging\n import xdg", "def load_form_settings(self, group, item):\n settings = QSettings(self._company, self._section)\n settings.beginGroup(group)\n self.restoreState(settings.value(item, QByteArray()))\n self.restoreGeometry(settings.value(item, QByteArray()))\n settings.endGroup()", "def setup(self):\r\n self.text_input_values = {}\r\n if self.tag == 'radiotextgroup':\r\n self.html_input_type = \"radio\"\r\n elif self.tag == 'checkboxtextgroup':\r\n self.html_input_type = \"checkbox\"\r\n else:\r\n raise Exception(\"ChoiceGroup: unexpected tag {0}\".format(self.tag))\r\n\r\n if self.value == '':\r\n # Make `value` an empty dictionary, if it currently has an empty\r\n # value. 
This is necessary because the template expects a\r\n # dictionary.\r\n self.value = {}\r\n self.choices = self.extract_choices(self.xml)", "def from_xml_node(cls, xml_node):\n from_ = get_xml_text_value(xml_node, xml_tags.Elements.FROM)\n to = get_xml_text_value(xml_node, xml_tags.Elements.TO)\n return cls(xml_tags.Elements.SRC, from_, to)", "def __load(self, node):\n\n self.tiles = node['data']\n self.name = node['name']\n self.opacity = node['opacity']\n self.visible = node['visible']", "def importMasterSettings(self):\n\t\tpickleData = pickle.load( open( self.masterPath.path, \"rb\" ) )\n\t\tmaster = rlayer.RenderLayer( 'defaultRenderLayer' )\n\t\tmaster.makeCurrent()\n\t\tfor a in pickleData.keys():\n\t\t\ttry:\n\t\t\t\ta.v = pickleData[a]\n\t\t\texcept:\n\t\t\t\tcontinue", "def parseDomNode(self,node):\n if node.nodeType != dom.Node.ELEMENT_NODE and node.nodeType != dom.Node.DOCUMENT_NODE:\n return\n name=node.tagName\n handler=avnav_handlerList.findHandlerByConfigName(name)\n if handler is not None:\n AVNLog.info(\"parsing entry for handler %s\",name)\n self.parseHandler(node,handler)\n else:\n nextElement=node.firstChild\n while nextElement is not None:\n self.parseDomNode(nextElement)\n nextElement=nextElement.nextSibling", "def parseDomNode(self,node):\n if node.nodeType != dom.Node.ELEMENT_NODE and node.nodeType != dom.Node.DOCUMENT_NODE:\n return\n name=node.tagName\n handler=avnav_handlerList.findHandlerByConfigName(name)\n if handler is not None:\n AVNLog.info(\"parsing entry for handler %s\",name)\n self.parseHandler(node,handler)\n else:\n nextElement=node.firstChild\n while nextElement is not None:\n self.parseDomNode(nextElement)\n nextElement=nextElement.nextSibling", "def from_html(self, content):\r\n pass", "def _parse_preset(self, xmldata):\r\n\r\n raise NotImplementedError", "def startup(self):\n self.settings = sublime.load_settings(self.settings_base)\n self.sublime_settings = sublime.load_settings(self.sublime_base)", "def import_registry_settings(site):\n PROFILE_ID = 'profile-interlegis.portalmodelo.policy:default'\n setup = api.portal.get_tool('portal_setup')\n setup.runImportStepFromProfile(PROFILE_ID, 'plone.app.registry')", "def FromXML(cls, doc, element, default=\"absolute\"):\n return cls(element.get(\"type\", default), NumberDef(element.text))", "def from_xml_node(cls, xml_node):\n raise NotImplementedError(\"from_xml_node must be implemented by derived classes.\")", "def importXml ( r ):\n rawText = r.read ()\n rawText = rawText.strip ()\n pattern = re.compile (r'[^\\S ]+')\n text = re.sub ( pattern, '', rawText )\n xml = ET.fromstring ( text )\n assert str ( type ( xml ) ) == \"<type 'instance'>\"\n return xml", "def loadSettings(self, filename='short_240.settings'):\n global master_run_no\n self.settingsFilename = filename\n # print 'self.settingsFilename = ', self.settingsFilename\n if os.path.exists(filename):\n stream = open(filename, 'r')\n else:\n stream = open(master_lattice_location+filename, 'r')\n self.settings = yaml.load(stream, Loader=yaml.UnsafeLoader)\n self.globalSettings = self.settings['global']\n master_run_no = self.globalSettings['run_no'] if 'run_no' in self.globalSettings else 1\n self.fileSettings = self.settings['files']\n elements = self.settings['elements']\n self.groups = self.settings['groups'] if 'groups' in self.settings and self.settings['groups'] is not None else {}\n stream.close()\n\n # for name, elem in list(self.groups.items()):\n # group = globals()[elem['type']](name, self.elementObjects, **elem)\n # 
self.groupObjects[name] = group\n\n for name, elem in list(elements.items()):\n self.read_Element(name, elem)\n\n # for name, lattice in list(self.fileSettings.items()):\n # self.read_Lattice(name, lattice)", "def __setup(self, SETTINGS_FILE):\n config = ConfigParser()\n try:\n config.read(SETTINGS_FILE)\n self.settings = Settings(config)\n self.data = Data()\n except IOError:\n raise FileMissing(SETTINGS_FILE)\n except Exception as e:\n raise e", "def load_settings(env=\"prod\"):\n global config\n config = configparser.SafeConfigParser()\n config.read(CONFIG_FILES.get(env))", "def __enter__(self):\n self.root = ET.parse(self.manifest).getroot()\n self._parse_content()\n return self", "def parser(self):\n\t\tdom = ET.parse(self.input_filename)\n\t\tself.doc = dom.getroot()", "def _init_node_attributes(self):\n assert False", "def create_dd_settings(xml_document, parent_element):\n dd_properties_element = xml_document.createElement(\"dd_properties\")\n parent_element.appendChild(dd_properties_element)\n\n option_element = xml_document.createElement(\"Option\")\n option_element.setAttribute('type', 'Map')\n dd_properties_element.appendChild(option_element)\n\n option_child1_element = xml_document.createElement(\"Option\")\n option_child1_element.setAttribute('type', 'QString')\n option_child1_element.setAttribute('name', 'name')\n option_child1_element.setAttribute('value', '')\n option_element.appendChild(option_child1_element)\n\n option_child2_element = xml_document.createElement(\"Option\")\n option_child2_element.setAttribute('name', 'properties')\n option_element.appendChild(option_child2_element)\n\n option_child3_element = xml_document.createElement(\"Option\")\n option_child3_element.setAttribute('type', 'QString')\n option_child3_element.setAttribute('name', 'type')\n option_child3_element.setAttribute('value', 'collection')\n option_element.appendChild(option_child3_element)", "def load_cElementTree(finder, module):\n finder.IncludeModule(\"elementtree.ElementTree\")", "def load_data(self):\n with open(self.FILE, 'r') as html_file:\n document = html_file.read()\n self.HTML = document", "def __init__( settings={} ):", "def import_from_string(val, setting_name):\n try:\n return import_string(val)\n except ImportError as e:\n raise e", "def setup_parser(self, parser):", "def perform_import(val, setting_name):\n if val is None:\n return None\n elif isinstance(val, str):\n return import_from_string(val, setting_name)\n elif isinstance(val, (list, tuple)):\n return [import_from_string(item, setting_name) for item in val]\n return val", "def load(self):\n\n from django.conf import settings as st\n\n loaded: namedtuple = default\n\n if hasattr(st, 'AUTOMATED_LOGGING'):\n loaded = ConfigSchema().load(st.AUTOMATED_LOGGING)\n\n # be sure `loaded` has globals as we're working with those,\n # if that is not the case early return.\n if not hasattr(loaded, 'globals'):\n return loaded\n\n # use the binary **or** operator to apply globals to Set() attributes\n values = {}\n for name in loaded._fields:\n field = getattr(loaded, name)\n values[name] = field\n\n if not isinstance(field, tuple) or name == 'globals':\n continue\n\n values[name] = field | loaded.globals\n\n self.loaded = loaded._replace(**values)\n return self", "def from_node(self, root):\n \n self.set_at_from_string(root.getAttribute(\"at\"))\n self.action = root.getAttribute(\"action\")\n self.actor = root.getAttribute(\"actor\")\n self.regarding = root.getAttribute(\"regarding\")\n self.source = root.getAttribute(\"source\")\n 
self.tags = root.getAttribute(\"tags\").split(\",\")\n self.to = root.getAttribute(\"to\")\n self.url = root.getAttribute(\"url\")\n\n for node in root.childNodes:\n if node.tagName == 'payload':\n for subnode in node.childNodes:\n if subnode.tagName == 'body':\n self.body = subnode.childNodes[0].nodeValue\n elif subnode.tagName == 'raw':\n self.raw = subnode.childNodes[0].nodeValue", "def Import(content: bytes):\n config_set = _ParseConfigSet(content)\n\n # Remove any older data\n info = config_set.info\n if info and info.key.get():\n Delete(info.url)\n\n config_encoder.Load(config_set)\n info.put()\n return mtt_messages.Convert(info, mtt_messages.ConfigSetInfo)", "def read(self):\r\n tree = ElementTree(file=self._fn)\r\n self._root = tree.getroot()\r\n for prop in self.prpnames:\r\n self.__setattr__(prop, DocItem.get_attr(self, prop))", "def replace(*args, **kwds):\n ns = utils.ns_prepare(args + (kwds,), install=False)\n settings = utils.settings_from_ns(ns, update=True)\n return settings", "def und_add_setting(udb_file, configuration_path_file):\n subprocess.call(f\"und import {configuration_path_file} {udb_file}\")", "def manual_import_prop(self, path):\n dtu = DtuLoader.DtuLoader(path)\n fbx_path = dtu.get_fbx_path()\n self.prop_import(fbx_path, dtu)", "def initParser():\n libxml2mod.xmlInitParser()", "def parse_element(self, element):\n if element.firstChild:\n self.url = element.firstChild.data\n else:\n # This happens with these 'cache' url's. Need to figure\n # out where the cache _comes from_\n #\n self.url = None\n\n self.spoof_url = element.getAttribute(\"spoof\")\n if element.hasAttribute(\"post\"):\n self.use_post = True\n self.function = element.getAttribute(\"function\")\n self.cache_key = element.getAttribute(\"cache\")\n return", "def merge_into_settings(self, settings):\n if not self._meta_dict:\n self._load_from_file()\n\n settings.chat_name = self._meta_dict[DumpMetadata.CHAT_NAME]\n settings.last_message_id = self._meta_dict[DumpMetadata.LAST_MESSAGE_ID]\n settings.exporter = self._meta_dict[DumpMetadata.EXPORTER]", "def __init__(self, element, parent):\n super(XMLObj, self).__setattr__('element', element)\n super(XMLObj, self).__setattr__('parent', parent)", "def update(self):\n registry = getUtility(IRegistry)\n site_settings = registry.forInterface(\n ISiteSchema, prefix=\"plone\", check=False)\n try:\n if site_settings.webstats_js:\n self.webstats_js = site_settings.webstats_js\n except AttributeError:\n pass", "def _local_settings(cls):\n # type: () -> QSettings\n filename = \"{}.ini\".format(qname(cls))\n fname = os.path.join(settings.widget_settings_dir(), filename)\n return QSettings(fname, QSettings.IniFormat)", "def load_settings(self):\n # config file from branch's asdf\n config_exists = os.path.isfile(self.config_path)\n\n if config_exists:\n\n config_file = open(self.config_path, 'r')\n self.config_json = json.load(config_file)\n config_file.close()\n\n else:\n raise Exception(\"Error BranchConfig: could not find config json\")\n\n\n try:\n self.branch_settings = self.config_json[self.branch]\n\n self.branch_keys = self.branch_settings.keys()\n\n for attr in self.branch_keys:\n setattr(self, attr, self.branch_settings[attr])\n\n except:\n raise Exception(\"Error BranchConfig: could not add config settings to BranchConfig\")", "def loadState(self, e, force=False):\n if e.tag == self.type and e.get('name') == self.name:\n c = e\n else:\n c = e.find(\".//\" + self.type + \"[@name='\" + self.name + \"']\")\n if c is None:\n print \"Error: no settings found 
for constrain %s\" % self.name\n return c", "def settings(self):\n if self._settings is not None:\n return self._settings\n\n settings = self.binaries['KeeAgent.settings'].content\n self._settings = objectify.fromstring(settings)\n return self._settings", "def loadSettings(self, e):\n c = constrain.loadSettings(self, e)\n g = c.get(\"status\")\n self.status = (g == 'true')\n return c", "def initialize_from_config(self):" ]
[ "0.6894675", "0.54793346", "0.5375706", "0.5349439", "0.5210967", "0.50370497", "0.4952911", "0.49231037", "0.48926273", "0.48800564", "0.48647884", "0.48480543", "0.4817451", "0.48003417", "0.47981542", "0.4741064", "0.47284755", "0.4716894", "0.46843192", "0.46814933", "0.46778256", "0.46706027", "0.46440727", "0.4634674", "0.4625217", "0.4621957", "0.45781228", "0.4509231", "0.44961867", "0.4485351", "0.4480031", "0.44643733", "0.44509986", "0.44103476", "0.44033888", "0.44006848", "0.44006535", "0.4397514", "0.43944123", "0.43914422", "0.43876126", "0.43835935", "0.43554395", "0.43430358", "0.43424395", "0.4314916", "0.43039533", "0.43020722", "0.43002424", "0.42918053", "0.42823103", "0.42788786", "0.42771068", "0.42737967", "0.42620355", "0.42548457", "0.4247327", "0.4230496", "0.42266855", "0.42262256", "0.42236567", "0.42236567", "0.4222754", "0.42200315", "0.4213155", "0.42118838", "0.42090464", "0.42079955", "0.42071232", "0.42018622", "0.4200755", "0.4199474", "0.41993335", "0.41938096", "0.419037", "0.41894284", "0.4185671", "0.41811442", "0.4180572", "0.4175985", "0.41755456", "0.41657242", "0.4155714", "0.41553324", "0.41524252", "0.41489792", "0.4144594", "0.41417894", "0.4133762", "0.41258964", "0.41214764", "0.41185024", "0.41177705", "0.41177008", "0.41171342", "0.41052985", "0.41025075", "0.41022035", "0.4095539", "0.40901476" ]
0.58705205
1
Label watersheds based on Barnes' priority flood algorithm
def watersheds(np.ndarray[dtype=float_t, ndim=2, mode="c"] z, missing_value = 0): cdef np.ndarray[dtype=int_t, ndim=2, mode="c"] output output = np.empty_like(z, dtype='i') priority_flood_c.priority_flood_watersheds_wrapper(z.shape[1], z.shape[0], <float*>z.data, <int*>output.data, missing_value) return output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def watershed_supervised(graph, seeds):\n \n size_data = graph.shape[0]\n \n u, v, w = sp.sparse.find(graph)\n list_edges = list(zip(u,v,w))\n list_edges.sort(key = lambda x : x[2])\n \n UF = unionfind(size_data)\n labels = np.array(seeds, dtype=np.int32, copy=True)\n \n for e in list_edges:\n ex, ey, ew = e\n px, py = UF.find(ex), UF.find(ey)\n if px != py:\n lx, ly = labels[px], labels[py]\n if (lx != 0) and (ly !=0 ):\n # Both are labelled. Do nothing\n pass\n else:\n max_label = max(lx, ly) # Pick the non-zero label!!\n labels[px], labels[py] = max_label, max_label\n UF.union(ex, ey)\n \n \n ans = np.array([labels[UF.find(x)] for x in range(size_data) ] )\n \n# assert np.all(ans>0)\n \n return ans", "def powerWatershed_multipleLabels(graph, seeds, bucketing='kmeans', eps=1e-2, k=3, beta=5., eps_weight=1e-6):\n\n ans = []\n for i in np.sort(np.unique(seeds)):\n if i > 0:\n seed_tmp = np.array((seeds == i)*1, dtype=np.int32) + 1\n seed_tmp[np.where(seeds == 0)] = 0\n ans.append(powerWatershed(graph, seed_tmp, bucketing, eps, k, beta, eps_weight))\n return np.argmax(ans, 0) + 1", "def powerWatershed(graph, seeds, bucketing='kmeans', eps=1e-2, k=3, beta=5., eps_weight=1e-6):\n\n size = graph.shape[0]\n\n graph_tmp = csr_matrix(graph, copy=True)\n graph_tmp.data = np.exp(-1*beta*graph_tmp.data/graph_tmp.data.std()) + eps_weight\n \n labels = np.array(seeds, dtype=np.float64) - 1\n\n uf = unionfind(size)\n\n edge_generator = generate_edges(graph_tmp, bucketing, eps, k)\n edges_till_now = []\n for edges in edge_generator:\n\n # Add all the edges to the graph\n for e in edges:\n uf.union(e[0], e[1])\n\n edges_till_now += edges\n\n for (_, comp) in uf.generate_comps():\n label_unique = set(np.unique(labels[comp]))\n\n # continue if all points in the component are labelled\n if -1 not in label_unique:\n continue\n\n # othwerwise solve rw\n if len(label_unique) <= 2:\n l = max(label_unique)\n labels[comp] = l\n\n if len(label_unique) > 2:\n _randomwalkV2(graph, labels, comp, beta, eps_weight)\n\n return labels", "def watershed(img):\n\ttmp = img.copy()\n\tgray = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)\n\tret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n\tkernel = np.ones((3,3), np.uint8)\n\topening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)\n\tsure_bg = cv2.dilate(opening, kernel, iterations=3)\n\tdist_transform = cv2.distanceTransform(opening, cv2.cv.CV_DIST_L2, 5)\n\tret, sure_fg = cv2.threshold(dist_transform, 0.7*dist_transform.max(), 255, 0)\n\tsure_fg = np.uint8(sure_fg)\n\tunknown = cv2.subtract(sure_bg, sure_fg)\n\tret, markers = cv2.connectedComponents(sure_fg) #IS THIS REALLY NOT IMPLEMENTED IN PYTHON?\n\tmarkers = markers+1\n\tmarkers[unknown==255] = 0\n\tmarkers = cv2.watershed(tmp, markers)\n\ttmp[markers == -1] = [255,0,0]\n\treturn tmp", "def heuristic2B_label_OBD(n, P, label, critical=None):\n nodes_labeled = []\n\n flag_critical = False # if I will label more than one neighbor from now on, then the labels will be critical (not to be changed by others)\n new_label = label + 1\n \n neighbors = P.neighbors(n)\n for neigh in neighbors:\n if 'OBDlabel' in P.node[neigh].keys(): # if it has a label\n if P.node[neigh]['OBDlabel'] > new_label: # and it is higher than what I would use for labeling\n new_label = P.node[neigh]['OBDlabel']\n # we got maximum of current label or any node that neighbors have - now we label them all with that\n \n neighbors_to_label = []\n for neigh in neighbors:\n if 'OBDlabel' in P.node[neigh].keys():\n 
if (P.node[neigh]['OBDlabel'] >= P.node[n]['OBDlabel']) or (P.node[neigh]['OBDlabel'] == None): # now they can have it, but set to None (because of removal in failers)\n neighbors_to_label.append(neigh)\n else: # if set and smaller than mine, leave them alone\n pass\n else: # if not set, then not lower and not labelled\n neighbors_to_label.append(neigh)\n # now we have all the neighbors that need to be labeled\n \n if len(neighbors_to_label) > 1:\n flag_critical = True\n # and now labeling all these nodes\n \n for neigh in neighbors_to_label:\n if ('critical' in P.node[neigh].keys()) and (P.node[neigh]['critical']==True) and (P.node[neigh]['OBDlabel'] != new_label) :\n return (False, nodes_labeled) # being critical, we could avoid failure only if the label to set would be the same (it happens)\n else:\n P.node[neigh]['OBDlabel'] = new_label\n nodes_labeled.append(neigh) # this is a list that gets passed through recursions\n if flag_critical == True:\n P.node[neigh]['critical'] = True\n # labeling part done\n \n # and now recursive step - going into each neighbor to continue, in any order if necessary\n permutations = itertools.permutations(neighbors_to_label) # iterator : gets exhausted as we access elements\n for perm in permutations:\n this_run_success = True\n this_run_labeled = []\n for el in perm:\n (s, nl) = heuristic2B_label_OBD(el, P, new_label, flag_critical)\n this_run_labeled = this_run_labeled + nl\n if s == False:\n this_run_success = False\n if this_run_success == False:\n # then unlabel all that were labelled up to now\n for nn in this_run_labeled:\n P.node[nn]['OBDlabel'] = None\n P.node[nn]['critical'] = False\n else: # obviously success is True, we managed to label all others...\n nodes_labeled = nodes_labeled + this_run_labeled\n return (True, nodes_labeled)\n break\n # if no permutation is successful, we end up returning the last line\n return (False, nodes_labeled)", "def propagate_labels_majority(image,labels):\n rlabels,_ = label(image)\n cors = correspondences(rlabels,labels)\n outputs = zeros(amax(rlabels)+1,'i')\n counts = zeros(amax(rlabels)+1,'i')\n for rlabel, label_, count in cors.T:\n if not rlabel or not label_:\n # ignore background correspondences\n continue\n if counts[rlabel] < count:\n outputs[rlabel] = label_\n counts[rlabel] = count\n outputs[0] = 0\n return outputs[rlabels]", "def _get_watershed_boundaries(self, class_mask, dist_thresh=0.6):\n\n kernel = np.ones((5, 5), np.float32)\n\n # Use a distance transform to find the seed points for watershed\n tmp = class_mask\n tmp[tmp>0] = 1 # here\n dist = cv2.distanceTransform(tmp, cv2.DIST_L2, 5) # here .astype(np.uint8), cv2.DIST_L2, 5)\n dist = (dist / np.max(dist)) * 255.\n\n # Since there may be multiple peaks, we use dilation to find them\n dilate = cv2.dilate(dist, kernel, iterations=3)\n peaks = np.float32(np.where(dilate == dist, 1, 0))\n peaks = peaks * class_mask * 255\n\n sure_fg = np.where(peaks > 125, 255., 0.)\n sure_fg = cv2.dilate(sure_fg, kernel, iterations=2)\n sure_fg = np.uint8(sure_fg)\n\n sure_bg = cv2.dilate(class_mask, kernel, iterations=3) * 255\n unknown = sure_bg - sure_fg\n\n # Add one to all labels so that known background is not 0, but 1\n _, markers = cv2.connectedComponents(sure_fg)\n markers = markers + 1\n\n markers[unknown == 255] = 0\n\n markers = cv2.watershed(self.image, markers)\n\n watershed_superpixels = np.zeros(class_mask.shape, dtype=np.uint8)\n watershed_superpixels[markers == -1] = 255\n\n return watershed_superpixels", "def kohonen():\n# plb.close('all')\n 
\n dim = 28*28\n data_range = 255.0\n \n # load in data and labels \n data = np.array(np.loadtxt('data.txt'))\n labels = np.loadtxt('labels.txt')\n\n # select 4 digits \n name = \"Stettler\"\n targetdigits = name2digits(name) # assign the four digits that should be used\n print(targetdigits) # output the digits that were selected\n\n # this selects all data vectors that corresponds to one of the four digits\n data = data[np.logical_or.reduce([labels==x for x in targetdigits]),:]\n \n dy, dx = data.shape\n \n #set the size of the Kohonen map. In this case it will be 6 X 6\n size_k = 6\n \n #set the width of the neighborhood via the width of the gaussian that\n #describes it\n sigma = 2.0\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n\n #set the learning rate\n eta = 0.9 # HERE YOU HAVE TO SET YOUR OWN LEARNING RATE\n \n #set the maximal iteration count\n tmax = 5000 # this might or might not work; use your own convergence criterion\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n for t, i in enumerate(i_random):\n som_step(centers, data[i,:],neighbor,eta,sigma)\n\n # for visualization, you can use this:\n for i in range(size_k**2):\n plb.subplot(size_k,size_k,i)\n \n plb.imshow(np.reshape(centers[i,:], [28, 28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw()", "def labelNeighbours26(data, label, x0,y0,z0, index):\n shape = label.shape;\n for xp in range(max(0,-1+x0),min(2+x0, shape[0])):\n for yp in range(max(0,-1+y0),min(2+y0, shape[1])):\n for zp in range(max(0,-1+z0),min(2+z0, shape[2])):\n if data[xp,yp,zp] and label[xp,yp,zp] == 0:\n label[xp,yp,zp] = index;\n label = labelNeighbours26(data, label, xp,yp,zp, index);\n return label;", "def heuristic2_label_OBD(n, P, label, critical=None):\n print \"trying to label \" + str(n) + \" with \" + str(label)\n nodes_labeled = []\n if ('critical' in P.node[n].keys()) and (P.node[n]['critical']==True) and (P.node[n]['OBDlabel'] != label) :\n print \"FAIL on critical and not the same label.\"\n return (False, []) # being critical, we could avoid failure only if the label to set would be the same (it happens)\n else:\n P.node[n]['OBDlabel'] = label\n nodes_labeled.append(n) # this is a list that gets passed through recursions\n if critical == True:\n P.node[n]['critical'] = True\n # labeling part done\n flag_critical = False # if I will label more than one neighbor from now on, then the labels will be critical (not to be changed by others)\n new_label = label + 1\n neighbors = P.neighbors(n)\n for neigh in neighbors:\n if 'OBDlabel' in P.node[neigh].keys():\n if P.node[neigh]['OBDlabel'] > new_label:\n new_label = P.node[neigh]['OBDlabel']\n # we got maximum of current label or any node that neighbors have - now we label them all with that\n neighbors_to_label = []\n for neigh in neighbors:\n if 'OBDlabel' in P.node[neigh].keys():\n if (P.node[neigh]['OBDlabel'] >= P.node[n]['OBDlabel']) or (P.node[neigh]['OBDlabel'] == None): # now they can have it, but set to None (because of removal in failers)\n neighbors_to_label.append(neigh)\n else: # if set and smaller than mine, leave them alone\n pass\n else: # if not set, then not lower and not labelled\n neighbors_to_label.append(neigh)\n # now we have all the neighbors that need to be labeled\n if 
len(neighbors_to_label) > 1:\n flag_critical = True\n # and now the recursive step - labeling all these nodes\n permutations = itertools.permutations(neighbors_to_label) # iterator : gets exhausted as we access elements\n for perm in permutations:\n print \"trying perm: \" + str(perm)\n this_run_success = True\n this_run_labeled = []\n for el in perm:\n (s, nl) = heuristic2_label_OBD(el, P, new_label, flag_critical)\n this_run_labeled = this_run_labeled + nl\n if s == False:\n this_run_success = False\n break\n if this_run_success == False:\n # then unlabel all that were labelled up to now\n for nn in this_run_labeled:\n print \"removing label of \" + str(nn)\n P.node[nn]['OBDlabel'] = None\n P.node[nn]['critical'] = False\n else: # obviously success is True, we managed to label all others...\n nodes_labeled = nodes_labeled + this_run_labeled\n print \"Win in labeling neighbors of \" + str(n)\n return (True, nodes_labeled)\n break\n # if no permutation is successful, we end up returning the last line\n return (False, nodes_labeled)\n print \"FAIL of all permutations from \" + str(n)", "def watershed(mask, img, plotImage = False, kernelSize = None):\n imgCopy = img.copy()\n maskCopy = np.array(mask.copy(), dtype=np.uint8)\n \n if kernelSize is None:\n kernelSize = 2\n\n # Finding sure foreground area\n #dist_transform = cv2.distanceTransform(mask, cv2.DIST_L2, 5)\n #ret, sure_fg = cv2.threshold(dist_transform,0.3*dist_transform.max(),255,0) #change the second argument to change the sensitivity \n maskClosed = skimage.morphology.closing(np.array(maskCopy, dtype=np.uint8))\n maskClosed = skimage.morphology.closing(np.array(maskClosed, dtype=np.uint8))\n kernel = np.ones((kernelSize,kernelSize), np.uint8)\n # maskCopy = img_as_bool(maskCopy)\n sure_fg = cv2.erode(maskClosed, kernel, iterations = 2) ###\n sure_fg = skimage.morphology.closing(np.array(sure_fg, dtype=np.uint8))\n # kernel = np.ones((2,2), np.uint8)\n # sure_fg = binary_closing(sure_fg, kernel)\n \n # sure background area\n #kernel = np.ones((5, 5), np.uint8)\n #sure_bg = cv2.dilate(mask, kernel, iterations = 1)\n sure_fg_bool = 1 - img_as_bool(sure_fg)\n # sure_bg = np.uint8(1 - morphology.medial_axis(sure_fg_bool)) ### \n sure_bg = np.uint8(1 - morphology.skeletonize(sure_fg_bool))\n sure_bg[0, :] = 1\n sure_bg[-1, :] = 1\n sure_bg[:, 0] = 1\n sure_bg[:, -1] = 1\n \n # Finding unknown region\n sure_fg = np.uint8(sure_fg)\n unknown = cv2.subtract(sure_bg, sure_fg)\n \n if plotImage:\n plt.figure()\n plt.imshow(sure_fg)\n plt.title(\"Inner Marker\")\n plt.figure()\n plt.imshow(sure_bg)\n plt.title(\"Outer Marker\")\n plt.figure()\n plt.imshow(unknown)\n plt.title(\"Unknown\")\n \n # Marker labelling\n ret, markers = cv2.connectedComponents(sure_fg)\n\n # Add one to all labels so that sure background is not 0, but 1\n markers = markers+1\n\n # Now, mark the region of unknown with zero\n markers[unknown==1] = 0\n \n if plotImage:\n plt.figure()\n plt.imshow(markers, cmap='jet')\n plt.title(\"Markers\")\n \n # Do watershed\n markers = cv2.watershed(imgCopy, markers)\n \n imgCopy[markers == -1] = [0, 255 ,0]\n\n if plotImage:\n plt.figure()\n plt.imshow(markers,cmap='jet')\n plt.title(\"Mask\")\n plt.figure()\n plt.imshow(img)\n plt.title(\"Original Image\")\n plt.figure()\n plt.imshow(imgCopy)\n plt.title(\"Marked Image\")\n plt.show()\n\n return markers", "def cwatershed(f, g, Bc=None, LINEREG=\"LINES\"):\n from numpy import ones, zeros, nonzero, array, put, take, argmin, transpose, compress, concatenate\n if Bc is None: Bc = 
secross()\n return g\n print 'starting'\n withline = (LINEREG == 'LINES')\n if isbinary(g):\n g = label(g,Bc)\n print 'before 1. pad4n'\n status = pad4n(to_uint8(zeros(f.shape)),Bc, 3)\n f = pad4n( f,Bc,0) #pad input image\n print 'before 2. pad4n'\n y = pad4n( g,Bc,0) # pad marker image with 0\n if withline:\n y1 = intersec(binary(y), 0)\n costM = limits(f)[1] * ones(f.shape) # cuulative cost function image\n mi = nonzero(gradm(y,sebox(0),Bc).ravel()) # 1D index of internal contour of marker\n print 'before put costM'\n put(costM.ravel(),mi, 0)\n HQueue=transpose([mi, take(costM.ravel(), mi)]) # init hierarquical queue: index,value\n print 'before se2list0'\n Bi=se2list0(f,Bc) # get 1D displacement neighborhood pixels\n x,v = mat2set(Bc)\n while HQueue:\n print 'Hq=',HQueue\n i = argmin(HQueue[:,1]) # i is the index of minimum value\n print 'imin=',i\n pi = HQueue[i,0]\n print 'pi=',pi\n ii = ones(HQueue.shape[0])\n ii[i] = 0\n print 'ii=',ii\n HQueue = transpose(array([compress(ii,HQueue[:,0]),\n compress(ii,HQueue[:,1])])) # remove this pixel from queue\n print 'H=',HQueue\n put(status.ravel(), pi, 1) # make it a permanent label\n for qi in pi+Bi : # for each neighbor of pi\n if (status.flat[qi] != 3): # not image border\n if (status.flat[qi] != 1): # if not permanent\n cost_M = max(costM.flat[pi], f.flat[qi])\n if cost_M < costM.flat[qi]:\n print 'qi=',qi\n costM.flat[qi] = cost_M\n y.flat[qi] = y.flat[pi] # propagate the label\n aux = zeros(array(HQueue.shape) + [1,0])\n aux[:-1,:] = HQueue\n aux[-1,:]=[qi, cost_M]\n HQueue = aux # insert pixel in the queue\n print 'insert H=',HQueue\n elif (withline and\n (y.flat[qi] != y.flat[pi]) and\n (y1.flat[pi] == 0) and\n (y1.flat[qi] == 0) ):\n y1.flat[pi] = 1\n if withline:\n Y = y1\n else:\n Y = y\n return Y", "def convert_to_original_labels(array, threshold=0.5, initialization_value=999):\r\n \r\n binarized, belief = get_binarized_and_belief(array=array, threshold=threshold)\r\n \r\n #sanity check\r\n if binarized.shape != belief.shape:\r\n raise ValueError('Sanity check did not pass.')\r\n \r\n # initialize with a crazy label we will be sure is gone in the end\r\n slice_all_but_last_channel = tuple([slice(None) for _ in array.shape[:-1]] + [0])\r\n original_labels = initialization_value * np.ones_like(array[slice_all_but_last_channel])\r\n \r\n # the outer keys correspond to the binarized values\r\n # the inner keys correspond to the order of indices comingn from argsort(ascending) on suspicion, i.e. 
\r\n # how far the binarized sigmoid outputs were from the original sigmoid outputs \r\n # for example, (2, 1, 0) means the suspicion from least to greatest was: 'WT', 'TC', 'ET'\r\n # (recall that the order of the last three channels is expected to be: 'ET', 'TC', and 'WT')\r\n mapper = {(0, 0, 0): 0, \r\n (1, 1, 1): 4,\r\n (0, 1, 1): 1,\r\n (0, 0, 1): 2,\r\n (0, 1, 0): {(2, 0, 1): 0,\r\n (2, 1, 0): 0, \r\n (1, 0, 2): 1,\r\n (1, 2, 0): 1,\r\n (0, 2, 1): 0,\r\n (0, 1, 2): 1}, \r\n (1, 1, 0): {(2, 0, 1): 0,\r\n (2, 1, 0): 0, \r\n (1, 0, 2): 4,\r\n (1, 2, 0): 4,\r\n (0, 2, 1): 4,\r\n (0, 1, 2): 4},\r\n (1, 0, 1): {(2, 0, 1): 4,\r\n (2, 1, 0): 2, \r\n (1, 0, 2): 2,\r\n (1, 2, 0): 2,\r\n (0, 2, 1): 4,\r\n (0, 1, 2): 4}, \r\n (1, 0, 0): {(2, 0, 1): 0,\r\n (2, 1, 0): 0, \r\n (1, 0, 2): 0,\r\n (1, 2, 0): 0,\r\n (0, 2, 1): 4,\r\n (0, 1, 2): 4}}\r\n \r\n \r\n \r\n done_replacing = False\r\n \r\n for binary_key, inner in mapper.items():\r\n mask1 = check_subarray(array1=binarized, array2=np.array(binary_key))\r\n if isinstance(inner, int):\r\n original_labels, done_replacing = replace_initializations(done_replacing=done_replacing, \r\n array=original_labels, \r\n mask=mask1, \r\n replacement_value=inner, \r\n initialization_value=initialization_value)\r\n else:\r\n for inner_key, inner_value in inner.items():\r\n mask2 = np.logical_and(mask1, check_subarray(array1=belief, array2=np.array(inner_key)))\r\n original_labels, done_replacing = replace_initializations(done_replacing=done_replacing,\r\n array=original_labels, \r\n mask=mask2, \r\n replacement_value=inner_value, \r\n initialization_value=initialization_value)\r\n \r\n if not done_replacing:\r\n raise ValueError('About to return so should have been done replacing but told otherwise.')\r\n \r\n return original_labels.astype(np.uint8)", "def island_loss_of_weight(self):\n for y in self.island_map:\n for cell in y:\n cell.loss_of_weight()", "def compute_wl_subtree_kernel(graphs, h):\n for G in graphs:\n for node in G.nodes():\n G.node[node]['label'] = G.degree(node)\n\n start_time = time.time()\n\n labels = {}\n label_lookup = {}\n label_counter = 0\n\n N = len(graphs)\n\n orig_graph_map = {it: {i: defaultdict(lambda: 0) for i in range(N)} for it in range(-1, h)}\n\n # initial labeling\n ind = 0\n for G in graphs:\n labels[ind] = np.zeros(G.number_of_nodes(), dtype=np.int32)\n node2index = {}\n for node in G.nodes():\n node2index[node] = len(node2index)\n\n for node in G.nodes():\n label = G.node[node]['label']\n if not label_lookup.has_key(label):\n label_lookup[label] = len(label_lookup)\n\n labels[ind][node2index[node]] = label_lookup[label]\n orig_graph_map[-1][ind][label] = orig_graph_map[-1][ind].get(label, 0) + 1\n\n ind += 1\n\n compressed_labels = copy.deepcopy(labels)\n\n # WL iterations\n for it in range(h):\n unique_labels_per_h = set()\n label_lookup = {}\n ind = 0\n for G in graphs:\n node2index = {}\n for node in G.nodes():\n node2index[node] = len(node2index)\n\n for node in G.nodes():\n node_label = tuple([labels[ind][node2index[node]]])\n neighbors = G.neighbors(node)\n if len(neighbors) > 0:\n neighbors_label = tuple([labels[ind][node2index[neigh]] for neigh in neighbors])\n node_label = str(node_label) + \"-\" + str(sorted(neighbors_label))\n if not label_lookup.has_key(node_label):\n label_lookup[node_label] = len(label_lookup)\n\n compressed_labels[ind][node2index[node]] = label_lookup[node_label]\n orig_graph_map[it][ind][node_label] = orig_graph_map[it][ind].get(node_label, 0) + 1\n\n ind += 1\n\n print \"Number of 
compressed labels at iteration %s: %s\" % (it, len(label_lookup))\n labels = copy.deepcopy(compressed_labels)\n\n K = np.zeros((N, N))\n\n for it in range(-1, h):\n for i in range(N):\n for j in range(N):\n common_keys = set(orig_graph_map[it][i].keys()) & set(orig_graph_map[it][j].keys())\n K[i][j] += sum([orig_graph_map[it][i].get(k, 0) * orig_graph_map[it][j].get(k, 0) for k in common_keys])\n\n end_time = time.time()\n print \"Total time for WL subtree kernel: \", (end_time - start_time)\n\n return K", "def labels(self, threshold, segment=True, exclude_border=0):\n data = self.unmasked_data\n isfin = numpy.isfinite(data)\n data[~isfin] = numpy.amin(data[isfin])\n regions = (data > threshold)\n if segment:\n local_max = peak_local_max(data, indices=False,\n exclude_border=0,\n footprint=numpy.ones((3, 3)),\n labels=regions)\n markers = measurements.label(local_max)[0]\n labels = watershed(-data, markers, mask=regions)\n if exclude_border > 0:\n # Remove basins originating from edge peaks\n diff = numpy.zeros_like(local_max)\n for i in range(local_max.ndim):\n local_max = local_max.swapaxes(0, i)\n diff = diff.swapaxes(0, i)\n diff[:exclude_border] = local_max[:exclude_border]\n diff[-exclude_border:] = local_max[-exclude_border:]\n diff = diff.swapaxes(0, i)\n local_max = local_max.swapaxes(0, i)\n \n for l in numpy.sort(labels[diff])[::-1]:\n labels[labels == l] = 0\n labels[labels > l] -= 1\n ulabels = numpy.unique(labels)\n n = ulabels[ulabels != 0].size\n else:\n data_thres = numpy.zeros_like(data)\n data_thres[regions] = data[regions]\n labels, n = measurements.label(data_thres)\n return labels, n", "def voronoi_labelling(self, seed):\n import heapq\n if hasattr(seed, '__iter__') == False:\n seed = [seed]\n try:\n if (self.weights < 0).any():\n raise ValueError('some weights are non-positive')\n except:\n raise ValueError('undefined weights')\n dist, active = np.inf * np.ones(self.V), np.ones(self.V)\n label = - np.ones(self.V, np.int_)\n idx, neighb, weight = self.compact_neighb()\n dist[seed] = 0\n label[seed] = np.arange(len(seed))\n dg = list(zip(np.zeros_like(seed), seed))\n heapq.heapify(dg)\n for j in range(self.V):\n end = False\n while True:\n if len(dg) == 0:\n end = True\n break\n node = heapq.heappop(dg)\n if active[node[1]]:\n break\n if end:\n break\n dwin, win = node\n active[win] = False\n # the folllowing loop might be vectorized\n for i in range(idx[win], idx[win + 1]):\n l, newdist = neighb[i], dwin + weight[i]\n if newdist < dist[l]:\n heapq.heappush(dg, (newdist, l))\n dist[l] = newdist\n label[l] = label[win]\n return label", "def watershed(self, debug=False):\n kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))\n opening = cv2.morphologyEx(self.th[:, :, 0], cv2.MORPH_OPEN, kernel, iterations=2)\n sure_bg = cv2.dilate(self.th[:, :, 0], kernel, iterations=3)\n dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)\n ret, sure_fg = cv2.threshold(dist_transform, 0.1 * dist_transform.max(), 255, 0)\n sure_fg = np.uint8(sure_fg)\n unknown = cv2.subtract(sure_bg, sure_fg)\n ret, markers = cv2.connectedComponents(sure_fg)\n markers += 1\n markers[unknown == 255] = 0\n markers = cv2.watershed(self.img, markers)\n self.add_color(markers)\n if debug:\n cv2.imshow(\"fg\", unknown)\n cv2.imshow(\"op\", opening)\n cv2.imshow(\"o3\", sure_bg)", "def instance_label(task, pred, k=15, n_iters=1, dist_thresh=5, watershed=False):\n mask = pred\n\n # noise removal\n if k > 1 and n_iters > 0:\n kernel = np.ones((k, k), np.uint8)\n mask = cv2.morphologyEx(mask, 
cv2.MORPH_OPEN, kernel,\n iterations=n_iters)\n\n if watershed:\n from clab.live import filters\n mask = filters.watershed_filter(mask, dist_thresh=dist_thresh)\n\n mask = mask.astype(np.uint8)\n n_ccs, cc_labels = cv2.connectedComponents(mask, connectivity=4)\n return cc_labels", "def beam_search(X, u, w, b, relLabels):\n\n candidate_paths = [[] for _ in range(10)] # contains the candidate label sets\n candidate_vals =[[] for _ in range(10)] # contains the label values (-1/1) for each candidate set\n candidate_scores = [0. for _ in range(10)]\n min_score = -1000\n\n iter = 0\n start = 0\n while True:\n # print(\"Iter: \", iter)\n intermediate_paths = {}\n # intermediate_paths_val = []\n interim_scores = []\n hash_table = {}\n\n cnt_paths = 0\n for cp in range(5):\n labels_curr = candidate_paths[cp]\n labels_val_curr = candidate_vals[cp]\n scores_curr = candidate_scores[cp]\n Y = -np.ones((10, 1))\n for lv in range(len(labels_val_curr)):\n Y[labels_curr[lv]] = labels_val_curr[lv]\n\n for l in range(10):\n candidate_interim = labels_curr[:]\n candidate_vals_interim = labels_val_curr[:]\n # if l in labels_curr:\n # continue\n\n temp_relLabels = []\n for lc in range(len(labels_curr)):\n temp_relLabels.extend(relLabels[labels_curr[lc]])\n\n # temp_relLabels = np.array(list(set(temp_relLabels)))\n temp_relLabels = np.array(list(set(relLabels[l]).intersection(set(labels_curr))))\n model_pos = returnModelVal(X, Y, 1.0, u[l], u[l], b[l][0], np.array(temp_relLabels))\n candidate_interim.append(l)\n\n if model_pos < 0:\n # print('hello')\n candidate_vals_interim.append(-1)\n interim_scores.append(-model_pos)\n else:\n candidate_vals_interim.append(1)\n interim_scores.append(model_pos)\n\n hash_table[cnt_paths] = candidate_interim\n intermediate_paths[cnt_paths] = candidate_vals_interim\n cnt_paths += 1\n # For the first iteration, just iterate once - all labels in one iteration\n if start == 0:\n start = 1\n break\n\n temp_paths = intermediate_paths\n interim_zip = zip(intermediate_paths, interim_scores)\n sorted_scores = sorted(interim_zip, key=lambda x: x[1], reverse=True)[:5]\n intermediate_paths, scores = zip(*sorted_scores)\n\n temp_cand = []\n temp_val = []\n for i in range(len(intermediate_paths)):\n temp_cand.append(hash_table[intermediate_paths[i]])\n temp_val.append(temp_paths[intermediate_paths[i]])\n # candidate_scores[i] += scores[i]\n\n candidate_paths = temp_cand\n candidate_vals = temp_val\n print(candidate_paths)\n print(candidate_vals)\n # print(scores)\n # candidate_scores = scores\n\n # Exit condition from loop\n # if max(interim_scores) < min_score:\n # break\n #\n # min_score = min(interim_scores)\n\n iter += 1\n if iter > 5:\n break\n\n candidate_dict = {}\n for i in range(5):\n for c in range(len(candidate_paths[i])):\n if candidate_paths[i][c] not in candidate_dict:\n candidate_dict[candidate_paths[i][c]] = candidate_vals[i][c]\n elif candidate_dict[candidate_paths[i][c]] != 2:\n if candidate_dict[candidate_paths[i][c]] != candidate_vals[i][c]:\n candidate_dict[candidate_paths[i][c]] = 2.\n\n print(candidate_dict)\n exit()\n return candidate_dict", "def from_minimal_schnyder_wood(graph):\n from sage.graphs.digraph import DiGraph\n from sage.combinat.dyck_word import DyckWord\n color_a = graph.incoming_edges('a')[0][2]\n color_b = graph.incoming_edges('b')[0][2]\n\n embedding = graph.get_embedding()\n graph0 = DiGraph([e for e in graph.edges() if e[2] == color_a],\n format='list_of_edges')\n restricted_embedding = {u: [v for v in embedding[u]\n if v in graph0.neighbors_in(u) 
or\n v in graph0.neighbors_out(u)]\n for u in graph0}\n\n voisins_in = {}\n for u in graph0:\n if u != 'a':\n bad_emb = restricted_embedding[u]\n sortie = graph0.neighbors_out(u)[0]\n idx = bad_emb.index(sortie)\n restricted_embedding[u] = bad_emb[idx:] + bad_emb[:idx]\n voisins_in[u] = restricted_embedding[u][1:]\n else:\n voisins_in[u] = list(restricted_embedding[u])\n voisins_in[u].reverse() # pour les avoir dans le bon sens\n\n graph0.set_embedding(restricted_embedding)\n\n def clockwise_labelling(gr, vertex):\n if len(gr) == 1:\n return [vertex]\n else:\n lbl = [vertex]\n for w in voisins_in[vertex]:\n lbl += clockwise_labelling(gr, w)\n return lbl\n\n def profil(gr, vertex):\n if len(gr) == 1:\n return []\n else:\n lbl = []\n for w in voisins_in[vertex]:\n lbl += [1] + profil(gr, w) + [0]\n return lbl\n\n dyckword_bottom = profil(graph0, 'a')\n # this is the profile of the planar graph graph0\n\n liste = clockwise_labelling(graph0, 'a')[1:]\n relabelling = {l: i for i, l in enumerate(liste)}\n for l in ['a', 'b', 'c']:\n relabelling[l] = l\n new_graph = graph.relabel(relabelling, inplace=False)\n\n dyckword_top = []\n for i in range(1, len(graph) - 3):\n indegree1 = len([u for u in new_graph.incoming_edges(i)\n if u[2] == color_b])\n dyckword_top += [1] + [0] * indegree1\n indegree1 = len([u for u in new_graph.incoming_edges('b')\n if u[2] == color_b])\n dyckword_top += [1] + [0] * indegree1\n\n dyckword_bottom = DyckWord(dyckword_bottom)\n dyckword_top = DyckWord(dyckword_top)\n TIP = TamariIntervalPosets(len(dyckword_bottom) // 2)\n return TIP.from_dyck_words(dyckword_bottom, dyckword_top)", "def propagate_labels(image,labels,conflict=0):\n rlabels,_ = label(image)\n cors = correspondences(rlabels,labels,False)\n outputs = zeros(amax(rlabels)+1,'i')\n oops = -(1<<30)\n for o,i in cors.T:\n if outputs[o]!=0: outputs[o] = oops\n else: outputs[o] = i\n outputs[outputs==oops] = conflict\n outputs[0] = 0\n return outputs[rlabels]", "def leaf_count(args: Dict[str, Union[bool, str]],\n model: str = \"PLANTCV\") -> JSON_TYPE:\n threshold: int = 116\n # Code from PlantCV Watershed:\n # https://plantcv.readthedocs.io/en/stable/tutorials/watershed_segmentation_tutorial/\n pcv_args = options(image=args.input)\n pcv.params.debug = pcv_args.debug\n\n # Read in image to apply watershedding to\n ##img, path, filename = pcv.readimage(filename=pcv_args.image)\n img = cv2.imread(pcv_args.image)\n # Converting from RGB to LAB and keep green-magenta channel\n a = pcv.rgb2gray_lab(rgb_img=img, channel='a')\n # Set up a binary threshold image\n img_binary = pcv.threshold.binary(gray_img=a, threshold=threshold,\n max_value=255, object_type='dark')\n # Blur image to reduce noise\n img_binary = pcv.median_blur(gray_img=img_binary, ksize=20)\n # Overlay of mask onto image\n id_objects, obj_hierarchy = pcv.find_objects(img=img, mask=img_binary)\n\n while (not id_objects):\n threshold += 4\n img_binary = pcv.threshold.binary(gray_img=a, threshold=threshold,\n max_value=255, object_type='dark')\n # Blur image to reduce noise\n img_binary = pcv.median_blur(gray_img=img_binary, ksize=20)\n # Overlay of mask onto image\n id_objects, obj_hierarchy = pcv.find_objects(img=img, mask=img_binary)\n # Reset threshold\n threshold = 116\n\n # Combine objects\n obj, mask = pcv.object_composition(img=img,\n contours=id_objects,\n hierarchy=obj_hierarchy)\n # Apply mask\n masked = pcv.apply_mask(img=img, mask=mask, mask_color=\"black\")\n\n # Using OpenCV for thresholding\n if model == \"OPENCV\":\n return 
opencv_watershed(masked, mask)\n\n # Using ML model for thresholding\n if model == \"ML\":\n mask_path: str = \"temp/mask.png\"\n cv2.imwrite(mask_path, masked)\n print(\"masked: \", masked)\n return ml_watershed(pcv_args.image, mask_path)\n\n # Using PlantCV watershed functionality\n return plantcv_watershed(masked, mask)", "def preprocessing(image, smooth_size, folder):\n from skimage.restoration import denoise_tv_chambolle\n \n dim = int(image.shape[0] / 50.)\n smoothed = rank.median(image, disk(smooth_size))\n #smoothed = denoise_tv_chambolle(image, weight=0.002)\n smoothed = rank.enhance_contrast(smoothed, disk(smooth_size))\n \n pl.subplot(2, 3, 1)\n pl.title(\"after median\")\n pl.imshow(smoothed)\n pl.gray()\n # If after smoothing the \"dot\" disappears\n # use the image value\n \n # TODO: wat do with thresh?\n try:\n im_max = smoothed.max()\n thresh = threshold_otsu(image)\n except:\n im_max = image.max()\n thresh = threshold_otsu(image)\n\n \n if im_max < thresh:\n labeled = np.zeros(smoothed.shape, dtype=np.int32)\n \n else:\n binary = smoothed > thresh\n \n # TODO: this array size is the fault of errors\n bin_open = binary_opening(binary, np.ones((dim, dim)), iterations=5)\n bin_close = binary_closing(bin_open, np.ones((5,5)), iterations=5)\n \n pl.subplot(2, 3, 2)\n pl.title(\"threshold\")\n pl.imshow(binary, interpolation='nearest')\n pl.subplot(2, 3, 3)\n pl.title(\"opening\")\n pl.imshow(bin_open, interpolation='nearest')\n pl.subplot(2, 3, 4)\n pl.title(\"closing\")\n pl.imshow(bin_close, interpolation='nearest')\n \n distance = ndimage.distance_transform_edt(bin_open)\n local_maxi = peak_local_max(distance,\n indices=False, labels=bin_open)\n \n markers = ndimage.label(local_maxi)[0]\n \n labeled = watershed(-distance, markers, mask=bin_open)\n pl.subplot(2, 3, 5)\n pl.title(\"label\")\n pl.imshow(labeled)\n #pl.show()\n pl.savefig(folder)\n pl.close('all')\n\n #misc.imsave(folder, labeled)\n# labels_rw = random_walker(bin_close, markers, mode='cg_mg')\n# \n# pl.imshow(labels_rw, interpolation='nearest')\n# pl.show()\n\n return labeled", "def watershed_segment_2(M,click_coords):\n \n # todo: choose these structures based on aspect ratio of M and input parameters\n sel = np.ones((4,10)) # for opening\n sel2 = np.ones((15,75)) # for local thresholding\n sel3 = np.ones((2,5)) # for erosion\n # get a few points in the center of each blob\n \n # threshold\n #bw = ((M>=ndi.percentile_filter(M,80,footprint=sel2)) & (M>=scoreatpercentile(M.flatten(),60)))\n \n score = stats.percentileofscore(M.flatten(),M[int(click_coords[0][1]),int(click_coords[0][0])])\n bw = (M>=stats.scoreatpercentile(M.flatten(),score))\n\n # open and erode\n #bools = sp.zeros((M.shape[0],M.shape[1]),int)\n #bools[int(click_coords[0]),int(click_coords[1])] = 1\n #blobs = sp.where(bools == 1,True,False)\n blobs = snm.binary_opening(bw,structure=sel)\n blobs = snm.binary_dilation(blobs,iterations=3)\n blobs = snm.binary_erosion(blobs,structure=sel3)\n \n \n # label\n labels,_ = ndi.label(blobs)\n labels[labels > 0] += 1\n #labels[0,0] = 1\n\n # rescale and cast to int16, then use watershed\n M2 = rescaled(M,0,65000).astype(np.uint16)\n newlabels = ndi.watershed_ift(M2,labels)\n \n # get rid of groups unless they have the right number of pixels\n counts = np.bincount(newlabels.flatten())\n old2new = np.arange(len(counts))\n old2new[(counts < 100) | (counts > 600)] = 0\n newlabels = old2new[newlabels]\n \n return newlabels", "def watershed_segment(M,xM=None,yM=None):\n\n if xM != None and yM != None:\n sel = 
np.ones((int(ceil(23.9*xM)),int(ceil(23.9*yM)))) # for opening\n sel2 = np.ones((int(ceil(127.2*xM)),int(ceil(127.2*yM)))) # for local thresholding\n sel3 = np.ones((int(ceil(11.9*xM)),int(ceil(11.9*yM)))) # for erosion\n ma,mi =(44245.21*xM*yM),(316.037*xM*yM) \n else:\n selD = np.array([int(M.shape[0]*.012),int(M.shape[1]*.012)])\n selD = np.where(selD!=0,selD,1)\n \n sel2D = np.array([int(M.shape[0]*.12),int(M.shape[1]*.12)])\n sel2D = np.where(sel2D!=0,sel2D,1)\n\n sel3D = np.array([int(M.shape[0]*.01),int(M.shape[1]*.01)])\n sel3D = np.where(sel3D!=0,sel3D,1)\n\n\n sel = np.ones(selD) # for opening\n sel2 = np.ones(sel2D) # for local thresholding\n sel3 = np.ones(sel3D) # for erosion\n ma,mi = (M.shape[0]*M.shape[1]*.0075),(M.shape[0]*M.shape[1]*.0003)\n\n # get a few points in the center of each blob\n \n # threshold\n bw = ((M>=ndi.percentile_filter(M,80,footprint=sel2)))\n #& (M>=stats.scoreatpercentile(M.flatten(),80)))\n\n # open and erode\n blobs = snm.binary_opening(bw,structure=sel)\n blobs = snm.binary_erosion(blobs,structure=sel3,iterations=2)\n \n # label\n labels,_ = ndi.label(blobs)\n labels[labels > 0] += 1\n labels[0,0] = 1\n\n # rescale and cast to int16, then use watershed\n #M2 = rescaled(M,0,65000).astype(np.uint16)\n #newlabels = ndi.watershed_ift(M2,labels)\n newlabels = labels\n \n # get rid of groups unless they have the right number of pixels\n\n counts = np.bincount(newlabels.flatten())\n old2new = np.arange(len(counts)) \n old2new[(counts < int(mi)) | (counts > int(ma))] = 0\n newlabels = old2new[newlabels]\n\n return newlabels", "def label_simplex(grid, simplex, thresh):\n coords = [grid[:,x] for x in simplex]\n dist = squareform(pdist(coords,'euclidean'))\n adjacency = dist<thresh\n adjacency = adjacency.astype(int) \n graph = csr_matrix(adjacency)\n n_components, labels = connected_components(csgraph=graph, directed=False, return_labels=True)\n\n return n_components", "def label(gt_dataset, volume_dim, voxel_dim, labeling_params):\n labeled_volumes = dict()\n labeled_cells = dict()\n #Use global density and reduce the size of gt_dataset here\n global_density = labeling_params[\"global_density\"]\n gt_dataset = {k: v for k,v in gt_dataset.items() if random_sample() < global_density}\n #Label in the order specified in the configuration\n layers = sorted(labeling_params.keys())\n #Remove global_density\n layers.remove(\"global_density\")\n for layer in layers:\n print \"Labeling {}\".format(layer)\n fluorophore = labeling_params[layer]['fluorophore']\n volume, cells = brainbow(gt_dataset, volume_dim, voxel_dim, **labeling_params[layer])\n if fluorophore in labeled_volumes:\n labeled_volumes[fluorophore] += volume\n labeled_cells[fluorophore] |= cells\n else:\n labeled_volumes[fluorophore] = volume\n labeled_cells[fluorophore] = cells\n return labeled_volumes, labeled_cells", "def label_building_polys(burnt_polys, building_polys):\n\n for b in building_polys:\n for r in burnt_polys:\n if b[0].intersects(r):\n b[1] = [b[1][0], 'blue', True] # mark building polygon as 'blue' if found in burnt region\n continue", "def textDetectWatershed(thresh, original):\n # According to: http://docs.opencv.org/trunk/d3/db4/tutorial_py_watershed.html\n img = resize(original, 3000)\n thresh = resize(thresh, 3000)\n # noise removal\n kernel = np.ones((3,3),np.uint8)\n opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 3)\n \n # sure background area\n sure_bg = cv2.dilate(opening,kernel,iterations=3)\n\n # Finding sure foreground area\n dist_transform = 
cv2.distanceTransform(opening,cv2.DIST_L2,5)\n ret, sure_fg = cv2.threshold(dist_transform,0.01*dist_transform.max(),255,0)\n\n # Finding unknown region\n sure_fg = np.uint8(sure_fg)\n unknown = cv2.subtract(sure_bg,sure_fg)\n \n # Marker labelling\n ret, markers = cv2.connectedComponents(sure_fg)\n\n # Add one to all labels so that sure background is not 0, but 1\n markers += 1\n\n # Now, mark the region of unknown with zero\n markers[unknown == 255] = 0\n \n markers = cv2.watershed(img, markers)\n implt(markers, t='Markers')\n image = img.copy()\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n # Creating result array\n boxes = []\n for mark in np.unique(markers):\n # mark == 0 --> background\n if mark == 0:\n continue\n\n # Draw it on mask and detect biggest contour\n mask = np.zeros(gray.shape, dtype=\"uint8\")\n mask[markers == mark] = 255\n\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]\n c = max(cnts, key=cv2.contourArea)\n \n # Draw a bounding rectangle if it contains text\n x,y,w,h = cv2.boundingRect(c)\n cv2.drawContours(mask, c, 0, (255, 255, 255), cv2.FILLED)\n maskROI = mask[y:y+h, x:x+w]\n # Ratio of white pixels to area of bounding rectangle\n r = cv2.countNonZero(maskROI) / (w * h)\n \n # Limits for text\n if r > 0.1 and 2000 > w > 15 and 1500 > h > 15:\n boxes += [[x, y, w, h]]\n \n # Group intersecting rectangles\n boxes = group_rectangles(boxes)\n bounding_boxes = np.array([0,0,0,0])\n for (x, y, w, h) in boxes:\n cv2.rectangle(image, (x, y),(x+w,y+h), (0, 255, 0), 8)\n bounding_boxes = np.vstack((bounding_boxes, np.array([x, y, x+w, y+h])))\n \n implt(image)\n\n # Recalculate coordinates to original size\n boxes = bounding_boxes.dot(ratio(original, img.shape[0])).astype(np.int64)\n return boxes[1:]", "def detection(test_img):\n # TODO: Step 2 : Your Detection code should go here.\n no_row = len(test_img)\n no_col = len(test_img[0])\n thresh = 100\n label_img = np.zeros((no_row, no_col))\n uf = []\n \n #first pass\n l = 1\n for i in range(0, no_row):\n for j in range(0, no_col):\n if test_img[i,j] < 255 - thresh:\n if i == 0 and j == 0:\n label_img[i,j] = l\n l = l+1\n elif i == 0 and j != 0:\n if label_img[i,j-1] == 0:\n label_img[i,j] = l\n l = l+1\n else:\n label_img[i,j] = label_img[i,j-1]\n elif i != 0 and j == 0:\n if label_img[i-1,j] == 0:\n label_img[i,j] = l\n l = l+1\n else:\n label_img[i,j] = label_img[i-1,j]\n else:\n if label_img[i-1,j] == 0 and label_img[i,j-1] == 0:\n label_img[i,j] = l\n l = l+1\n elif label_img[i-1,j] == 0 and label_img[i,j-1] != 0:\n label_img[i,j] = label_img[i,j-1]\n elif label_img[i-1,j] != 0 and label_img[i,j-1] == 0:\n label_img[i,j] = label_img[i-1,j]\n else:\n if label_img[i,j-1] == label_img[i-1,j]:\n label_img[i,j] = label_img[i,j-1]\n else:\n label_img[i,j] = min(label_img[i-1,j],label_img[i,j-1])\n uf.append([min(label_img[i,j-1],label_img[i-1,j]),max(label_img[i,j-1],label_img[i-1,j])])\n l = l - 1\n \n #2nd pass\n def ufds(x,l):\n b = []\n for i in range(1,l+1):\n b.append([i])\n for j in x:\n i1 = 0\n i2 = 0\n for k in range(0,len(b)):\n if j[0] in b[k]:\n i1 = k\n if j[1] in b[k]:\n i2 = k\n if i1 != i2:\n b[i1] = b[i1] + b[i2]\n del b[i2]\n return b\n \n bman = ufds(uf,l)\n \n #3rd pass\n uf_arr = np.zeros(l)\n for i in range(0,len(uf_arr)):\n for j in bman:\n if i+1 in j:\n uf_arr[i] = min(j) \n \n fin_img = np.zeros((no_row, no_col))\n for i in range(0, no_row):\n for j in range(0, no_col):\n if label_img[i,j] != 0:\n fin_img[i,j] = uf_arr[int(label_img[i,j] - 1)]\n \n 
all_label = []\n '''\n for i in bman:\n all_label.append(min(i))\n ''' \n for i in range(0, no_row):\n for j in range(0, no_col):\n if fin_img[i,j] !=0 and fin_img[i,j] not in all_label:\n all_label.append(fin_img[i,j])\n \n # main image\n k_img = np.zeros((no_row, no_col))\n for i in range(0, no_row):\n for j in range(0, no_col):\n if fin_img[i,j] != 0:\n k_img[i,j] = (all_label.index(fin_img[i,j]) + 1)\n \n k_list = []\n for i in range(1,len(all_label)+1):\n x = None\n y = None\n hx = None\n hy = None\n for j in range(0,no_row):\n for k in range(0,no_col):\n if k_img[j,k] == i:\n if y == None and x == None:\n y = j\n hy = j\n x = k\n hx = k\n else:\n if y > j:\n y = j\n if hy < j:\n hy = j\n if x > k:\n x = k\n if hx < k:\n hx = k\n k_list.append({\"bbox\": [x, y, hx-x, hy-y]})\n \n return k_list\n \n #raise NotImplementedError", "def wetting(lgca):\n if hasattr(lgca, 'spheroid'):\n birth = npr.random(lgca.nodes[lgca.spheroid].shape) < lgca.r_b\n ds = (1 - lgca.nodes[lgca.spheroid]) * birth\n lgca.nodes[lgca.spheroid, :] = np.add(lgca.nodes[lgca.spheroid, :], ds, casting='unsafe')\n lgca.update_dynamic_fields()\n newnodes = lgca.nodes.copy()\n relevant = (lgca.cell_density[lgca.nonborder] > 0)\n coords = [a[relevant] for a in lgca.nonborder]\n nbs = lgca.nb_sum(lgca.cell_density) # + lgca.cell_density\n nbs *= np.clip(1 - nbs / lgca.n_crit, a_min=0, a_max=None) / lgca.n_crit * 2\n g_adh = lgca.gradient(nbs)\n pressure = np.clip(lgca.cell_density - lgca.rho_0, a_min=0., a_max=None) / (lgca.K - lgca.rho_0)\n g_pressure = -lgca.gradient(pressure)\n\n resting = lgca.nodes[..., lgca.velocitychannels:].sum(-1)\n resting = lgca.nb_sum(resting) / lgca.velocitychannels / lgca.rho_0\n g = lgca.calc_flux(lgca.nodes)\n g = lgca.nb_sum(g)\n\n for coord in zip(*coords):\n n = lgca.cell_density[coord]\n permutations = lgca.permutations[n]\n restc = permutations[:, lgca.velocitychannels:].sum(-1)\n j = lgca.j[n]\n j_nb = g[coord]\n weights = np.exp(\n lgca.beta * (j_nb[0] * j[0] + j_nb[1] * j[1]) / lgca.velocitychannels / 2\n + lgca.beta * resting[coord] * restc #* np.clip(1 - restc / lgca.rho_0 / 2, a_min=0, a_max=None) * 2\n + lgca.beta * np.einsum('i,ij', g_adh[coord], j)\n # + lgca.alpha * np.einsum('i,ij', g_subs[coord], j)\n + restc * lgca.ecm[coord]\n + lgca.gamma * np.einsum('i,ij', g_pressure[coord], j)\n ).cumsum()\n ind = bisect_left(weights, random() * weights[-1])\n newnodes[coord] = permutations[ind]\n\n lgca.nodes = newnodes\n lgca.ecm -= lgca.alpha * lgca.ecm * lgca.cell_density / lgca.K", "def detect_labels(img: np.ndarray):\n \n # Create a range of allowed colors.\n lower_color = np.array([20, 50, 0])\n upper_color = np.array([255, 255, 255])\n\n # Keep the pixels that lie within the range.\n color_filtered = cv.inRange(\n cv.cvtColor(img, cv.COLOR_RGB2HSV),\n lower_color,\n upper_color\n )\n \n # Keeping only the really bright pixels (converted to 255), change the dull ones to 0.\n # Helps distinguish the labels from other dull colors.\n _, thresholded = cv.threshold(color_filtered, 254, 255, cv.THRESH_BINARY)\n\n # Reduce the thickness of regions. 
Every 30x30 sliding window of 255 in the image gets replaced by a white pixel.\n # The stronger the erosion, the more the noise is removed, with a chance of removal of good pixels as well.\n eroded = cv.erode(thresholded, np.ones((30, 30)))\n\n # Now find outlines of the bright regions that remain after the thickness reduction.\n contours, _ = cv.findContours(eroded, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n \n # Identify the contours that represent our labels.\n # Gotta be the two largest ones in terms of area.\n contour_areas = [(cv.contourArea(c), idx) for (idx, c) in enumerate(contours)]\n\n contour_largest_idx = max(contour_areas)[1]\n contour_second_largest_idx = max(filter(lambda item: item[1] != contour_largest_idx, contour_areas))[1]\n\n # Since the labels are sorta rectangular, find the mean of the contours' y-axes to approximate the vertical center of the labels.\n largest_vertical_center = np.mean(contours[contour_largest_idx][:, :, 1])\n second_largest_vertical_center = np.mean(contours[contour_second_largest_idx][:, :, 1])\n\n # Higher center implies the value is more towards the bottom of the image, and hence the vertical center of the bottom label.\n bottom_label = min(largest_vertical_center, second_largest_vertical_center)\n \n # Lower center implies the value is more towards the top of the image, and hence the vertical center of the top label.\n top_label = max(largest_vertical_center, second_largest_vertical_center)\n\n return bottom_label, top_label", "def region_labeling(image, th=None, black_blobs=True, set_recursion_limit=True,\n recursion_limit=10000):\n\n # Setup\n shape = np.shape(image)\n old_recursion_limit = sys.getrecursionlimit()\n if set_recursion_limit:\n sys.setrecursionlimit(recursion_limit)\n\n if len(shape) == 3:\n image = image.mean(axis=2)\n elif len(shape) > 3:\n raise ValueError('Must be at 2D image')\n\n # Threshold image\n labeled = threshold(image, th=th).astype(int)\n labeled = 255-labeled if black_blobs else labeled\n labeled[labeled == 255] = -1\n\n # Label blobs\n blobs = 0\n for i in range(shape[0]):\n for j in range(shape[1]):\n if labeled[i, j] == -1:\n blobs += 1\n flood_fill(labeled, y=i, x=j, colour=blobs)\n\n # Cleanup\n sys.setrecursionlimit(old_recursion_limit)\n\n return labeled", "def _get_label_weight(opts, data):\n experiments = data[\"exp_names\"].value\n label_mat = numpy.zeros((experiments.size, 7))\n vid_lengths = numpy.zeros((experiments.size,))\n for i in range(experiments.size):\n exp_key = experiments[i]\n exp = data[\"exps\"][exp_key]\n for j in range(6):\n # label_counts[j] += exp[\"org_labels\"].value[:, j].sum()\n label_mat[i, j] = exp[\"org_labels\"].value[:, j].sum()\n # label_counts[-1] +=\\\n # exp[\"org_labels\"].shape[0] - exp[\"org_labels\"].value.sum()\n label_mat[i, -1] =\\\n exp[\"org_labels\"].shape[0] - exp[\"org_labels\"].value.sum()\n\n # vid_lengths[i] = exp[\"hoghof\"].shape[0]\n vid_lengths[i] = exp[\"org_labels\"].shape[0]\n\n # label_counts = label_mat.sum(axis=0)\n label_weight = 1.0 / numpy.mean(label_mat, axis=0)\n # label_weight[-2] = label_weight[-2] * 10\n if opts[\"flags\"].reweight is False:\n label_weight = [5, 5, 5, 5, 5, 5, .01]\n # import pdb; pdb.set_trace()\n return label_weight", "def hebb_rule(dados):\n # Passo 0: Inicializar todos os pesos\n n = len(dados[0][0]) - 1\n weight = zeros(n + 1)\n print(weight)\n\n # Passo 1: Para cada vetor de treinamento na entrada e par de objetivos na saída (e : s)\n for _, dado in enumerate(dados):\n # Passo 2: Ajuste as ativações para as unidades de 
entrada\n x = dado[0]\n # Passo 3: Ajuste a ativação para a unidade de saída\n y = dado[1]\n # Passo 4: Ajuste os pesos e o bias\n for j in range(n):\n weight[j] += x[j] * y\n weight[n] += + y # Bias é weight[n]\n print(weight)", "def labelComponents26(cube):\n x,y,z = np.where(cube);\n label = np.zeros(cube.shape, dtype = 'uint8');\n ncomp = 0;\n for xp,yp,zp in zip(x,y,z):\n if label[xp,yp,zp] == 0:\n ncomp += 1;\n label = labelNeighbours26(cube, label, xp,yp,zp, ncomp);\n return ncomp, label", "def _refinement_random_walker(\n self,\n ds_labels,\n ds_maskROI,\n ds_mask,\n target_label):\n\n ds_labels[(ds_maskROI == False) & ds_mask] = target_label\n ds_labels[(ds_maskROI == False) & (ds_mask == False)] = -1\n\n labels = zoom(\n ds_labels,\n zoom=np.float32(\n self.size) /\n self.ds_size,\n order=0)\n maskROI = zoom(\n ds_maskROI,\n zoom=np.float32(\n self.size) /\n self.ds_size,\n order=0).astype(\n np.bool)\n\n # Extract labelled and unlabelled vertices\n m_unlabeled = (labels == 0) & (maskROI)\n m_foreground = (labels == target_label)\n\n unlabeled = np.ravel_multi_index(np.where(m_unlabeled), self.size)\n labeled = np.ravel_multi_index(np.where(labels != 0), self.size)\n #labeled = np.ravel_multi_index(np.where((m_foreground) | (labels > 0)), self.size)\n\n # Preparing the right handside of the equation BT xs\n B = self.L[unlabeled][:, labeled]\n mask = (labels[labels != 0]).flatten() == target_label\n fs = sparse.csr_matrix(mask).transpose()\n rhs = B * fs\n\n # Preparing the left handside of the equation Lu\n Lu = self.L[unlabeled][:, unlabeled]\n\n # Solve the linear equation Lu xu = -BT xs\n if self._pyamg_found:\n ml = ruge_stuben_solver(Lu)\n M = ml.aspreconditioner(cycle='V')\n else:\n M = None\n xu = cg(Lu, -rhs.todense(), tol=1e-3, M=M, maxiter=120)[0]\n\n probability = np.zeros(self.size, dtype=np.float32)\n probability[m_unlabeled] = xu\n probability[m_foreground] = 1\n\n return probability", "def watershed(f, Bc=None, LINEREG=\"LINES\"):\n from string import upper\n if Bc is None: Bc = secross()\n return cwatershed(f,regmin(f,Bc),upper(LINEREG))\n return y", "def ray_label_simplex(grid, simplex, thresh):\n coords = [grid[:,x] for x in simplex]\n dist = squareform(pdist(coords,'euclidean'))\n adjacency = dist<thresh\n adjacency = adjacency.astype(int) \n graph = csr_matrix(adjacency)\n n_components, labels = connected_components(csgraph=graph, directed=False, return_labels=True)\n\n return n_components", "def calculate_class_weights(label_data):\n neg, pos = np.bincount(label_data)\n weight_for_0 = 1 / neg\n weight_for_1 = 1 / pos\n return {0: weight_for_0, 1: weight_for_1}", "def opencv_watershed(masked, mask) -> JSON_TYPE:\n # For code and detailed explanation see:\n # http://datahacker.rs/007-opencv-projects-image-segmentation-with-watershed-algorithm/\n threshold: int = 30\n gray = cv2.cvtColor(masked, cv2.COLOR_RGB2GRAY)\n ret, thresh_img = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)\n # Noise removal\n kernel = np.ones((3), np.uint8)\n opening_img = cv2.morphologyEx(thresh_img, cv2.MORPH_OPEN, kernel, iterations=9)\n # Noise removal\n closing_img = cv2.morphologyEx(thresh_img, cv2.MORPH_CLOSE, kernel, iterations=4)\n dist_transform = cv2.distanceTransform(255 - closing_img, cv2.DIST_L2, 3)\n local_max_location = peak_local_max(dist_transform, min_distance=1, indices=True)\n\n n_increases: int = 0\n while local_max_location.shape[0] < 30 and n_increases < 15:\n threshold += 20\n ret, thresh_img = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)\n # 
Noise removal\n kernel = np.ones((3), np.uint8)\n opening_img = cv2.morphologyEx(thresh_img, cv2.MORPH_OPEN, kernel, iterations=9)\n # Noise removal\n closing_img = cv2.morphologyEx(thresh_img, cv2.MORPH_CLOSE, kernel, iterations=4)\n dist_transform = cv2.distanceTransform(255 - closing_img, cv2.DIST_L2, 3)\n local_max_location = peak_local_max(dist_transform, min_distance=1, indices=True)\n n_increases += 1\n # Reset threshold\n threshold = 30\n\n num_clusters: int = 30\n if n_increases >= 15:\n num_clusters = local_max_location.shape[0]\n kmeans = KMeans(n_clusters=num_clusters)\n # If local_max_location size is 0, return 0 predictions\n if not local_max_location.size:\n return {\n \"count\": 0\n }\n kmeans.fit(local_max_location)\n local_max_location = kmeans.cluster_centers_.copy()\n # Kmeans is returning a float data type so we need to convert it to an int. \n local_max_location = local_max_location.astype(int)\n dist_transform_copy = dist_transform.copy()\n for i in range(local_max_location.shape[0]):\n cv2.circle(dist_transform_copy, (local_max_location[i][1], local_max_location[i][0]), 5, 255)\n # markers = np.zeros_like(dist_transform)\n ret, sure = cv2.threshold(dist_transform, 0.01*dist_transform.max(), 255, 0)\n sure = np.uint8(sure)\n ret, markers = cv2.connectedComponents(sure)\n labels = np.arange(kmeans.n_clusters)\n markers[local_max_location[:,0], local_max_location[:,1]] = labels + 1\n # Convert all local markers to an integer. This because cluster centers will be float numbers. \n markers = markers.astype(int)\n markers_copy = markers.copy()\n index_non_zero_markers = np.argwhere(markers != 0)\n markers_copy = markers_copy.astype(np.uint8)\n font = cv2.FONT_HERSHEY_SIMPLEX\n for i in range(index_non_zero_markers.shape[0]):\n string_text = str(markers[index_non_zero_markers[i][0], index_non_zero_markers[i][1]])\n cv2.putText(markers_copy, string_text, (index_non_zero_markers[i][1], index_non_zero_markers[i][0]), font, 1, 255)\n markers = markers.astype(np.int32)\n segmented = cv2.watershed(masked, markers)\n count_segments(markers)\n #return {\n # \"count\": local_max_location.shape[0]\n #}\n return {\n \"count\": count_segments(markers),\n }", "def cross_junctions(I, bounds, Wpts):\n #--- FILL ME IN ---\n\n Ipts = np.zeros((2, 48))\n\n#parameters\n alpha = 0.15 #typically 0.04 to 0.06\n threshold = 1500 #default 2000\n sigma = 2\n ws = 12 #window size for saddle point\n\n#building Harris Detecter\n I = I/255.0\n gradx, grady = np.gradient(I)\n IxIx = gaussian_filter(gradx*gradx,sigma)\n IxIy = gaussian_filter(gradx*grady,sigma)\n IyIy = gaussian_filter(grady*grady,sigma)\n print(I.shape)\n\n #get harris score\n cand_score = []\n cand_index = []\n cand = []\n s_cand = []\n\n for j in range(len(I)):\n for i in range(len(I[0])):\n a11 = IxIx[j][i]\n a12 = IxIy[j][i]\n a21 = a12\n a22 = IyIy[j][i]\n A = np.array([[a11, a12],[a21, a22]])\n ev0, ev1 = np.linalg.eigvals(A)\n h_score = ev0*ev1 - alpha*(ev0+ev1)**2\n cand_score.append(-h_score)\n cand_index.append([i, j])\n\n #get the coordinates of the top 5000 scores\n sorted_ind = np.argsort(cand_score)\n sorted_score = np.sort(cand_score).tolist()\n\n for ind in sorted_ind[:threshold]:\n cand.append(cand_index[ind])\n s_cand = sorted_score[:threshold]\n\n\n#clustering\n #using homography to project candidate points to a up-front view\n new_bbox = np.array([[0, 100, 100, 0],[0, 0, 80, 80]])\n H = dlt_homography(bounds, new_bbox)\n cand = np.array(cand).T\n cand = np.vstack((cand, np.ones(cand.shape[1])))\n Ho_cand = 
np.matmul(H,cand).T\n for pt in Ho_cand:\n pt[0] = pt[0]/pt[2]\n pt[1] = pt[1]/pt[2]\n Ho_cand = Ho_cand[:,:2]\n Ho_cand = Ho_cand.tolist()\n\n #get rid of points that are not in the boundry\n temp_Ho_cand = []\n temp_s_cand = []\n for i in range(len(Ho_cand)):\n pt = Ho_cand[i]\n if (pt[0]>=100) or (pt[0]<0) or (pt[1]>=80) or (pt[1]<0):\n continue\n else:\n temp_Ho_cand.append(pt)\n temp_s_cand.append(s_cand[i])\n Ho_cand = np.array(temp_Ho_cand)\n s_cand = temp_s_cand\n #divide candidates into clusters\n assignment = []\n assignment_score = []\n\n #first put in the point that has the highest score\n assignment.append([Ho_cand[0]])\n assignment_score.append([s_cand[0]])\n for i in range(len(Ho_cand)):\n pt = Ho_cand[i]\n dist = []\n for c in assignment:\n dist.append(np.linalg.norm(pt - c[0]))\n if min(dist) > 6:\n assignment.append([pt])\n assignment_score.append([s_cand[i]])\n\n assignment = np.array(assignment)\n\n #assign points to clusters\n for i in range(len(Ho_cand)):\n pt = Ho_cand[i]\n if (pt[0] == Ho_cand[0][0]) and (pt[1] == Ho_cand[0][1]):\n continue\n dist = []\n for c in assignment:\n dist.append(np.linalg.norm(pt - c[0]))\n index = np.argsort(dist)[-1]\n np.append(assignment[index], pt)\n assignment_score[index].append(s_cand[i])\n\n #get centroids for each cluster\n Ho_centroids = []\n for i in range(len(assignment)):\n cl = assignment[i]\n cl = np.array(cl)\n Ho_centroids.append([np.mean(cl.T[0]),np.mean(cl.T[1])])\n assignment_score[i] = sum(assignment_score[i])\n\n print(len(assignment_score))\n\n Ho_centroids = np.array(Ho_centroids)\n #get rid of edge points\n\n xmin = np.amin(Ho_centroids.T[0])\n xmax = np.amax(Ho_centroids.T[0]) \n ymin = np.amin(Ho_centroids.T[1])\n ymax = np.amax(Ho_centroids.T[1])\n\n final_cand = []\n final_score = []\n for i in range(len(Ho_centroids)):\n pt = Ho_centroids[i]\n if (abs(pt[0] - xmin) <= 3) or (abs(pt[0] - xmax) <= 3) or (abs(pt[1] - ymin) <= 3) or (abs(pt[1] - ymax) <= 3):\n continue\n else:\n final_cand.append(pt)\n final_score.append(assignment_score[i])\n print(\"Number of corner found: \")\n print(len(final_cand))\n\n #get rid of fake corners\n if (len(final_cand)>48):\n ultimate_cand =[]\n for ind in np.argsort(final_score)[:48]:\n ultimate_cand.append(final_cand[ind])\n final_cand = ultimate_cand\n print(\"real corners count:\", len(ultimate_cand))\n\n\n #sort the points\n final_cand = np.array(final_cand)\n y_sort_ind = np.argsort(final_cand.T[1])\n final_cand = final_cand.tolist()\n rows = []\n for i in range(6):\n row = []\n for ind in y_sort_ind[i*8:(i+1)*8]:\n row.append(final_cand[ind])\n rows.append(row)\n\n ordered = []\n for row in rows:\n r = []\n x_sort_ind = np.argsort(np.array(row).T[0])\n for ind in x_sort_ind:\n r.append(row[ind])\n ordered.append(r)\n\n final_cand = []\n for row in ordered:\n for pt in row:\n final_cand.append(pt)\n \n\n\n #get coordinates of the centroids in the original frame\n Ho_centroids = np.array(final_cand)\n\n centroids = np.vstack((Ho_centroids.T, np.ones(Ho_centroids.shape[0])))\n centroids = np.matmul(np.linalg.inv(H), centroids).T\n for pt in centroids:\n pt[0] = int(pt[0]/pt[2])\n pt[1] = int(pt[1]/pt[2])\n centroids = centroids[:,:2]\n\n#finding saddle points around the centroids\n saddle_points = []\n for pt in centroids:\n img = I[int(pt[1]-ws):int(pt[1]+ws), int(pt[0]-ws):int(pt[0]+ws)]\n saddle = saddle_point(img)\n saddle = [saddle[0][0]+pt[0]-ws, saddle[1][0]+pt[1]-ws]\n saddle_points.append(saddle)\n\n saddle_points = np.array(saddle_points)\n #------------------\n 
print(saddle_points.T)\n return saddle_points.T", "def classify(k, sorted_labels):\n k_neighbors = sorted_labels[:k]\n men_occurencies = np.count_nonzero(k_neighbors == 'M')\n women_occurencies = np.count_nonzero(k_neighbors == 'W')\n\n return 'M' if men_occurencies > women_occurencies else 'W'", "def bridge_problem3(here):\r\n\r\n def all_over(state):\r\n here, _ = state\r\n return not here or here == set([\"light\"])\r\n\r\n start = (frozenset(here) | frozenset([\"light\"]), frozenset())\r\n return lowest_cost_search(start, bsuccessors2, all_over, bcost)", "def cell_merge(wsh, pred):\n wshshape=wsh.shape\n \n # masks for the original cells\n objs = np.zeros((wsh.max()+1,wshshape[0],wshshape[1]), dtype=bool)\t\n \n # masks for dilated cells\n dil_objs = np.zeros((wsh.max()+1,wshshape[0],wshshape[1]), dtype=bool)\n \n # bounding box coordinates\t\n obj_coords = np.zeros((wsh.max()+1,4))\n \n # cleaned watershed, output of function\t\n wshclean = np.zeros((wshshape[0],wshshape[1]))\n \n # kernel to dilate objects\n kernel = np.ones((3,3), dtype=bool)\t\n \n for obj1 in range(wsh.max()):\n # create masks and dilated masks for obj\n objs[obj1,:,:] = wsh==(obj1+1)\t\n dil_objs[obj1,:,:] = dilation(objs[obj1,:,:], kernel)\t\n \n # bounding box\n obj_coords[obj1,:] = get_bounding_box(dil_objs[obj1,:,:])\n \n objcounter = 0\t# counter for new watershed objects\n \n for obj1 in range(wsh.max()):\t\n dil1 = dil_objs[obj1,:,:]\n\n # check if mask has been deleted\n if np.sum(dil1) == 0:\n continue\n \n objcounter = objcounter + 1\n orig1 = objs[obj1,:,:]\n\n for obj2 in range(obj1+1,wsh.max()):\n dil2 = dil_objs[obj2,:,:]\n \n # only check border if bounding box overlaps, and second mask \n # is not yet deleted\n if (do_box_overlap(obj_coords[obj1,:], obj_coords[obj2,:])\n and np.sum(dil2) > 0):\n \n border = dil1 * dil2\t\n border_pred = pred[border]\n \n # Border is too small to be considered\n if len(border_pred) < 32:\n continue\n \n # Sum of top 25% of predicted border values\n q75 = np.quantile(border_pred, .75)\n top_border_pred = border_pred[border_pred >= q75]\n top_border_height = top_border_pred.sum()\n top_border_area = len(top_border_pred)\n \n # merge cells\n if top_border_height / top_border_area > .99:\n orig1 = np.logical_or(orig1, objs[obj2,:,:])\n dil_objs[obj1,:,:] = np.logical_or(dil1, dil2)\n dil_objs[obj2,:,:] = np.zeros((wshshape[0], wshshape[1]))\n obj_coords[obj1,:] = get_bounding_box(dil_objs[obj1,:,:])\n \n wshclean = wshclean + orig1*objcounter\n \n return wshclean", "def propagateLabel(self, l1, l2):\n\n if l1 != l2:\n winner = min(l1, l2)\n loser = max(l1, l2)\n loserN = 0\n superiorN = 0\n for i,l in enumerate(self.labels):\n if l == loser:\n loserN += 1\n self.labels[i] = winner\n if l > loser:\n superiorN += 1\n self.labels[i] = l - 1\n\n # print('Loser Label is ' + str(loser) + ' . With ' + str(loserN) + ' associated cells. 
Winner label is ' + str(winner))", "def BFTM_(adj_list,labels):\n G_prime = nx.Graph()\n num_clusters = list(np.unique(labels))\n clusters = {i:[] for i in num_clusters}\n hood = {n.id:[i for i in num_clusters if i != labels[n.id]] for n in adj_list}\n \n #Add nodes to clusters\n for idx,n in enumerate(adj_list):\n clusters[labels[idx]].append(n.id)\n \n root_cluster = random.choice(num_clusters)\n root_id = random.choice(list(clusters[root_cluster]))\n queue = [adj_list[root_id]]\n clusters[labels[root_id]].remove(root_id)\n \n \n #BFTM\n while len(queue) > 0:\n node = queue.pop(0)\n for c_id in hood[node.id]:\n if len(clusters[c_id]) > 0:\n sample_id = random.choice(clusters[c_id])\n clusters[labels[sample_id]].remove(sample_id)\n queue.append(adj_list[sample_id])\n hood[sample_id].remove(labels[node.id])\n G_prime.add_edge(node,adj_list[sample_id])\n hood[node.id] = None\n #Handle leftover nodes\n if len(queue) == 0:\n remaining = [c for i,c in clusters.items() if len(c) > 0]\n for rem_cluster in remaining:\n for n in rem_cluster:\n added = False\n while not added:\n rand_n = random.choice(list(G_prime.nodes))\n if labels[rand_n.id] != labels[n.id]:\n G_prime.add_edge(n,rand_n)\n added = True\n \n \n #Cliqify\n for node in list(G_prime.nodes):\n if G_prime.degree(node) < len(num_clusters) - 1:\n for _1_hop in list(G_prime.neighbors(node)):\n for _2_hop in list(G_prime.neighbors(_1_hop)):\n if _2_hop != node and G_prime.degree(_2_hop) < len(num_clusters) - 1:\n G_prime.add_edge(node,_2_hop)\n \n return G_prime", "def update_labels(mask1, mask2):\n # Find the object in mask2 that has maximum overlap with an object in max1,\n # (as a fraction of the objects pixels in mask1)\n def get_max_overlap(mask1, mask2, label1):\n # Count overlapping pixels.\n labels, counts = np.unique(mask2[mask1 == label1], return_counts=True)\n # Sort labels by counts (ascending).\n labels_sorted = labels[np.argsort(counts)]\n counts_sorted = counts[np.argsort(counts)]\n # Select new label with maximum overlap.\n max_overlap = labels_sorted[-1]\n return max_overlap\n \n def main(mask1, mask2):\n if not (mask1.shape == mask2.shape):\n raise ValueError(\"Masks do not have the same shape.\")\n # Initialize blank mask.\n updated_mask = np.zeros(mask2.shape)\n # Go one-by-one through the labels in mask2\n for label in np.unique(mask2)[1:]:\n # Find label in mask1 with maximum overlap with nuc from mask2.\n mask1_besthit = get_max_overlap(mask2, mask1, label)\n # Find reverse: best hit for the mask1 label in mask2.\n mask2_besthit = get_max_overlap(mask1, mask2, mask1_besthit)\n # If the labels are reciprocal best hits, update label in \n # new mask to have the shape of the object in mask 2 with \n # the label propagated from mask1.\n if ((mask2_besthit == label) and (mask1_besthit != 0)):\n updated_mask[mask2 == label] = mask1_besthit\n\n return updated_mask\n return main(mask1, mask2)", "def swatershed(f, g, B=None, LINEREG=\"LINES\"):\n\n if B is None: B = secross()\n print 'Not implemented yet'\n return None\n return y", "def flood_fill():\n global CURRENT_LABEL\n global mouse_pos\n\n im_mask = (source_msk==CURRENT_LABEL).astype(np.uint8)\n cv.floodFill(im_mask, None, mouse_pos, CURRENT_LABEL)\n source_msk[im_mask!=0] = CURRENT_LABEL", "def segment_func2(self):\n # computing neighboors graph\n A = self.boundaryprob_graph()\n\n # SpectralClustering segmentation\n sc = SpectralClustering(3, affinity='precomputed', n_init=10, assign_labels='discretize')\n labels = sc.fit_predict(A)\n\n return labels", "def 
compute_patch_loss(self, inputs, outputs, next_to_prev_weight = [1.0, 1.0]):\n bsz, w, h, __ = inputs['input_image'].shape \n\n pred_next_image = outputs[\"next_position\"]\n\n path_state = inputs['path_state'].reshape(bsz, 1, w, h).float() \n true_next_image = image_to_tiles(path_state, self.patch_size) \n\n # binarize patches\n next_sum_image = torch.sum(true_next_image, dim = 2, keepdim=True) \n next_patches = torch.zeros_like(next_sum_image)\n # any patch that has a 1 pixel in it gets 1 \n next_patches[next_sum_image != 0] = 1\n\n pred_next_image = pred_next_image.squeeze(-1)\n next_patches = next_patches.squeeze(-1).to(self.device).long() \n\n pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')\n\n next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image, next_patches) \n\n total_loss = next_pixel_loss \n print(f\"loss {total_loss.item()}\")\n\n return total_loss", "def test_lcwa_label_smoothing(self):\n # Create dummy dense labels\n labels = torch.zeros(self.batch_size, self.num_entities)\n for i in range(self.batch_size):\n labels[i, self.random.randint(self.num_entities)] = 1.0\n # Check if labels form a probability distribution\n np.testing.assert_allclose(torch.sum(labels, dim=1).numpy(), 1.0)\n\n # Apply label smoothing\n smooth_labels = apply_label_smoothing(labels=labels, epsilon=self.epsilon, num_classes=self.num_entities)\n # Check if smooth labels form probability distribution\n np.testing.assert_allclose(torch.sum(smooth_labels, dim=1).numpy(), 1.0, rtol=self.relative_tolerance)", "def pad_edges(self, pad):\n weights=[]\n for dim, xy in zip([0, 1], [self.x, self.y]):\n xy0 = np.mean(xy)\n W = xy[-1]-xy[0]\n dist = np.abs(xy-xy0)\n wt=np.ones_like(dist)\n wt[ dist >= W/2 - pad] = 0\n weights += [wt]\n self.weight *= weights[0][:,None].dot(weights[1][None,:])", "def label_smooth_pcc(f):\n n = f.shape[0]\n labels = tf.eye(n)\n labels = tf.reshape(labels,[-1,1])\n labels = (1.0 - args.smoothing) * labels + args.smoothing / 2\n pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])\n bce = tf.keras.losses.BinaryCrossentropy()\n return -bce(labels, pre_prob)", "def b4Wan():\n \n tors, edges = tp.mesh_topo()\n G = build_graph(edges)\n \n # Get the routing path of all nodes\n table_file_name = '../outputs/mesh_routing_table.txt'\n table = all_routing(G, tors, table_file_name)\n if((os.path.isfile(table_file_name)) == False):\n table = all_routing(G, tors, table_file_name)\n else:\n json_data = open(table_file_name).read()\n table = json.loads(json_data)\n \n seeds, polys = cf.get_seeds_table(tors) #\n\n return G, tors, edges, table, seeds, polys", "def label_image(image):\n \n #Label the blobs using ndimage\n labeled_blobs, n_features = ndimage.label(b_image)\n \n #calculate the center of mass of each labelled feature\n centers = ndimage.center_of_mass(b_image, labeled_blobs, np.arange(n_features) + 1)\n \n return labeled_blobs, n_features, centers", "def label_smoothing_regularization(self, chars_labels, weight=0.1):\n one_hot_labels = tf.one_hot(\n chars_labels, depth=self.num_char_classes, axis=-1)\n pos_weight = 1.0 - weight\n neg_weight = weight / self.num_char_classes\n return one_hot_labels * pos_weight + neg_weight", "def feature_dist(input_labels, struct=np.ones((3, 3))):\n # remove the pixels inside the coherent\n input_errosion = binary_erosion(input_labels, structure=struct)\n input_labels[input_errosion] = 0\n I, J = np.nonzero(input_labels)\n labels = input_labels[I, J]\n coords = np.column_stack((I, J))\n sorter = np.argsort(labels)\n labels = labels[sorter]\n 
coords = coords[sorter]\n I = I[sorter]\n J = J[sorter]\n sq_dists = cdist(coords, coords, 'sqeuclidean')\n start_idx = np.flatnonzero(np.r_[1, np.diff(labels)])\n nonzero_vs_feat = np.minimum.reduceat(sq_dists, start_idx, axis=1)\n feat_vs_feat = np.minimum.reduceat(nonzero_vs_feat, start_idx, axis=0)\n # calculate the distance between every two coherent areas\n # distance factor for one pixel to meter: 100\n distance_matrix = 100 * np.sqrt(feat_vs_feat)\n nRow, nCol = sq_dists.shape[0], start_idx.shape[0]\n # add the index of the final element to the slice array\n slice_indices = np.concatenate((start_idx, np.array([nRow])))\n col_args = np.zeros((nRow, nCol)).astype(int)\n row_index = np.zeros((nCol, nCol)).astype(int)\n '''\n How the following commands work:\n find closest pixel from label A to label B:\n Label A --> Label B\n row_index[A-1, B-1] = alpha\n col_index[A-1, B-1] = col_args[alpha, B-1] = beta\n index of pixel from Label A: input_labels[ I[alpha], J[alpha]]\n index of pixel from Label B: input_labels[ I[beta], J[beta]]\n '''\n for i in range(nCol):\n col_args[:, i] = start_idx[i] + \\\n np.argmin(sq_dists[:, slice_indices[i]:\n slice_indices[i+1]], axis=1)\n elements = sq_dists[np.arange(nRow).reshape((nRow, 1)), col_args]\n for i in range(nCol):\n row_index[i, :] = start_idx[i] + \\\n np.argmin(elements[slice_indices[i]:\n slice_indices[i+1], :], axis=0)\n col_index = col_args[row_index, np.arange(nCol).reshape((1, nCol))]\n # Change col_index and row_index to input array index.\n row_index_from_label = I[row_index]\n col_index_from_label = J[row_index]\n row_index_to_label = I[col_index]\n col_index_to_label = J[col_index]\n return distance_matrix, row_index_from_label, col_index_from_label, \\\n row_index_to_label, col_index_to_label", "def bwdiagfill(bwimage):\n # fills pixels matching the following neighborhoods:\n hoods = [[[0, 1, 0],\n [1, 0, 0],\n [0, 0, 0]],\n [[0, 0, 0],\n [1, 0, 0],\n [0, 1, 0]],\n [[0, 0, 0],\n [0, 0, 1],\n [0, 1, 0]],\n [[0, 1, 0],\n [0, 0, 1],\n [0, 0, 0]]]\n output = bwimage.copy()\n # for each neighborhood, find matching pixels and set them to 1 in the img\n for hood in hoods:\n output = np.logical_or(output,\n ndimage.binary_hit_or_miss(bwimage, hood))\n return output", "def _get_bbox_regression_labels(bbox_target_data, num_classes, front_2_1_points_targets_data, front_2_2_points_targets_data, front_center_targets_data, back_2_1_points_targets_data, back_2_2_points_targets_data, back_center_targets_data, center_targets_data):\n # Inputs are tensor\n\n clss = bbox_target_data[:, 0]\n bbox_targets = clss.new(clss.numel(), 4 * num_classes).zero_()\n \n front_2_1_points_targets = clss.new(clss.numel(), 4 * num_classes).zero_()\n front_2_2_points_targets = clss.new(clss.numel(), 4 * num_classes).zero_()\n front_center_targets = clss.new(clss.numel(), 2 * num_classes).zero_()\n\n back_2_1_points_targets = clss.new(clss.numel(), 4 * num_classes).zero_()\n back_2_2_points_targets = clss.new(clss.numel(), 4 * num_classes).zero_()\n back_center_targets = clss.new(clss.numel(), 2 * num_classes).zero_()\n\n center_targets = clss.new(clss.numel(), 2 * num_classes).zero_()\n\n front_center_inside_weights = clss.new(front_center_targets.shape).zero_()\n\n bbox_inside_weights = clss.new(bbox_targets.shape).zero_()\n \n inds = (clss > 0).nonzero().view(-1)\n if inds.numel() > 0:\n clss = clss[inds].contiguous().view(-1,1)\n\n dim1_inds = inds.unsqueeze(1).expand(inds.size(0), 4)\n dim2_inds = torch.cat([4*clss, 4*clss+1, 4*clss+2, 4*clss+3], 1).long()\n # 
print(dim2_inds) # e.g. dim 16 * 4\n\n dim3_inds = inds.unsqueeze(1).expand(inds.size(0), 2)\n dim4_inds = torch.cat([2*clss, 2*clss+1], 1).long()\n\n # fang[-1]\n bbox_targets[dim1_inds, dim2_inds] = bbox_target_data[inds][:, 1:]\n\n front_2_1_points_targets[dim1_inds, dim2_inds] = front_2_1_points_targets_data[inds][:, 0:]\n front_2_2_points_targets[dim1_inds, dim2_inds] = front_2_2_points_targets_data[inds][:, 0:]\n front_center_targets[dim3_inds, dim4_inds] = front_center_targets_data[inds][:, 0:]\n\n back_2_1_points_targets[dim1_inds, dim2_inds] = back_2_1_points_targets_data[inds][:, 0:]\n back_2_2_points_targets[dim1_inds, dim2_inds] = back_2_2_points_targets_data[inds][:, 0:]\n back_center_targets[dim3_inds, dim4_inds] = back_center_targets_data[inds][:, 0:]\n\n center_targets[dim3_inds, dim4_inds] = center_targets_data[inds][:, 0:]\n\n bbox_inside_weights[dim1_inds, dim2_inds] = bbox_targets.new(cfg.TRAIN.BBOX_INSIDE_WEIGHTS).view(-1, 4).expand_as(dim1_inds)\n\n front_center_inside_weights[dim3_inds, dim4_inds] = front_center_targets.new(cfg.TRAIN.CENTER_INSIDE_WEIGHTS).view(-1, 2).expand_as(dim3_inds)\n\n return bbox_targets, bbox_inside_weights, front_2_1_points_targets, front_2_2_points_targets, front_center_targets, back_2_1_points_targets, \\\n back_2_2_points_targets, back_center_targets, center_targets, front_center_inside_weights", "def slicing(features, seeds_features, seeds_label, label_map, adjacency,\n sigma=1., resize_shape=(480, 854)):\n label_map_flatten = np.reshape(label_map, [-1])\n num_seeds = np.max(label_map)+1\n # Label_map_one_hot [num_pixels, num_seeds_current]\n label_map_one_hot = np.zeros((label_map_flatten.shape[0], num_seeds), dtype=np.int16)\n label_map_one_hot[np.arange(label_map_flatten.shape[0]), label_map_flatten] = 1\n # weight_idx: [num_pixels, num_seeds_cur_prev_following_frame]\n # Only neighbouring seeds have weights > 0\n weight_idx = np.matmul(label_map_one_hot, adjacency)\n feature_dim = features.shape[2]\n\n # This implementation is not very efficient\n # It computes pairwise distance between all pixels and all seeds (from 3 frames)\n # dist: [num_pixels, num_seeds_cur_prev_following_frame]\n dist = euclidean_distances(np.reshape(features, [-1, feature_dim]), seeds_features)\n weight = np.exp(-dist*dist/sigma/sigma)\n weight *= weight_idx\n fg_votes = np.max(weight*np.expand_dims(seeds_label==1, 0), axis=1)\n bg_votes = np.max(weight*np.expand_dims(seeds_label==0, 0), axis=1)\n height = features.shape[0]\n width = features.shape[1]\n fg_votes = fg_votes.reshape((height, width))+1e-8\n bg_votes = bg_votes.reshape((height, width))+1e-8\n fg_votes = cv2.resize(fg_votes, (resize_shape[1], resize_shape[0]),\n interpolation=cv2.INTER_LINEAR)\n bg_votes = cv2.resize(bg_votes, (resize_shape[1], resize_shape[0]),\n interpolation=cv2.INTER_LINEAR)\n\n prob = np.stack([bg_votes, fg_votes], axis=2)\n dist_vis = utils.get_heatmap(np.concatenate([fg_votes, bg_votes], axis=0))\n prob = prob/np.sum(prob, axis=2, keepdims=True)\n\n return prob, dist_vis", "def analyze_belief_strength_with_bias(self, G):\r\n n = []\r\n nbs_list = []\r\n for node in G.nodes: #cycles through the nodes of the graph to mine the attributes\r\n n.append(node) #appends each node to a list that will be put into a dictionary\r\n pbs_list = []\r\n og_bs = G.nodes[node]['belief_strength'] #mines the numerical value for a nodes belief strength, from a pre-set node attribute\r\n unc = G.nodes[node]['uncertainty'] #mines the numerical value for a nodes belief uncertainty, from a pre-set 
node attribute\r\n prob = G.nodes[node]['probability']\r\n for pre in G.predecessors(node):\r\n ew = G.edges[pre, node]['weight'] #mines the numerical value of an edge's weight, from a pre-set edge attribute\r\n pre_bs = G.nodes[pre]['belief_strength'] #mines the numerical value for a predecessors belief strength, from a pre-set node attribute\r\n x = ew * pre_bs #determines how much a node values its neighbor's opinion.\r\n pbs_list.append(x) #puts all values for predecessor belief strangths in a list\r\n if len(pbs_list) == 0:\r\n nbs = og_bs\r\n nbs = int(nbs)\r\n else:\r\n apbs = sum(pbs_list)/len(pbs_list) #calculates the average predecessor belief strength value for a node\r\n if apbs*og_bs > 0:\r\n if apbs > 0:\r\n nbs = min(og_bs + (0.1*prob*unc*apbs), 100)\r\n else:\r\n nbs = max(og_bs + (0.1*prob*unc*apbs), -100)\r\n nbs = int(nbs)\r\n else:\r\n nbs = og_bs\r\n nbs = int(nbs)\r\n nbs_list.append(nbs) #the new belief strengths are appended to a list that will be put into adictionary\r\n change = dict(zip(n, nbs_list)) #creates a dictionary from two lists which stores the nodes as keys and their new belief strengths as values\r\n print(change)\r\n return change #this will be used to update the list in a different function\r", "def get_classification_simulator(self, image):\n\n r_channel = image[:,:,2]\n g_channel = image[:,:,1]\n\n\n\n # Threshold color channel\n s_rgy_min = 50\n s_thresh_min = 245\n s_thresh_max = 255\n \n #s_binary = np.zeros_like(r_channel)\n r_binary = np.zeros_like(r_channel)\n g_binary = np.zeros_like(r_channel)\n y_binary = np.zeros_like(r_channel)\n \n #s_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) | ((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max))] = 1\n \n \n r_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) & (g_channel <= s_rgy_min)] = 1\n g_binary[((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max)) & (r_channel <= s_rgy_min)] = 1\n y_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) & ((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max))] = 1\n \n\n #res = cv2.bitwise_and(img,img,mask = s_binary)\n \n #maxx=image.shape[1]\n maxy=image.shape[0]\n \n y_top=0\n window_size_y=50\n y_bottom=y_top+window_size_y\n \n max_color=0\n tf_color=TrafficLight.UNKNOWN\n \n while (y_bottom< maxy):\n #print(img[y_top:y_bottom,:,:])\n rs= r_binary[y_top:y_bottom,:].sum()\n gs= g_binary[y_top:y_bottom,:].sum()\n ys= y_binary[y_top:y_bottom,:].sum()\n if (rs>max_color):\n max_color=rs\n tf_color=TrafficLight.RED\n if (gs>max_color):\n max_color=gs\n tf_color=TrafficLight.GREEN\n if (ys>max_color):\n max_color=ys\n tf_color=TrafficLight.YELLOW\n y_top+=window_size_y\n y_bottom+=window_size_y\n \n if (max_color<100):\n tf_color=TrafficLight.UNKNOWN\n \n\n\n return tf_color", "def watershed(image, markers=None, connectivity=1, offset=None, mask=None,\n compactness=0, watershed_line=False):\n from ..segmentation import watershed as _watershed\n return _watershed(image, markers, connectivity, offset, mask,\n compactness, watershed_line)", "def make_graph(imageAnnotated, imageGaussian):\n nodeNumber = imageAnnotated.max() - 1\n distanceDiagonalPixels, distanceDiagonalPixelsCubic = np.sqrt(2.0), np.sqrt(3.0)\n distanceMatrix = np.array([[distanceDiagonalPixelsCubic, distanceDiagonalPixels, distanceDiagonalPixelsCubic], [distanceDiagonalPixels, 1, distanceDiagonalPixels],\n [distanceDiagonalPixelsCubic, distanceDiagonalPixels, distanceDiagonalPixelsCubic]])\n nodePositions = 
np.transpose(np.where(imageAnnotated > 1))[:, ::-1]\n imagePropagatedNodes = imageAnnotated.copy()\n imageFilamentLength = 1.0 * (imageAnnotated.copy() > 0)\n imageFilamentIntensity = 1.0 * (imageAnnotated.copy() > 0)\n dimensionY, dimensionX = imageAnnotated.shape\n filament = (imagePropagatedNodes == 1).sum()\n while (filament > 0):\n nodePixel = np.transpose(np.where(imagePropagatedNodes > 1))\n for posY, posX in nodePixel:\n xMin, xMax, yMin, yMax = bounds(posX - 1, 0, dimensionX), bounds(posX + 2, 0, dimensionX), bounds(posY - 1, 0, dimensionY), bounds(posY + 2, 0, dimensionY)\n nodeNeighborhood = imagePropagatedNodes[yMin:yMax, xMin:xMax]\n nodeFilamentLength = imageFilamentLength[yMin:yMax, xMin:xMax]\n nodeFilamentIntensity = imageFilamentIntensity[yMin:yMax, xMin:xMax]\n imagePropagatedNodes[yMin:yMax, xMin:xMax] = np.where(nodeNeighborhood == 1, imagePropagatedNodes[posY, posX], nodeNeighborhood)\n imageFilamentLength[yMin:yMax, xMin:xMax] = np.where(nodeFilamentLength == 1, distanceMatrix[0:yMax - yMin, 0:xMax - xMin] + imageFilamentLength[posY, posX], nodeFilamentLength)\n imageFilamentIntensity[yMin:yMax, xMin:xMax] = np.where(nodeFilamentIntensity == 1, imageGaussian[posY, posX] + imageFilamentIntensity[posY, posX], nodeFilamentIntensity)\n filament = (imagePropagatedNodes == 1).sum()\n graph = nx.empty_graph(nodeNumber, nx.MultiGraph())\n filamentY, filamentX = np.where(imagePropagatedNodes > 1)\n for posY, posX in zip(filamentY, filamentX):\n nodeIndex = imagePropagatedNodes[posY, posX]\n xMin, xMax, yMin, yMax = bounds(posX - 1, 0, dimensionX), bounds(posX + 2, 0, dimensionX), bounds(posY - 1, 0, dimensionY), bounds(posY + 2, 0, dimensionY)\n filamentNeighborhood = imagePropagatedNodes[yMin:yMax, xMin:xMax].flatten()\n filamentLength = imageFilamentLength[yMin:yMax, xMin:xMax].flatten()\n filamentIntensity = imageFilamentIntensity[yMin:yMax, xMin:xMax].flatten()\n for index, pixel in enumerate(filamentNeighborhood):\n if (pixel != nodeIndex and pixel > 1):\n node1, node2 = np.sort([nodeIndex - 2, pixel - 2])\n nodeDistance = sp.linalg.norm(nodePositions[node1] - nodePositions[node2])\n filamentLengthSum = imageFilamentLength[posY, posX] + filamentLength[index]\n filamentIntensitySum = imageFilamentIntensity[posY, posX] + filamentIntensity[index]\n minimumEdgeWeight = max(1e-9, filamentIntensitySum)\n edgeCapacity = 1.0 * minimumEdgeWeight / filamentLengthSum\n edgeLength = 1.0 * filamentLengthSum / minimumEdgeWeight\n edgeConnectivity = 0\n edgeJump = 0\n graph.add_edge(node1, node2, edist=nodeDistance, fdist=filamentLengthSum, weight=minimumEdgeWeight, capa=edgeCapacity, lgth=edgeLength, conn=edgeConnectivity, jump=edgeJump)\n return(graph, nodePositions)", "def compute_sw_threshold(flanking_reads, paf_dict, fasta_dict, window_size):\n\n max_scores = []\n for query, target in itertools.product(flanking_reads, flanking_reads):\n\n if str(query + target) in paf_dict:\n overlap_info = paf_dict[query+target]\n elif str(target + query) in paf_dict:\n # get info and swap them\n overlap_info = paf_dict[target+query]\n query, target = target, query\n else:\n continue\n\n query_start = overlap_info['query_start']\n query_end = overlap_info['query_end']\n target_start = overlap_info['target_start']\n target_end = overlap_info['target_end']\n\n query_seq = fasta_dict[query][query_start:query_end]\n target_seq = fasta_dict[target][target_start:target_end]\n\n # Get scores for this pair; store in cur_scores\n cur_scores = []\n if window_size:\n # Use rolling window\n min_len = 
min(len(query_seq), len(target_seq))\n for start, end in utils.pairwise(range(0, min_len, window_size)):\n qs = query_seq[start:end]\n ts = target_seq[start:end]\n score = smith_waterman.smith_waterman(qs, ts)\n cur_scores.append(score)\n\n if cur_scores:\n score = max(cur_scores)\n max_scores.append(score)\n else:\n # No rolling window\n score = smith_waterman.smith_waterman(query_seq, target_seq)\n max_scores.append(score)\n\n threshold = 0.9 * max(max_scores)\n\n print(\"using {} as threshold\".format(threshold))\n\n plt.subplot(2, 3, 2)\n plt.hist(max_scores)\n plt.title(\"FLANKING READS\\nhistogram of num_gaps / len(aligned_sequence)\\nthreshold = {}\\nwindow_size = {}\\nshowing {} scores\"\n .format(threshold, window_size, len(max_scores)))\n\n\n\n return threshold", "def spread_labels(labels,maxdist=9999999):\n #distances,features = morphology.distance_transform_edt(labels==0,return_distances=1,return_indices=1)\n #indexes = features[0]*labels.shape[1]+features[1]\n #spread = labels.ravel()[indexes.ravel()].reshape(*labels.shape)\n if not labels.any():\n return labels\n distances,indexes = cv2.distanceTransformWithLabels(array(labels==0,uint8),cv2.DIST_L2,cv2.DIST_MASK_PRECISE,labelType=cv2.DIST_LABEL_PIXEL)\n spread = labels[where(labels>0)][indexes-1]\n if maxdist is None:\n return spread, distances\n spread *= (distances<maxdist)\n return spread", "def heuristic(self):\r\n # 1.\r\n blacks, whites = 0, 0\r\n weights = [0 for _ in range(6)]\r\n directions = [[-1, -1], [-1, 1], [1, 1], [1, -1]]\r\n user_dir = directions[:2] if self.current_player == 'n' else directions[2:]\r\n for i in range(8):\r\n for j in range(8):\r\n blacks += 1 if self.matrix[i][j] in ['N', 'n'] else 0\r\n whites += 1 if self.matrix[i][j] in ['A', 'a'] else 0\r\n if self.matrix[i][j] == self.current_player or self.matrix[i][j] == self.current_player.upper():\r\n\r\n # numarul de piese rege\r\n if self.matrix[i][j] == self.current_player.upper():\r\n weights[1] += 7.75\r\n\r\n # numarul de piese normale\r\n else:\r\n weights[0] += 5\r\n\r\n # numarul de piese de pe baseline in functie de tipul de piesa\r\n # conform strategiilor de joc este o strategie buna sa ai cat mai multe\r\n # piesa pe baseline pentru a preveni creare de piese de tip rege ale adversarului\r\n if self.current_player in ['n', 'N']:\r\n if i == 7:\r\n weights[2] += 4\r\n elif self.current_player in ['a', 'A']:\r\n if i == 0:\r\n weights[2] += 4\r\n\r\n # numarul de piese din mijlocul tablei\r\n # la fel este o strategie buna pentru atac\r\n if 3 <= i <= 4 and 3 <= j <= 4:\r\n weights[3] += 2\r\n\r\n # numar piese vulnerabile\r\n # adica piese ce pot fi capturate de oponent la urmatoare tura\r\n for d in user_dir:\r\n\r\n vx = d[0] + i\r\n vy = d[1] + j\r\n back_x = i - d[0]\r\n back_y = j - d[1]\r\n next_x, next_y = vx + d[0], vy + d[1]\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(back_x, back_y) and self.matrix[back_x][back_y] == '.':\r\n weights[4] -= 3\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(next_x, next_y) and self.matrix[next_x][next_y] == '.':\r\n # daca elimin o piesa rege este o mutare mai buna\r\n if self.matrix[vx][vy] == self.opponent().upper():\r\n weights[5] += 10\r\n else:\r\n weights[5] += 7\r\n\r\n diff = (blacks - whites) if self.current_player == 'n' else (whites - blacks)\r\n # cand sunt mai putin piese, AI adopta o tactica mai ofensiva\r\n if blacks + whites <= 10:\r\n 
return sum(weights) + diff\r\n return sum(weights)", "def predict_boosting_example(x, h_ens):\r\n\r\n arr = []\r\n sum_alpha = 0\r\n\r\n for y in h_ens:\r\n # splitting hypothesis, weight pairs\r\n alpha, tree = h_ens[y]\r\n tst_pred = predict_example(x, tree)\r\n # appending prediction\r\n arr.append(tst_pred*alpha)\r\n sum_alpha += alpha\r\n predict_egz = np.sum(arr) / sum_alpha\r\n # weak learner\r\n if predict_egz >= 0.5:\r\n return 1\r\n else:\r\n return 0", "def assignLabels(self):\n clusters = np.arange(0, len(self.V))[self.V < self.V1] #indexes self.V, volumes_sorted, and oldOrder\n self.clusterV = self.volumes_sorted[clusters]\n clusters = self.oldOrder[clusters] #indexes volumes\n self.clusters = self.nonBI[clusters] #indexes self.vor and self.data\n self.easyLabel = np.zeros(len(self.data))\n self.easyLabel[self.clusters] = 1\n print('Out of ' + str(len(self.data)) + ' particles, ' + str(len(self.clusters)) + ' (' + str(round(len(self.clusters)*100/len(self.data), 3)) +' %) are labelled as cluster particles.')", "def labelled_attachment(gold_trees, pred_trees):\n count_match, count_total = 0, 0\n for gold, pred in zip(gold_trees, pred_trees):\n triples_pairs = zip(\n gold.get_triples(include_root=True),\n pred.get_triples(include_root=True),\n )\n for (g_src, g_trg, g_rel), (p_src, p_trg, p_rel) in triples_pairs:\n assert g_src == p_src\n count_total += 1\n if g_trg == p_trg and g_rel == p_rel:\n count_match += 1\n if count_match == 0 or count_total == 0:\n return 0.0\n else:\n return float(count_match) / count_total", "def cell(x, y):\n try:\n if cells[y][x]['filled'] == 1:\n return # this has already been processed\n except IndexError:\n return\n cells[y][x]['filled'] = 1 # this cell is now filled\n\n nn = []\n for nx, ny in neighbours(x, y):\n try:\n if cells[ny][nx]['filled']:\n nn.append(cells[ny][nx])\n except IndexError:\n continue\n \n c = 0 # colour weighting\n \n #------ Flippedness\n flipped = sum([i['inverted'] for i in nn if i['inverted']])\n cells[y][x]['inverted'] = (randint(0, 3) + flipped) % 4\n \n #------- Colour calculation\n avg_colour = sum([i['colour'][0] for i in nn]) / len(nn)\n avg_sat = sum([i['colour'][1] for i in nn]) / len(nn)\n avg_bri = sum([i['colour'][2] for i in nn]) / len(nn)\n \n # small chance of going totally random otherwise small variation from neighbours\n if random(100) > 90:\n h = randint(0, 100)\n s = randint(0, 100)\n b = randint(0, 100)\n else:\n h = (avg_colour + randint(-15, 15)) % 100\n s = (avg_sat + randint(-15, 15)) % 100\n b = (avg_bri + randint(-15, 15)) % 100\n cells[y][x]['colour'] = (h, s, b)\n \n #------- Alpha calculation\n d = sqrt((x*cell_size - rx)**2 + (y*cell_size - ry)**2) # distance from epicenter\n mx = sqrt((w-rx*cell_size)**2 + (h-ry*cell_size)**2)\n a = d/sqrt(w**2+h**2)*255\n cells[y][x]['alpha'] = a\n \n for cx,cy in neighbours(x, y):\n cell(cx, cy)", "def clusterbatplacement(b, h, cluster, connectedhomes):\n for batt in b:\n b[batt].posx = cluster[batt][0]\n b[batt].posy = cluster[batt][1]\n for house in connectedhomes[batt]:\n b[batt].connected.append(h[house])", "def count_blood_cells(image_path):\n\n # TODO - Prebrojati crvena i bela krvna zrnca i vratiti njihov broj kao povratnu vrednost ove procedure\n \"\"\"\n White cells\n \"\"\"\n # Getting image\n white_cells_img = cv2.imread(image_path)\n gray_img = cv2.cvtColor(white_cells_img, cv2.COLOR_BGR2GRAY)\n\n # Apply median filter for smoothing\n smooth_img_white = cv2.medianBlur(gray_img, 5)\n\n # Morphological operations\n kernel = np.ones((5, 5), 
np.uint8)\n closing_img = cv2.morphologyEx(smooth_img_white, cv2.MORPH_CLOSE, kernel)\n\n # Adaptive threshold gaussian filter\n threshold_img = cv2.adaptiveThreshold(closing_img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\n cv2.THRESH_BINARY, 9, 2)\n\n # Segmentation of white cells\n circles_a = cv2.HoughCircles(threshold_img, cv2.HOUGH_GRADIENT, 1.2, 105,\n param1=50, param2=28, minRadius=2, maxRadius=28)\n\n # Getting count of white cells\n cell_count_a = []\n if circles_a is not None:\n circles_a = np.round(circles_a[0, :]).astype(\"int\")\n for (r) in circles_a:\n cell_count_a.append(r)\n # print(len(cell_count_a))\n white_blood_cell_count = len(cell_count_a)\n\n \"\"\"\n Red cells\n \"\"\"\n # Getting image\n red_cells_img = cv2.imread(image_path)\n\n # Getting red color\n red = [(150, 137, 168), (218, 209, 208)] # (lower), (upper)\n colors = [red]\n\n # Apply median filter for smoothing\n smooth_img_red = cv2.medianBlur(red_cells_img, 3)\n\n cell_count_b = 0\n output = red_cells_img.copy()\n for lower, upper in colors:\n mask = cv2.inRange(smooth_img_red, lower, upper)\n\n # Segmentation of red cells\n circles_b = cv2.HoughCircles(mask, cv2.HOUGH_GRADIENT, 1, 20, param1=15, param2=17,\n minRadius=2, maxRadius=60)\n\n # Getting count of red cells\n if circles_b is not None:\n circles_b = np.round(circles_b[0, :]).astype(\"int\")\n\n for (x, y, r) in circles_b:\n cv2.circle(output, (x, y), r, (255, 0, 255), 2)\n cv2.rectangle(output, (x - 5, y - 5), (x + 5, y + 5), (255, 0, 255), -1)\n cell_count_b += 1\n\n # cv2.imwrite('output.png', output)\n # print(cell_count_b)\n red_blood_cell_count = cell_count_b\n\n # TODO - Odrediti da li na osnovu broja krvnih zrnaca pacijent ima leukemiju i vratiti True/False kao povratnu\n # vrednost ove procedure\n\n if (white_blood_cell_count > 2\n or\n white_blood_cell_count >= (red_blood_cell_count / 3)):\n has_leukemia = True\n else:\n has_leukemia = False\n\n return red_blood_cell_count, white_blood_cell_count, has_leukemia", "def fill_holes(img):\n neg = 1 - img\n s = ndimage.generate_binary_structure(3,1) # iterate structure\n labeled_array, numpatches = ndimage.label(neg,s) # labeling\n sizes = ndimage.sum(neg,labeled_array,range(1,numpatches+1)) \n sizes_list = [sizes[i] for i in range(len(sizes))]\n sizes_list.sort()\n max_size = sizes_list[-1]\n max_label = np.where(sizes == max_size)[0] + 1\n component = labeled_array == max_label\n return 1 - component", "def get_hardwired_speed_weights(self):\n \n phase_shift=self.speed_phase_shift\n \n # row 1 has the weights of speed cells to grid cell 1\n self.W_speed_east=np.zeros_like(self.W_ee) \n self.W_speed_west=np.zeros_like(self.W_ee) \n self.W_speed_north=np.zeros_like(self.W_ee) \n self.W_speed_south=np.zeros_like(self.W_ee) \n\n if self.use_eight_directions is True:\n self.W_speed_north_east=np.zeros_like(self.W_ee) \n self.W_speed_north_west=np.zeros_like(self.W_ee) \n self.W_speed_south_east=np.zeros_like(self.W_ee) \n self.W_speed_south_west=np.zeros_like(self.W_ee) \n\n\n for phase_idx,phase in enumerate(self.gp.phases):\n shifted_north_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi/2.),self.gp.phases)\n shifted_south_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi/2.),self.gp.phases)\n shifted_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(0),self.gp.phases)\n shifted_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi),self.gp.phases)\n\n self.W_speed_north[phase_idx,:]=self.W_ee[shifted_north_phase_idx,:]\n 
self.W_speed_south[phase_idx,:]=self.W_ee[shifted_south_phase_idx,:]\n self.W_speed_east[phase_idx,:]=self.W_ee[shifted_east_phase_idx,:]\n self.W_speed_west[phase_idx,:]=self.W_ee[shifted_west_phase_idx,:] \n \n if self.use_eight_directions is True:\n shifted_north_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi/4),self.gp.phases)\n shifted_north_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi*3/4),self.gp.phases)\n shifted_south_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi/4),self.gp.phases)\n shifted_south_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi*3/4),self.gp.phases)\n \n self.W_speed_north_east[phase_idx,:]=self.W_ee[shifted_north_east_phase_idx,:]\n self.W_speed_north_west[phase_idx,:]=self.W_ee[shifted_north_west_phase_idx,:]\n self.W_speed_south_east[phase_idx,:]=self.W_ee[shifted_south_east_phase_idx,:]\n self.W_speed_south_west[phase_idx,:]=self.W_ee[shifted_south_west_phase_idx,:]", "def weights_walls(pc, boundary_indx, n_action, n_neurons):\n # find index on a 40 point circle \n boundary_cells = pc[boundary_indx, :]\n angles = (np.arctan2(boundary_cells[:, 0], boundary_cells[:, 1]) + 2*np.pi) % (2*np.pi)\n \n # find index of boundary cell (in a ring of 40 action neurons)\n thetas = theta_action(n_action)\n diff = np.abs(angles[:, np.newaxis] - thetas[np.newaxis, :])\n indx = np.argmin(diff, axis=1) # the closest match\n\n w_walls = np.ones((n_action, n_neurons))\n \n for ii in np.arange(len(boundary_indx)):\n cell_ind = boundary_indx[ii]\n \n d = collections.deque(np.arange(n_action))\n w_walls[:, cell_ind] = 0 # first set all weights to 0\n d.rotate(n_action-(indx[ii]+ 13)) # 1st permitted action is 13 segments away\n actions_ind = list(itertools.islice(d, 0, 14))\n w_walls[actions_ind, cell_ind] = 1\n \n return w_walls", "def learn_with_bootstrapping(self, sample_count=10000):\n tic = time.clock()\n training_set_size = 150 # TODO: change to 1000, 500 or something\n sample_pool = self.training_stream.extract_training_patches(sample_count, negative_ratio=1.)\n # initialize weights\n weighted_patches = []\n for patch in sample_pool: # weight all patches: training pool P\n weighted_patches.append([patch, 1. 
/ len(sample_pool)])\n # if patch.label == +1:\n # pos_patch = patch # PRESENTATION, REPORT\n # shuffle training pool\n weighted_patches = random_sample_weighted_patches(weighted_patches, len(weighted_patches))\n\n if self.algorithm == 'adaboost': # Shuffle the training data\n training_data = random_sample_weighted_patches(weighted_patches, len(weighted_patches))\n elif self.algorithm == 'wald': # Sample training_set_size samples\n training_data = random_sample_weighted_patches(weighted_patches, training_set_size)\n\n for t in range(self.layers): # choose the weak classifier with the minimum error\n print \"Learn with bootstrapping using %s, layer #%d\" % (self.algorithm.title(), t+1)\n\n if self.algorithm == 'adaboost':\n h_t = self._fetch_best_weak_classifier(weighted_patches)\n elif self.algorithm == 'wald':\n h_t = self._fetch_best_weak_classifier(training_data)\n # h_t.visualize(pos_patch) # PRESENTATION, REPORT\n self.classifiers.append(copy.deepcopy(h_t)) # add it to the strong classifier\n\n if self.algorithm == 'adaboost':\n self.classifiers[-1].update_alpha(weighted_patches)\n weighted_patches = self._adaboost_reweight(weighted_patches, t)\n elif self.algorithm == 'wald':\n kde_n, kde_p, xs_n, xs_p = self._estimate_ratios(training_data, t)\n # find decision thresholds for the strong classifier\n self._tune_thresholds(kde_n, kde_p, xs_n, xs_p, t)\n # throw away training samples that fall in our thresholds\n weighted_patches = self._reweight_and_discard_irrelevant(weighted_patches, t)\n # sample new training data\n training_data = random_sample_weighted_patches(weighted_patches, training_set_size)\n if len(training_data) == 0:\n print \"no more training data!\"\n break\n toc = time.clock()\n print toc - tic\n print self", "def train_label_weigthed(self, train_dataset, validation_dataset, label, lr = 0.02, epochs_num = 100, batch_size = 40, alpha = 0, momentum = 0.9):\n \n def get_proportions(dataset):\n \n positive_label = dataset.labels_tensor[:,label].sum()\n \n negative_label = (1 - dataset.labels_tensor[:,label]).sum()\n \n total_examples = positive_label + negative_label\n \n imbalance = abs(positive_label - 0.5) > 0.4\n \n if imbalance:\n \n if positive_label < negative_label:\n \n w_p = 1\n \n w_n = positive_label / negative_label\n \n else:\n \n w_p = negative_label / positive_label\n \n w_n = 1\n \n else:\n \n w_p = w_n = 1\n \n return w_p, w_n\n \n def get_w(labels, w_p, w_n):\n \n positives = labels\n \n negatives = 1 - labels\n \n w = w_p * positives + w_n * negatives\n \n return w\n \n# positive_label = train_dataset.labels_tensor[:,label].sum()\n \n# negative_label = (1 - train_dataset.labels_tensor[:,label]).sum()\n \n# total_examples = positive_label + negative_label\n \n# print('num examples {}'.format(positive_label + negative_label))\n \n# print('% positive labels: {}'.format(positive_label/total_examples))\n \n# print('% negative labels: {}'.format(negative_label/total_examples))\n \n# imbalance = abs(positive_label - 0.5) > 0.4\n \n# if imbalance:\n \n# if positive_label < negative_label:\n \n# w_p = 1\n \n# w_n = positive_label / negative_label\n \n# else:\n \n# w_p = negative_label / positive_label\n \n# w_n = 1\n \n# else:\n \n# w_p = w_n = 1\n \n# print('w_p: {}'.format(w_p))\n \n# print('w_n: {}'.format(w_n))\n\n w_p, w_n = get_proportions(train_dataset) \n \n label_name = train_dataset.labels.items()[label][0]\n \n print(\"Training label {} ... 
\".format(label_name))\n \n optimizer = SGD(self.parameters(), lr = lr, weight_decay = alpha, momentum = momentum)\n\n train_losses = []\n\n validation_losses = []\n\n epochs = []\n\n start = time.time()\n\n remaining_time = 0\n\n train_dataloader = DataLoader(train_dataset, batch_size = batch_size, collate_fn = PPD.collate_data)\n \n validation_segments, validation_labels = PPD.collate_data(validation_dataset)\n \n weight_matrix_v = get_w(validation_labels[:,label].unsqueeze(1), w_p, w_n)\n \n criterion_v = nn.BCELoss(weight=weight_matrix_v.float())\n \n print('w_p: {} and w_n: {}'.format(w_p, w_n))\n\n for epoch in range(epochs_num):\n\n for i_batch, sample_batched in enumerate(train_dataloader):\n\n input = sample_batched[0]\n\n target = sample_batched[1][:,label].unsqueeze(1)\n \n weight_matrix = w_p * target + w_n * (1 - target)\n \n criterion = nn.BCELoss(weight=weight_matrix.float())\n\n self.zero_grad()\n\n output = self(input)\n\n train_loss = criterion(output, target.float())\n\n train_loss.backward()\n\n optimizer.step()\n\n validation_loss = criterion_v(self(validation_segments.long()), validation_labels[:,label].unsqueeze(1).float())\n\n end = time.time()\n\n remaining_time = remaining_time * 0.90 + ((end - start) * (epochs_num - epoch + 1) / (epoch + 1)) * 0.1\n\n remaining_time_corrected = remaining_time / (1 - (0.9 ** (epoch + 1)))\n\n epoch_str = \"last epoch finished: \" + str(epoch)\n\n progress_str = \"progress: \" + str((epoch + 1) * 100 / epochs_num) + \"%\"\n\n time_str = \"time: \" + str(remaining_time_corrected / 60) + \" mins\"\n\n sys.stdout.write(\"\\r\" + epoch_str + \" -- \" + progress_str + \" -- \" + time_str)\n\n sys.stdout.flush()\n\n train_losses.append(train_loss.item())\n\n validation_losses.append(validation_loss.item())\n\n epochs.append(epoch)\n\n print(\"\\n\" + \"Training completed. Total training time: \" + str(round((end - start) / 60, 2)) + \" mins\")\n\n return epochs, train_losses, validation_losses", "def joint_bilateral(filename,flash_image,noflash_image,sigma_spatial,sigma_intensity):\n\t# make a simple Gaussian function taking the squared radius\n\tgaussian = lambda r2, sigma: np.exp(-0.5*r2/sigma**2)\n\tflash_image = cv2.cvtColor(flash_image,cv2.COLOR_BGR2RGB)\n\tnoflash_image = cv2.cvtColor(noflash_image,cv2.COLOR_BGR2RGB)\n\n\t# define the window width to be the 2 time the spatial std. dev. to\n\t# be sure that most of the spatial kernel is actually captured\n\twin_width = int(3*sigma_spatial +1)\n\twgt_sum = np.zeros_like(flash_image).astype(np.float64)\n\tresult = np.zeros_like(flash_image).astype(np.float64)\n\tout= np.zeros_like(flash_image).astype(np.float64)\n\t\n\t\n\tfor i in tqdm(range(flash_image.shape[-1]),desc=\"Going through color channels\"):\n\t\tnorm_flash_image = normalize(flash_image[:,:,i])\n\t\tnorm_noflash_image = normalize(noflash_image[:,:,i])\n\t\tfor shft_x in range(-win_width,win_width+1):\n\t\t\tfor shft_y in range(-win_width,win_width+1):\n\t\t\t\t# compute the spatial contribution\n\t\t\t\tspatial = gaussian(shft_x**2+shft_y**2, sigma_spatial )\n\t\n\t\t\t\t# shift by the offsets to get image window\n\t\t\t\twindow = np.roll(norm_flash_image, [shft_y, shft_x], axis=[0,1])\n\t\t\t\twindow1 = np.roll(norm_noflash_image, [shft_y, shft_x], axis=[0,1])\n\t\t\t\t# compute the intensity contribution\n\t\t\t\tcombined_filter = spatial*gaussian((window-norm_flash_image)**2, sigma_intensity )\n\t\n\t\t\t\t# result stores the mult. 
between combined filter and image window\n\t\t\t\tresult[:,:,i] += window1*combined_filter\n\t\t\t\twgt_sum[:,:,i] += combined_filter\n\tout = normalize(result/wgt_sum)\n\t# normalize the result and return\n\tplt.imsave(\"outputImages/JointBilateral_\"+filename+\"_\"+str(sigma_spatial)+\"_\"+ str(sigma_intensity) + \".png\" ,out,dpi=600)\n\treturn out", "def create_labelled_dataset(self):\n\n print(\"-------------------------------------------------------------------\")\n print(\" How to Use the Pole Hull Label Tool\")\n print(\"-------------------------------------------------------------------\")\n print(\"- If a hull is NOT associated to a pole: press the 1 button\")\n print(\"- If a hull IS associated to a pole: press the 2 button\")\n print(\"\\n- If any other key is pressed, the program EXITS\")\n print(\"-------------------------------------------------------------------\")\n\n detector = gate_detector.GateDetector(im_resize=3.0/4)\n\n imgs = []\n labels = []\n directory = os.path.dirname(os.getcwd())\n \n # Get absolute path of all images in the images folder\n for dirpath,_,filenames in os.walk(os.path.join(directory, 'images', 'gate')):\n for f in filenames:\n imgs.append(os.path.abspath(os.path.join(dirpath, f)))\n\n # Get the hulls from the segmented image and run the display and label program for each image\n for img in imgs:\n src = cv.imread(img, 1)\n pre = detector.preprocess(src)\n seg = detector.segment(pre)\n mor = detector.morphological(seg)\n hulls = detector.create_convex_hulls(seg)\n labels += self.display_and_label_hulls(hulls, pre)\n return labels", "def worker(i):\n res_read = read_dicom(os.path.join(DIR, fnames[i]))\n if res_read is None:\n print(\"failed on {0}, {1}\".format(i, fnames[i]))\n return ' '.join(map(str,[fnames[i]] + [-1]*4 +[-1]*4))\n\n img, spacing = res_read\n R, C = img.shape\n split_point = C/2\n\n right_l = img[:,:split_point]\n left_l = img[:,split_point:]\n\n prop = get_joint_y_proposals(right_l)\n\n # We will store the coordinates of the top left and the bottom right corners of the bounding box\n hog = cv2.HOGDescriptor(winSize,blockSize,blockStride,cellSize,nbins)\n\n\n # Making proposals for the right leg\n R, C = right_l.shape\n displacements = range(-C//4,1*C//4+1,step)\n best_score = -999999999\n sizepx = int(sizemm/spacing) # Proposal size\n\n for y_coord in prop:\n for x_displ in displacements:\n for scale in scales:\n if C/2+x_displ-R/scale/2 >= 0:\n # Candidate ROI\n roi = np.array([C/2+x_displ-R/scale/2, y_coord-R/scale/2, R/scale, R/scale]).astype(int)\n x1, y1 = roi[0], roi[1]\n x2, y2 = roi[0]+roi[2], roi[1]+roi[3]\n patch = cv2.resize(img[y1:y2,x1:x2],(64, 64))\n\n hog_descr = hog.compute(patch,winStride,padding)\n score = np.inner(w,hog_descr.ravel())+b\n\n if score > best_score:\n jc = np.array([C/2+x_displ, y_coord])\n best_score = score\n\n\n roi_R = np.array([jc[0]-sizepx//2, jc[1]-sizepx//2, jc[0]+sizepx//2, jc[1]+sizepx//2])\n # Making proposals for the left leg\n R, C = left_l.shape\n displacements = range(-C//4,1*C//4+1,step)\n prop = get_joint_y_proposals(left_l)\n best_score = -999999999\n for y_coord in prop:\n for x_displ in displacements:\n for scale in scales:\n if split_point+x_displ+R/scale/2 < img.shape[1]:\n roi = np.array([split_point+C/2+x_displ-R/scale/2, y_coord-R/scale/2, R/scale, R/scale]).astype(int)\n\n x1, y1 = roi[0], roi[1]\n x2, y2 = roi[0]+roi[2], roi[1]+roi[3]\n patch = np.fliplr(cv2.resize(img[y1:y2,x1:x2],(64, 64)))\n\n hog_descr = hog.compute(patch,winStride,padding)\n score = 
np.inner(w,hog_descr.ravel())+b\n\n if score > best_score:\n jc = np.array([split_point+C/2+x_displ, y_coord])\n best_score = score\n\n roi_L = np.array([jc[0]-sizepx//2, jc[1]-sizepx//2, jc[0]+sizepx//2, jc[1]+sizepx//2])\n\n print(\"Done with {}, {}\".format(i, fnames[i]))\n return ' '.join(map(str,[fnames[i]] + np.round(roi_L).astype(int).tolist() + np.round(roi_R).astype(int).tolist()))", "def kmerNeighbors(text,k):\r\n L=set()\r\n for i in range(0,len(text)-k+1):\r\n for d in range(0,k+1):\r\n L.update(Neighbors(kmer(text,i,k),d))\r\n D=dict()\r\n for l in L:\r\n D[l]=minHamm(text,l)\r\n return D", "def compute_gradient_saliency_maps(samples: torch.tensor,\n true_labels: torch.tensor,\n model: nn.Module):\n \"\"\"INSERT YOUR CODE HERE, overrun return.\"\"\"\n return torch.rand(6, 256, 256)", "def do_a_propagation(self):\n random.seed(self.seeding)\n random.shuffle(self.nodes)\n for node in tqdm(self.nodes):\n neighbors = nx.neighbors(self.graph, node)\n pick = self.make_a_pick(node, neighbors)\n self.labels[node] = pick\n current_label_count = len(set(self.labels.values()))\n if self.label_count == current_label_count:\n self.flag = False\n else:\n self.label_count = current_label_count", "def findHighWeightFeatures(self, label):\n featuresWeights = []\n\n \"*** YOUR CODE HERE ***\"\n\n return featuresWeights", "def get_action(self, state):\n\n \"\"\"\n XXX: DO NOT MODIFY THAT FUNCTION !!!\n Doing so will result in a 0 grade.\n \"\"\"\n\n # XXX : You shouldn't care on what is going on below.\n # Variables are specified in constructor.\n if self.beliefGhostStates is None:\n self.beliefGhostStates = state.getGhostBeliefStates()\n if self.walls is None:\n self.walls = state.getWalls()\n\n # @TODO Put this back to normal\n ret = self.updateAndGetBeliefStates(\n self._computeNoisyPositions(state))\n\n if self.i < 25:\n debug = ret[0]\n self.l.append(np.max(debug))\n self.i += 1\n #if debug == 1: # To Stop as soon as convergence happens\n #self.i = 25\n\n prefix = 'data/' # To indicate path\n\n if self.i == 25:\n\n if os.path.exists(os.path.join(prefix, str(self.w) + \"-\" + str(self.p) + \".txt\")):\n os.remove(os.path.join(prefix, str(self.w) + \"-\" + str(self.p) + \".txt\"))\n\n f = open(os.path.join(prefix, str(self.w) + \"-\" + str(self.p) + \".txt\"), \"a\")\n first = True\n for data in self.l:\n if first:\n first = False\n f.write(str(data))\n else:\n f.write(\",\" + str(data))\n self.i += 1\n f.close()\n print(\"Done\")\n plt.plot(range(1, len(self.l)+1), self.l)\n plt.xlabel('Time step')\n plt.ylabel('Maximum probability')\n plt.title('Bayes Filter')\n plt.axis([0, self.i, 0, 1])\n plt.savefig(os.path.join(prefix, str(self.w) + \"-\" + str(self.p) + \".pdf\"), bbox_inches='tight')\n plt.show()\n\n return ret", "def build_labels():\n l_title = GLabel('Which one is Karel?')\n l_title.font = 'Courier-25'\n l_title.color = 'black'\n window.add(l_title, x=260, y=60)\n l_num = GLabel('19')\n l_num.font = 'Courier-50'\n l_num.color = 'whitesmoke'\n window.add(l_num, x=37, y=242)\n l_skip = GLabel('skip')\n l_skip.font = 'Courier-20'\n l_skip.color = 'whitesmoke'\n window.add(l_skip, x=726, y=152)\n l_ans1 = GLabel('Answers')\n l_ans1.font = 'Courier-20-italic'\n l_ans1.color = 'black'\n window.add(l_ans1, x=698, y=270)\n l_ans2 = GLabel('0')\n l_ans2.font = 'Courier-50-italic'\n l_ans2.color = 'black'\n window.add(l_ans2, x=722, y=252)\n l_game_pin = GLabel('Game PIN: SC101')\n l_game_pin.font = 'Courier-20'\n l_game_pin.color = 'black'\n window.add(l_game_pin, x=20, y=540)\n l_1 = 
GPolygon()\n l_1.add_vertex((210, 360))\n l_1.add_vertex((197, 380))\n l_1.add_vertex((221, 380))\n l_1.filled = True\n l_1.color = 'whitesmoke'\n l_1.fill_color= 'whitesmoke'\n window.add(l_1)\n l_2_1 = GPolygon()\n l_2_1.add_vertex((210+380, 359))\n l_2_1.add_vertex((198+380, 370))\n l_2_1.add_vertex((221+380, 370))\n l_2_1.filled = True\n l_2_1.fill_color = 'whitesmoke'\n l_2_1.color = 'whitesmoke'\n window.add(l_2_1)\n l_2_2 = GPolygon()\n l_2_2.add_vertex((210+380, 381))\n l_2_2.add_vertex((198+380, 370))\n l_2_2.add_vertex((221+380, 370))\n l_2_2.filled = True\n l_2_2.fill_color = 'whitesmoke'\n l_2_2.color = 'whitesmoke'\n window.add(l_2_2)\n l_3 = GOval(23, 23, x=198, y=450)\n l_3.filled = True\n l_3.fill_color = 'whitesmoke'\n l_3.color = 'whitesmoke'\n window.add(l_3)\n l_4 = GRect(20, 20, x=583, y=450)\n l_4.filled = True\n l_4.fill_color = 'whitesmoke'\n l_4.color = 'whitesmoke'\n window.add(l_4)", "def neighbors(pattern, d):\n\n if d == 0:\n return [pattern]\n if len(pattern) == 1:\n return ['A', 'C', 'G', 'T']\n neighborhood = []\n suffix_pattern = pattern[1:]\n suffix_neighbors = neighbors(suffix_pattern, d)\n for text in suffix_neighbors:\n hdist = compute_hamming_distance(suffix_pattern, text)\n if hdist < d:\n for n in ['A', 'C', 'G', 'T']:\n neighbor = n + text\n neighborhood.append(neighbor)\n else:\n neighbor = pattern[0] + text\n neighborhood.append(neighbor)\n return neighborhood", "def agglo_from_labelmask(\n h5path_in,\n h5path_lv='',\n ratio_threshold=0,\n h5path_out='',\n save_steps=False,\n protective=False,\n ):\n\n # check output paths\n outpaths = {'out': h5path_out}\n status = utils.output_check(outpaths, save_steps, protective)\n if status == \"CANCELLED\":\n return\n\n # open data for reading\n h5file_in, ds_in, elsize, axlab = utils.h5_load(h5path_in)\n h5file_lv, ds_lv, _, _ = utils.h5_load(h5path_lv)\n\n # open data for writing\n h5file_out, ds_out = utils.h5_write(None, ds_in.shape, ds_in.dtype,\n h5path_out,\n element_size_um=elsize,\n axislabels=axlab)\n\n ulabels = np.unique(ds_in)\n maxlabel = np.amax(ulabels)\n print(\"number of labels in watershed: {:d}\".format(maxlabel))\n\n fwmap = np.zeros(maxlabel + 1, dtype='i')\n\n areas_ws = np.bincount(ds_in.ravel())\n\n labelsets = {}\n rp_lw = regionprops(ds_lv, ds_in)\n for prop in rp_lw:\n\n maskedregion = prop.intensity_image[prop.image]\n counts = np.bincount(maskedregion)\n svoxs_in_label = [l for sl in np.argwhere(counts) for l in sl]\n\n ratios_svox_in_label = [float(counts[svox]) / float(areas_ws[svox])\n for svox in svoxs_in_label]\n fwmask = np.greater(ratios_svox_in_label, ratio_threshold)\n labelset = np.array(svoxs_in_label)[fwmask]\n labelsets[prop.label] = set(labelset) - set([0])\n\n basepath = h5path_in.split('.h5/')[0]\n utils.write_labelsets(labelsets, basepath + \"_svoxsets\",\n filetypes=['pickle'])\n\n ds_out[:] = utils.forward_map(np.array(fwmap), ds_in, labelsets)\n\n # close and return\n h5file_in.close()\n h5file_lv.close()\n try:\n h5file_out.close()\n except (ValueError, AttributeError):\n return ds_out", "def binarize(img, s_thres=(170, 255), l_thres=(50, 255), sobel_thres=(30, 80)):\n hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)\n\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n hls[:, :, 1] = clahe.apply(hls[:, :, 1])\n\n l_image = hls[:, :, 1]\n l_blur = cv2.GaussianBlur(l_image, (0, 0), 9)\n l_image = cv2.addWeighted(l_image, 1, l_blur, -1, 0)\n l_image = cv2.normalize(l_image, np.zeros_like(l_image), 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)\n l_binary = 
np.zeros_like(l_image)\n l_binary[(l_image >= l_thres[0]) & (l_image <= l_thres[1])] = 1\n\n # Sobel x\n # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # gray = hls[:, :, 1]\n # sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0) # Take the derivative in x\n # abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal\n # scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))\n # sxbinary = np.zeros_like(scaled_sobel)\n # sxbinary[(scaled_sobel >= sobel_thres[0]) & (scaled_sobel <= sobel_thres[1])] = 1\n # sxbinary = s_binary\n\n s_channel = hls[:, :, 2]\n s_channel = cv2.normalize(s_channel, np.zeros_like(s_channel), 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)\n s_binary = np.zeros_like(s_channel)\n s_binary[(s_channel >= s_thres[0]) & (s_channel <= s_thres[1])] = 1\n\n # Combine the two binary thresholds\n combined_binary = np.zeros_like(s_binary)\n combined_binary[(s_binary == 1) | (l_binary == 1)] = 1\n\n # we filter out the lines with too many active pixels\n combined_binary_rows = combined_binary.sum(1)\n combined_binary[combined_binary_rows > (combined_binary.shape[1] / 2)] = 0\n\n return combined_binary", "def calc_w_inference(g1, inf_g1, g2, inf_g2, consider_label):\n edges_g1 = np.count_nonzero(g1)\n edges_g2 = np.count_nonzero(g2)\n\n overlap_r1 = 0\n overlap_r2 = 0\n n_nodes = len(g1)\n for i in range(n_nodes):\n for j in range(n_nodes):\n if consider_label:\n if (g1[i][j] != NO_REL_SYMBOL and inf_g2[i][j]!= NO_REL_SYMBOL) and (g1[i][j] == inf_g2[i][j]):\n overlap_r1 += 1 # how much g1 recalls \"populated\"-g2\n if (inf_g1[i][j] != NO_REL_SYMBOL and g2[i][j]!= NO_REL_SYMBOL) and (inf_g1[i][j] == g2[i][j]):\n overlap_r2 += 1 # how much g2 recalls \"populated\"-g2\n else:\n if (g1[i][j] != NO_REL_SYMBOL and inf_g2[i][j]!= NO_REL_SYMBOL):\n overlap_r1 += 1\n if (inf_g1[i][j] != NO_REL_SYMBOL and g2[i][j]!= NO_REL_SYMBOL):\n overlap_r2 += 1\n\n r1 = float(overlap_r1) / float(edges_g1)\n r2 = float(overlap_r2) / float(edges_g2)\n return (r1 + r2) / float(2)", "def prefix_beam_search(ctc, labels, blank_index=0, lm=None,k=5,alpha=0.3,beta=5,prune=0.001,end_char='>',return_weights=False):\n assert (ctc.shape[1] == len(labels)), \"ctc size:%d, labels: %d\" % (ctc.shape[1], len(labels))\n assert ctc.shape[0] > 1, \"ctc length: %d was too short\" % ctc.shape[0]\n assert (ctc >= 0).all(), 'ctc output contains negative numbers'\n lm = (lambda l: 1) if lm is None else lm # if no LM is provided, just set to function returning 1\n word_count_re = re.compile(r'\\w+[\\s|>]')\n W = lambda l: word_count_re.findall(l)\n F = ctc.shape[1]\n \n ctc = np.vstack((np.zeros(F), ctc)) # just add an imaginative zero'th step (will make indexing more intuitive)\n T = ctc.shape[0]\n blank_char = labels[blank_index]\n\n # STEP 1: Initiliazation\n O = ''\n Pb, Pnb = defaultdict(Counter), defaultdict(Counter)\n Pb[0][O] = 1\n Pnb[0][O] = 0\n A_prev = [O]\n # END: STEP 1\n\n # STEP 2: Iterations and pruning\n for t in range(1, T):\n pruned_alphabet = [labels[i] for i in np.where(ctc[t] > prune)[0]]\n for l in A_prev:\n \n if len(l) > 0 and l[-1] == end_char:\n Pb[t][l] = Pb[t - 1][l]\n Pnb[t][l] = Pnb[t - 1][l]\n continue \n\n for c in pruned_alphabet:\n c_ix = labels.index(c)\n # END: STEP 2\n \n # STEP 3: “Extending” with a blank\n if c == blank_char:\n Pb[t][l] += ctc[t][blank_index] * (Pb[t - 1][l] + Pnb[t - 1][l])\n # END: STEP 3\n \n # STEP 4: Extending with the end character\n else:\n l_plus = l + c\n if len(l) > 0 and c == l[-1]:\n Pnb[t][l_plus] += ctc[t][c_ix] * Pb[t - 
1][l]\n Pnb[t][l] += ctc[t][c_ix] * Pnb[t - 1][l]\n # END: STEP 4\n\n # STEP 5: Extending with any other non-blank character and LM constraints\n elif len(l.replace(' ', '')) > 0 and c in (' ', end_char):\n lm_prob = lm(l_plus.strip(' '+end_char)) ** alpha\n Pnb[t][l_plus] += lm_prob * ctc[t][c_ix] * (Pb[t - 1][l] + Pnb[t - 1][l])\n else:\n Pnb[t][l_plus] += ctc[t][c_ix] * (Pb[t - 1][l] + Pnb[t - 1][l])\n # END: STEP 5\n\n # STEP 6: Make use of discarded prefixes\n if l_plus not in A_prev:\n Pb[t][l_plus] += ctc[t][blank_index] * (Pb[t - 1][l_plus] + Pnb[t - 1][l_plus])\n Pnb[t][l_plus] += ctc[t][c_ix] * Pnb[t - 1][l_plus]\n # END: STEP 6\n\n # STEP 7: Select most probable prefixes\n A_next = Pb[t] + Pnb[t]\n sorter = lambda l: A_next[l] * (len(W(l)) + 1) ** beta\n A_prev = sorted(A_next, key=sorter, reverse=True)[:k]\n # END: STEP 7\n if len(A_prev) ==0:\n A_prev=['']\n if return_weights:\n return A_prev[0],A_next[A_prev[0]] * (len(W(A_prev[0])) + 1) ** beta\n return A_prev[0]\n #For N-best decode, return A_prev[0:N] - not tested yet.", "def label_users(self):\n record_unit = 1000\n print self.friendship_graph.number_of_nodes()\n print self.friendship_graph.number_of_edges()\n\n for num, node in enumerate(self.friendship_graph.nodes()):\n fake_flag = self.determine_spammer_by_percentage(node)\n self.friendship_graph.node[node]['fake'] = fake_flag\n # print self.friendship_graph.node[node]\n if num % record_unit == 0:\n print num\n print time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n nx.write_gpickle(self.friendship_graph, \"graph/firendship_new_label%d.pickle\" % num)\n if num != 0:\n os.remove(\"graph/firendship_new_label%d.pickle\" % (num - record_unit))\n\n nx.write_gpickle(self.friendship_graph, \"graph/firendship_0.8fake_%d.pickle\" % num)", "def process_label(self, foreground_labels):\n # Find the unique (nonnegative) foreground_labels, map them to {0, ..., K-1}\n unique_nonnegative_indices = np.unique(foreground_labels)\n mapped_labels = foreground_labels.copy()\n for k in range(unique_nonnegative_indices.shape[0]):\n mapped_labels[foreground_labels == unique_nonnegative_indices[k]] = k\n foreground_labels = mapped_labels\n return foreground_labels", "def find_connected_components(thresh_image):\n\n rows, cols = thresh_image.shape\n\n # First find the connected components of the image\n # num_labels: the number of connected components found in the image\n # labels: a matrix with the labels for each pixel\n # stats: [top_left_x_coord, top_left_y_coord, width, height, area] statistics for each component\n # centroids: [x_coord, y_coord] of the centroid of each component\n num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(thresh_image)\n\n # Do some filtering based on the characteristics of a shuttle\n for i in range(num_labels):\n\n # Filters out shapes that are too long\n size_ratio = stats[i, 2] / stats[i, 3]\n if size_ratio < 0.5 or size_ratio > 2:\n labels[labels == i] = 0\n\n # Filters out shapes that fit the bounding box too well (likely noise) or too bad (sparse structure)\n area_ratio = stats[i, 4] / (stats[i, 2] * stats[i, 3])\n if area_ratio < 0.4 or area_ratio > 0.9:\n labels[labels == i] = 0\n\n # Filters out shapes too small (in proportion to image size)\n if stats[i, 2] < (rows / 60) or stats[i, 3] < (cols / 60):\n labels[labels == i] = 0\n\n return labels", "def learn_pattern_Hebb(self, pattern):\n if pattern.shape != self.shape:\n # TODO: this could be written in a clearer way\n ValueError(\"The pattern shape does not match the 
network one.\")\n\n pattern_flat = pattern.flatten()\n\n # Convert the bool array to an array with +-1\n pattern_pm = 2*pattern_flat.astype(bool) - 1\n\n # Update adjacency matrix according to Hebb's rule \n adjmatrix_change = np.outer(pattern_pm, pattern_pm).astype(float)\n self.network.adjmatrix = np.average(\n [self.network.adjmatrix, adjmatrix_change], axis=0,\n weights=[self.npatterns, 1])\n\n # Update neighbour lists (isingmodel.Ising method)\n self.update_neighbours()\n\n # Store the pattern in the patterns list\n self.patterns.append(pattern)", "def splitCell(buff,index,ref_label,new_label):\n cell_before = np.copy(buff[:,:,index-1])\n cell_after = np.copy(buff[:,:,index])\n \n mask_after = cell_after ==ref_label\n \n cell_before[np.logical_not(mask_after)] = 0\n \n mask_ref_label = cell_before ==ref_label\n mask_new_label = cell_before==new_label\n \n after_sure_ref = np.logical_and(mask_ref_label,mask_after)\n after_sure_new = np.logical_and(mask_new_label,mask_after)\n after_unsure = np.logical_and(mask_after,np.logical_not(np.logical_or(after_sure_ref,after_sure_new) ) )\n\n xref,yref = np.where(after_sure_ref)\n ref_pts = np.concatenate((xref.reshape(-1,1),yref.reshape(-1,1)),axis=1)\n xnew,ynew = np.where(after_sure_new)\n new_pts = np.concatenate((xnew.reshape(-1,1),ynew.reshape(-1,1)),axis=1)\n \n labels_ref = np.ones(xref.shape[0])\n labels_new = np.zeros(xnew.shape[0])\n labels = np.concatenate((labels_ref,labels_new),axis=0)\n labels.reshape(-1,1)\n X= np.concatenate((ref_pts,new_pts),axis = 0)\n \n xu,yu = np.where(after_unsure)\n u_pts = np.concatenate((xu.reshape(-1,1),yu.reshape(-1,1)),axis=1)\n neigh = KNeighborsClassifier(n_neighbors=5)\n neigh.fit(X, labels)\n pred = neigh.predict(u_pts)\n for i in range(pred.shape[0]):\n #if pred is 1 goes to ref if 0 goes to new\n if pred[i]==1:\n after_sure_ref[u_pts[i,0],u_pts[i,1]]=True\n else:\n after_sure_new[u_pts[i,0],u_pts[i,1]]=True\n #Assigning the new values to the thing:\n buff[after_sure_ref,index] = ref_label\n buff[after_sure_new,index] = new_label" ]
[ "0.7240329", "0.65720975", "0.62097526", "0.6137392", "0.6120207", "0.5910127", "0.5895485", "0.585058", "0.58383656", "0.5824214", "0.57871556", "0.5696335", "0.5649472", "0.5627702", "0.5602631", "0.5593537", "0.5549893", "0.55149436", "0.5510126", "0.54956084", "0.54879755", "0.5461544", "0.54063094", "0.54040754", "0.539537", "0.53839386", "0.53625154", "0.53600675", "0.5355649", "0.535127", "0.53449005", "0.534269", "0.5324569", "0.5324493", "0.5313296", "0.5293137", "0.5285607", "0.5284158", "0.5276383", "0.5254221", "0.5252661", "0.52357966", "0.5230905", "0.5229232", "0.5226448", "0.5224193", "0.5216202", "0.5204197", "0.5202104", "0.5196458", "0.5193162", "0.5180036", "0.51765734", "0.5166866", "0.51629066", "0.51618814", "0.51582855", "0.51477516", "0.51439613", "0.51412123", "0.5140584", "0.51358086", "0.5135741", "0.5126402", "0.512575", "0.51190895", "0.5109461", "0.51092434", "0.510354", "0.5097495", "0.50957406", "0.50925905", "0.5088258", "0.5087214", "0.5085658", "0.50842154", "0.5077548", "0.50769097", "0.5076529", "0.50757176", "0.5072657", "0.50665426", "0.50583917", "0.5056687", "0.5056079", "0.5051099", "0.5050393", "0.5050317", "0.5049864", "0.5044294", "0.5044216", "0.5040614", "0.50345165", "0.50326306", "0.503155", "0.50280255", "0.5026652", "0.5026211", "0.5021653", "0.50191706" ]
0.51125866
66
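
The row above stores each candidate snippet as one long escaped string (literal \n and \r\n sequences instead of real line breaks), followed by a parallel list of score strings and two trailing values that appear to be a relevance score and a rank. Below is a minimal sketch of how such a row could be post-processed once parsed; it is an illustration under those assumptions, not a loader shipped with this data, and the helper names and toy values are invented for the example.

# Minimal post-processing sketch for one parsed row of this dump; helper names and
# the toy values below are invented for illustration, not taken from the data itself.

def unescape_snippet(raw: str) -> str:
    # The stored snippets are ASCII-only, so encode to bytes and let unicode_escape
    # expand the literal \n / \r\n / \" sequences into real characters.
    return raw.encode("ascii").decode("unicode_escape")

def rank_candidates(snippets, scores):
    # Pair each candidate snippet with its float score and sort best-first.
    pairs = [(float(s), unescape_snippet(code)) for code, s in zip(snippets, scores)]
    return sorted(pairs, key=lambda p: p[0], reverse=True)

# Toy usage with two invented candidates and scores:
candidates = [r'def add(a, b):\n    return a + b', r'def sub(a, b):\n    return a - b']
scores = ["0.42", "0.91"]
for score, code in rank_candidates(candidates, scores):
    print(f"{score:.2f}")
    print(code)
    print()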