Columns: signature (string, lengths 8–3.44k) · body (string, lengths 0–1.41M) · docstring (string, lengths 1–122k) · id (string, lengths 5–17)
def __parse_structures():
filename = get_file('<STR_LIT>')<EOL>with io.open(filename, '<STR_LIT:r>', encoding='<STR_LIT>') as textfile:<EOL><INDENT>next(textfile)<EOL>for line in textfile:<EOL><INDENT>tokens = line.strip().split('<STR_LIT:U+002C>')<EOL>if len(tokens) == <NUM_LIT:5>:<EOL><INDENT>if tokens[<NUM_LIT:3>] == '<STR_LIT>':<EOL><INDENT>__INCHI_KEYS[int(tokens[<NUM_LIT:1>])] = Structure(tokens[<NUM_LIT:2>],<EOL>Structure.InChIKey,<EOL>int(tokens[<NUM_LIT:4>][<NUM_LIT:0>]))<EOL><DEDENT>elif tokens[<NUM_LIT:3>] == '<STR_LIT>':<EOL><INDENT>__SMILES[int(tokens[<NUM_LIT:1>])] = Structure(tokens[<NUM_LIT:2>],<EOL>Structure.SMILES,<EOL>int(tokens[<NUM_LIT:4>][<NUM_LIT:0>]))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>
COMMENT
f599:m43
def __get_default_structure_ids():
if len(__DEFAULT_STRUCTURE_IDS) == <NUM_LIT:0>:<EOL><INDENT>filename = get_file('<STR_LIT>')<EOL>with io.open(filename, '<STR_LIT:r>', encoding='<STR_LIT>') as textfile:<EOL><INDENT>next(textfile)<EOL>for line in textfile:<EOL><INDENT>tokens = line.strip().split('<STR_LIT:\t>')<EOL>__DEFAULT_STRUCTURE_IDS.append(int(tokens[<NUM_LIT:1>]))<EOL><DEDENT><DEDENT><DEDENT>return __DEFAULT_STRUCTURE_IDS<EOL>
COMMENT
f599:m44
def get_file(filename):
destination = __DOWNLOAD_PARAMS['<STR_LIT:path>']<EOL>filepath = os.path.join(destination, filename)<EOL>if not __is_current(filepath):<EOL><INDENT>if not os.path.exists(destination):<EOL><INDENT>os.makedirs(destination)<EOL><DEDENT>url = '<STR_LIT>' + '<STR_LIT>'<EOL>urlretrieve(urlparse.urljoin(url, filename), filepath)<EOL>urlcleanup()<EOL><DEDENT>if filepath.endswith('<STR_LIT>'):<EOL><INDENT>zfile = zipfile.ZipFile(filepath, '<STR_LIT:r>')<EOL>filepath = os.path.join(destination, zfile.namelist()[<NUM_LIT:0>])<EOL>zfile.extractall(destination)<EOL><DEDENT>elif filepath.endswith('<STR_LIT>'):<EOL><INDENT>unzipped_filepath = filepath[:-len('<STR_LIT>')]<EOL>if os.path.exists(unzipped_filepath) and __is_current(unzipped_filepath):<EOL><INDENT>filepath = unzipped_filepath<EOL><DEDENT>else:<EOL><INDENT>input_file = gzip.open(filepath, '<STR_LIT:rb>')<EOL>filepath = os.path.join(destination, input_file.name[:-len('<STR_LIT>')])<EOL>output_file = open(filepath, '<STR_LIT:wb>')<EOL>for line in input_file:<EOL><INDENT>output_file.write(line)<EOL><DEDENT>input_file.close()<EOL>output_file.close()<EOL><DEDENT><DEDENT>return filepath<EOL>
Downloads filename from the ChEBI FTP site
f599:m45
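Since the body above stores its literals as <STR_LIT> placeholders, here is a minimal, hedged sketch of the same download-and-cache pattern in plain Python; the base URL and the '.zip'/'.gz' suffixes are assumptions standing in for the masked values, not text recovered from the corpus.

import gzip
import os
import zipfile
from urllib.request import urlretrieve

# Assumed base URL; the real value is hidden behind a <STR_LIT> placeholder.
BASE_URL = 'ftp://ftp.ebi.ac.uk/pub/databases/chebi/Flat_file_tab_delimited/'

def fetch(filename, destination='downloads'):
    # Download once, then unpack zip/gzip archives next to the download.
    os.makedirs(destination, exist_ok=True)
    filepath = os.path.join(destination, filename)
    if not os.path.exists(filepath):
        urlretrieve(BASE_URL + filename, filepath)
    if filepath.endswith('.zip'):
        with zipfile.ZipFile(filepath) as zfile:
            filepath = os.path.join(destination, zfile.namelist()[0])
            zfile.extractall(destination)
    elif filepath.endswith('.gz'):
        unzipped = filepath[:-len('.gz')]
        with gzip.open(filepath, 'rb') as src, open(unzipped, 'wb') as dst:
            dst.writelines(src)
        filepath = unzipped
    return filepath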
def __is_current(filepath):
if not __DOWNLOAD_PARAMS['<STR_LIT>']:<EOL><INDENT>return True<EOL><DEDENT>if not os.path.isfile(filepath):<EOL><INDENT>return False<EOL><DEDENT>return datetime.datetime.utcfromtimestamp(os.path.getmtime(filepath)) > __get_last_update_time()<EOL>
Checks whether file is current
f599:m46
def __get_last_update_time():
now = datetime.datetime.utcnow()<EOL>first_tuesday = __get_first_tuesday(now)<EOL>if first_tuesday < now:<EOL><INDENT>return first_tuesday<EOL><DEDENT>else:<EOL><INDENT>first_of_month = datetime.datetime(now.year, now.month, <NUM_LIT:1>)<EOL>last_month = first_of_month + datetime.timedelta(days=-<NUM_LIT:1>)<EOL>return __get_first_tuesday(last_month)<EOL><DEDENT>
Returns last FTP site update time
f599:m47
def __get_first_tuesday(this_date):
month_range = calendar.monthrange(this_date.year, this_date.month)<EOL>first_of_month = datetime.datetime(this_date.year, this_date.month, <NUM_LIT:1>)<EOL>first_tuesday_day = (calendar.TUESDAY - month_range[<NUM_LIT:0>]) % <NUM_LIT:7><EOL>first_tuesday = first_of_month + datetime.timedelta(days=first_tuesday_day)<EOL>return first_tuesday<EOL>
Get the first Tuesday of the month
f599:m48
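The two helpers above decide whether a cached file predates the most recent first-Tuesday release. A small self-contained check of the weekday arithmetic, with the numeric placeholders filled in:

import calendar
import datetime

def first_tuesday(year, month):
    # monthrange()[0] is the weekday of the 1st (Monday == 0, so calendar.TUESDAY == 1)
    first_weekday, _ = calendar.monthrange(year, month)
    offset = (calendar.TUESDAY - first_weekday) % 7
    return datetime.datetime(year, month, 1) + datetime.timedelta(days=offset)

print(first_tuesday(2024, 3))  # 2024-03-05 00:00:00 -- the first Tuesday of March 2024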
def get_structure(self):
return self.__structure<EOL>
Returns structure
f600:c0:m1
def get_type(self):
return self.__typ<EOL>
Returns type
f600:c0:m2
def get_dimension(self):
return self.__dimension<EOL>
Returns dimension
f600:c0:m3
def _read_mol_file(chebi_id):
directory = os.path.dirname(os.path.realpath(__file__))<EOL>textfile_read = open(directory + '<STR_LIT>' + str(chebi_id) + '<STR_LIT>', '<STR_LIT:r>')<EOL>mol_file_read = textfile_read.read()<EOL>textfile_read.close()<EOL>return mol_file_read<EOL>
COMMENT
f601:m0
def setUp(self):
self.__existing = ChebiEntity('<STR_LIT>')<EOL>self.__secondary = ChebiEntity('<STR_LIT>')<EOL>
COMMENT
f601:c1:m0
def __get_mol_file(self, read_id, retrieved_id):
mol_read = _read_mol_file(read_id)<EOL>this_chebi_entity = ChebiEntity(str(retrieved_id))<EOL>textfile_retrieved = open(this_chebi_entity.get_mol_filename(), '<STR_LIT:r>')<EOL>mol_retrieved = textfile_retrieved.read()<EOL>textfile_retrieved.close()<EOL>self.assertEquals(mol_read, mol_retrieved)<EOL>
COMMENT
f601:c1:m51
def __get_mol_id(self, chebi_id):
directory = os.path.dirname(os.path.realpath(__file__))<EOL>textfile_read = open(directory + '<STR_LIT>' + str(chebi_id) +<EOL>'<STR_LIT>', '<STR_LIT:r>')<EOL>mol_read = textfile_read.read()<EOL>this_structure = Structure(mol_read, Structure.mol, <NUM_LIT:2>)<EOL>self.assertEquals(this_structure, parsers.get_mol(chebi_id))<EOL>
COMMENT
f601:c12:m16
def get_formula(self):
return self.__formula<EOL>
Returns formula
f603:c0:m1
def get_source(self):
return self.__source<EOL>
Returns source
f603:c0:m2
def search(term, exact=False, rows=<NUM_LIT>):
url = '<STR_LIT>' + '<STR_LIT>' + str(exact) + '<STR_LIT>' + term + '<STR_LIT>' + str(int(rows))<EOL>response = requests.get(url)<EOL>data = response.json()<EOL>return [ChebiEntity(doc['<STR_LIT>']) for doc in data['<STR_LIT>']['<STR_LIT>']]<EOL>
Searches ChEBI via ols.
f604:m0
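A hedged usage sketch for search(); it assumes libchebipy exports the function at package level and that ChebiEntity offers the get_id()/get_name() accessors shown elsewhere in this corpus.

from libchebipy import search  # assumption: exported at package level

for entity in search('glucose', exact=False, rows=5):
    print(entity.get_id(), entity.get_name())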
def get_reference_id(self):
return self.__reference_id<EOL>
Returns reference_id
f606:c0:m1
def get__reference_db_name(self):
return self.__reference_db_name<EOL>
Returns _reference_db_name
f606:c0:m2
def get_location_in_ref(self):
return self.__location_in_ref<EOL>
Returns location_in_ref
f606:c0:m3
def get_reference_name(self):
return self.__reference_name<EOL>
Returns reference_name
f606:c0:m4
def get_type(self):
return self.__typ<EOL>
Returns type
f607:c0:m1
def get_target_chebi_id(self):
return '<STR_LIT>' + str(self.__target_chebi_id)<EOL>
Returns target_chebi_id
f607:c0:m2
def __get_status(self):
return self.__status<EOL>
Returns status
f607:c0:m3
def main():
chebi_entity = ChebiEntity(<NUM_LIT>)<EOL>print(chebi_entity.get_name())<EOL>for outgoing in chebi_entity.get_outgoings():<EOL><INDENT>target_chebi_entity = ChebiEntity(outgoing.get_target_chebi_id())<EOL>print(outgoing.get_type() + '<STR_LIT:\t>' + target_chebi_entity.get_name())<EOL><DEDENT>
Example code, showing the instantiation of a ChebiEntity, calls to get_name() and get_outgoings(), and calls to methods of the returned Relation objects.
f608:m0
def get_id(self):
return '<STR_LIT>' + str(self.__chebi_id)<EOL>
Returns id
f608:c1:m1
def get_parent_id(self):
parent_id = parsers.get_parent_id(self.__chebi_id)<EOL>return None if math.isnan(parent_id) else '<STR_LIT>' + str(parent_id)<EOL>
Returns parent id
f608:c1:m2
def get_formulae(self):
return parsers.get_all_formulae(self.__get_all_ids())<EOL>
Returns formulae
f608:c1:m3
def get_formula(self):
formulae = self.get_formulae()<EOL>return None if len(formulae) == <NUM_LIT:0> else formulae[<NUM_LIT:0>].get_formula()<EOL>
Returns formula
f608:c1:m4
def get_mass(self):
mass = parsers.get_mass(self.__chebi_id)<EOL>if math.isnan(mass):<EOL><INDENT>mass = parsers.get_mass(self.get_parent_id())<EOL><DEDENT>if math.isnan(mass):<EOL><INDENT>for parent_or_child_id in self.__get_all_ids():<EOL><INDENT>mass = parsers.get_mass(parent_or_child_id)<EOL>if not math.isnan(mass):<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>return mass<EOL>
Returns mass
f608:c1:m5
def get_charge(self):
charge = parsers.get_charge(self.__chebi_id)<EOL>if math.isnan(charge):<EOL><INDENT>charge = parsers.get_charge(self.get_parent_id())<EOL><DEDENT>if math.isnan(charge):<EOL><INDENT>for parent_or_child_id in self.__get_all_ids():<EOL><INDENT>charge = parsers.get_charge(parent_or_child_id)<EOL>if not math.isnan(charge):<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>return charge<EOL>
Returns charge
f608:c1:m6
def get_comments(self):
return parsers.get_all_comments(self.__get_all_ids())<EOL>
Returns comments
f608:c1:m7
def get_source(self):
return parsers.get_source(self.__chebi_id)<EOL>
Returns source
f608:c1:m8
def get_name(self):
name = parsers.get_name(self.__chebi_id)<EOL>if name is None:<EOL><INDENT>name = parsers.get_name(self.get_parent_id())<EOL><DEDENT>if name is None:<EOL><INDENT>for parent_or_child_id in self.__get_all_ids():<EOL><INDENT>name = parsers.get_name(parent_or_child_id)<EOL>if name is not None:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>return name<EOL>
Returns name
f608:c1:m9
def get_definition(self):
definition = parsers.get_definition(self.__chebi_id)<EOL>if definition is None:<EOL><INDENT>definition = parsers.get_definition(self.get_parent_id())<EOL><DEDENT>if definition is None:<EOL><INDENT>for parent_or_child_id in self.__get_all_ids():<EOL><INDENT>definition = parsers.get_definition(parent_or_child_id)<EOL>if definition is not None:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>return definition<EOL>
Returns definition
f608:c1:m10
def get_modified_on(self):
return parsers.get_all_modified_on(self.__get_all_ids())<EOL>
Returns modified on
f608:c1:m11
def get_created_by(self):
created_by = parsers.get_created_by(self.__chebi_id)<EOL>if created_by is None:<EOL><INDENT>created_by = parsers.get_created_by(self.get_parent_id())<EOL><DEDENT>if created_by is None:<EOL><INDENT>for parent_or_child_id in self.__get_all_ids():<EOL><INDENT>created_by = parsers.get_created_by(parent_or_child_id)<EOL>if created_by is not None:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>return created_by<EOL>
Returns created by
f608:c1:m12
def get_star(self):
return parsers.get_star(self.__chebi_id)<EOL>
Returns star
f608:c1:m13
def get_database_accessions(self):
return parsers.get_all_database_accessions(self.__get_all_ids())<EOL>
Returns database accessions
f608:c1:m14
def get_inchi(self):
inchi = parsers.get_inchi(self.__chebi_id)<EOL>if inchi is None:<EOL><INDENT>inchi = parsers.get_inchi(self.get_parent_id())<EOL><DEDENT>if inchi is None:<EOL><INDENT>for parent_or_child_id in self.__get_all_ids():<EOL><INDENT>inchi = parsers.get_inchi(parent_or_child_id)<EOL>if inchi is not None:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>return inchi<EOL>
Returns inchi
f608:c1:m15
def get_inchi_key(self):
structure = parsers.get_inchi_key(self.__chebi_id)<EOL>if structure is None:<EOL><INDENT>structure = parsers.get_inchi_key(self.get_parent_id())<EOL><DEDENT>if structure is None:<EOL><INDENT>for parent_or_child_id in self.__get_all_ids():<EOL><INDENT>structure = parsers.get_inchi_key(parent_or_child_id)<EOL>if structure is not None:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>return None if structure is None else structure.get_structure()<EOL>
Returns inchi key
f608:c1:m16
def get_smiles(self):
structure = parsers.get_smiles(self.__chebi_id)<EOL>if structure is None:<EOL><INDENT>structure = parsers.get_smiles(self.get_parent_id())<EOL><DEDENT>if structure is None:<EOL><INDENT>for parent_or_child_id in self.__get_all_ids():<EOL><INDENT>structure = parsers.get_smiles(parent_or_child_id)<EOL>if structure is not None:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>return None if structure is None else structure.get_structure()<EOL>
Returns smiles
f608:c1:m17
def get_mol(self):
structure = parsers.get_mol(self.__chebi_id)<EOL>if structure is None:<EOL><INDENT>structure = parsers.get_mol(self.get_parent_id())<EOL><DEDENT>if structure is None:<EOL><INDENT>for parent_or_child_id in self.__get_all_ids():<EOL><INDENT>structure = parsers.get_mol(parent_or_child_id)<EOL>if structure is not None:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>return None if structure is None else structure.get_structure()<EOL>
Returns mol
f608:c1:m18
def get_mol_filename(self):
mol_filename = parsers.get_mol_filename(self.__chebi_id)<EOL>if mol_filename is None:<EOL><INDENT>mol_filename = parsers.get_mol_filename(self.get_parent_id())<EOL><DEDENT>if mol_filename is None:<EOL><INDENT>for parent_or_child_id in self.__get_all_ids():<EOL><INDENT>mol_filename = parsers.get_mol_filename(parent_or_child_id)<EOL>if mol_filename is not None:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>return mol_filename<EOL>
Returns mol filename
f608:c1:m19
def get_names(self):
return parsers.get_all_names(self.__get_all_ids())<EOL>
Returns names
f608:c1:m20
def get_references(self):
return parsers.get_references(self.__get_all_ids())<EOL>
Returns references
f608:c1:m21
def get_compound_origins(self):
return parsers.get_all_compound_origins(self.__get_all_ids())<EOL>
Returns compound origins
f608:c1:m22
def get_outgoings(self):
return parsers.get_all_outgoings(self.__get_all_ids())<EOL>
Returns outgoings
f608:c1:m23
def get_incomings(self):
return parsers.get_all_incomings(self.__get_all_ids())<EOL>
Returns incomings
f608:c1:m24
def __get_status(self):
return parsers.get_status(self.__chebi_id)<EOL>
Returns status
f608:c1:m25
def __get_all_ids(self):
if self.__all_ids is None:<EOL><INDENT>parent_id = parsers.get_parent_id(self.__chebi_id)<EOL>self.__all_ids = parsers.get_all_ids(self.__chebi_id<EOL>if math.isnan(parent_id)<EOL>else parent_id)<EOL>if self.__all_ids is None:<EOL><INDENT>self.__all_ids = []<EOL><DEDENT><DEDENT>return self.__all_ids<EOL>
Returns all ids
f608:c1:m26
def _get_settings_class():
if not hasattr(django_settings, "<STR_LIT>"):<EOL><INDENT>msg = "<STR_LIT>"<EOL>raise ImproperlyConfigured(msg)<EOL><DEDENT>cls = django_settings.AUTH_ADFS.get('<STR_LIT>', DEFAULT_SETTINGS_CLASS)<EOL>return import_string(cls)<EOL>
Get the AUTH_ADFS setting from the Django settings.
f617:m0
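_get_settings_class() reads the AUTH_ADFS dict from Django settings. A sketch of what that settings block can look like; the key names follow the django-auth-adfs documentation and every value is a placeholder.

# settings.py -- values are placeholders, not working credentials.
AUTH_ADFS = {
    'SERVER': 'adfs.example.com',
    'CLIENT_ID': 'your-configured-client-id',
    'RELYING_PARTY_ID': 'your-relying-party-id',
    'AUDIENCE': 'microsoft:identityserver:your-relying-party-id',
    # Masked in the body above: a key selecting a custom Settings class,
    # falling back to DEFAULT_SETTINGS_CLASS when absent.
}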
def build_authorization_endpoint(self, request, disable_sso=None):
self.load_config()<EOL>redirect_to = request.GET.get(REDIRECT_FIELD_NAME, None)<EOL>if not redirect_to:<EOL><INDENT>redirect_to = django_settings.LOGIN_REDIRECT_URL<EOL><DEDENT>redirect_to = base64.urlsafe_b64encode(redirect_to.encode()).decode()<EOL>query = QueryDict(mutable=True)<EOL>query.update({<EOL>"<STR_LIT>": "<STR_LIT:code>",<EOL>"<STR_LIT>": settings.CLIENT_ID,<EOL>"<STR_LIT>": settings.RELYING_PARTY_ID,<EOL>"<STR_LIT>": self.redirect_uri(request),<EOL>"<STR_LIT:state>": redirect_to,<EOL>})<EOL>if self._mode == "<STR_LIT>":<EOL><INDENT>query["<STR_LIT>"] = "<STR_LIT>"<EOL>if (disable_sso is None and settings.DISABLE_SSO) or disable_sso is True:<EOL><INDENT>query["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT><DEDENT>return "<STR_LIT>".format(self.authorization_endpoint, query.urlencode())<EOL>
This function returns the ADFS authorization URL. Args: request(django.http.request.HttpRequest): A Django Request object disable_sso(bool): Whether to disable single sign-on and force the ADFS server to show a login prompt. Returns: str: The redirect URI
f617:c2:m6
def build_end_session_endpoint(self):
self.load_config()<EOL>return self.end_session_endpoint<EOL>
This function returns the ADFS end session URL to log a user out. Returns: str: The redirect URI
f617:c2:m7
def authenticate(self, request):
auth = get_authorization_header(request).split()<EOL>if not auth or auth[<NUM_LIT:0>].lower() != b'<STR_LIT>':<EOL><INDENT>return None<EOL><DEDENT>if len(auth) == <NUM_LIT:1>:<EOL><INDENT>msg = '<STR_LIT>'<EOL>raise exceptions.AuthenticationFailed(msg)<EOL><DEDENT>elif len(auth) > <NUM_LIT:2>:<EOL><INDENT>msg = '<STR_LIT>'<EOL>raise exceptions.AuthenticationFailed(msg)<EOL><DEDENT>user = authenticate(access_token=auth[<NUM_LIT:1>])<EOL>if user is None:<EOL><INDENT>raise exceptions.AuthenticationFailed('<STR_LIT>')<EOL><DEDENT>if not user.is_active:<EOL><INDENT>raise exceptions.AuthenticationFailed('<STR_LIT>')<EOL><DEDENT>return user, auth[<NUM_LIT:1>]<EOL>
Returns a `User` if a correct access token has been supplied in the Authorization header. Otherwise returns `None`.
f619:c0:m0
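From the client side, a request accepted by this authenticate() method carries a single Bearer token in the Authorization header. A hedged sketch; the URL and token are placeholders.

import requests

response = requests.get(
    'https://example.com/api/questions/',                # placeholder URL
    headers={'Authorization': 'Bearer <access-token>'},  # placeholder token
)
print(response.status_code)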
def get(self, request):
code = request.GET.get("<STR_LIT:code>")<EOL>if not code:<EOL><INDENT>return render(request, '<STR_LIT>', {<EOL>'<STR_LIT>': "<STR_LIT>",<EOL>}, status=<NUM_LIT>)<EOL><DEDENT>redirect_to = request.GET.get("<STR_LIT:state>")<EOL>user = authenticate(request=request, authorization_code=code)<EOL>if user is not None:<EOL><INDENT>if user.is_active:<EOL><INDENT>login(request, user)<EOL>if redirect_to:<EOL><INDENT>redirect_to = base64.urlsafe_b64decode(redirect_to.encode()).decode()<EOL><DEDENT>else:<EOL><INDENT>redirect_to = django_settings.LOGIN_REDIRECT_URL<EOL><DEDENT>url_is_safe = is_safe_url(<EOL>url=redirect_to,<EOL>allowed_hosts=[request.get_host()],<EOL>require_https=request.is_secure(),<EOL>)<EOL>redirect_to = redirect_to if url_is_safe else '<STR_LIT:/>'<EOL>return redirect(redirect_to)<EOL><DEDENT>else:<EOL><INDENT>return render(request, '<STR_LIT>', {<EOL>'<STR_LIT>': "<STR_LIT>",<EOL>}, status=<NUM_LIT>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return render(request, '<STR_LIT>', {<EOL>'<STR_LIT>': "<STR_LIT>",<EOL>}, status=<NUM_LIT>)<EOL><DEDENT>
Handles the redirect from ADFS to our site. We try to process the passed authorization code and login the user. Args: request (django.http.request.HttpRequest): A Django Request object
f620:c0:m0
def get(self, request):
return redirect(provider_config.build_authorization_endpoint(request))<EOL>
Initiates the OAuth2 flow and redirect the user agent to ADFS Args: request (django.http.request.HttpRequest): A Django Request object
f620:c1:m0
def get(self, request):
return redirect(provider_config.build_authorization_endpoint(request, disable_sso=True))<EOL>
Initiates the OAuth2 flow and redirect the user agent to ADFS Args: request (django.http.request.HttpRequest): A Django Request object
f620:c2:m0
def get(self, request):
logout(request)<EOL>return redirect(provider_config.build_end_session_endpoint())<EOL>
Logs out the user from both Django and ADFS Args: request (django.http.request.HttpRequest): A Django Request object
f620:c3:m0
def create_user(self, claims):
<EOL>username_claim = settings.USERNAME_CLAIM<EOL>usermodel = get_user_model()<EOL>user, created = usermodel.objects.get_or_create(**{<EOL>usermodel.USERNAME_FIELD: claims[username_claim]<EOL>})<EOL>if created or not user.password:<EOL><INDENT>user.set_unusable_password()<EOL>logger.debug("<STR_LIT>".format(claims[username_claim]))<EOL><DEDENT>return user<EOL>
Create the user if it doesn't exist yet Args: claims (dict): claims from the access token Returns: django.contrib.auth.models.User: A Django user
f625:c0:m3
def update_user_attributes(self, user, claims):
required_fields = [field.name for field in user._meta.fields if field.blank is False]<EOL>for field, claim in settings.CLAIM_MAPPING.items():<EOL><INDENT>if hasattr(user, field):<EOL><INDENT>if claim in claims:<EOL><INDENT>setattr(user, field, claims[claim])<EOL>logger.debug("<STR_LIT>".format(field, user, claims[claim]))<EOL><DEDENT>else:<EOL><INDENT>if field in required_fields:<EOL><INDENT>msg = "<STR_LIT>"<EOL>raise ImproperlyConfigured(msg.format(claim))<EOL><DEDENT>else:<EOL><INDENT>msg = "<STR_LIT>""<STR_LIT>".format(claim, field, user)<EOL>logger.warning(msg)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>msg = "<STR_LIT>"<EOL>raise ImproperlyConfigured(msg.format(field))<EOL><DEDENT><DEDENT>
Updates user attributes based on the CLAIM_MAPPING setting. Args: user (django.contrib.auth.models.User): User model instance claims (dict): claims from the access token
f625:c0:m4
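A sketch of the CLAIM_MAPPING setting this method iterates over, mapping Django user fields to access-token claims; the claim names are illustrative, borrowed from common ADFS setups.

# settings.py -- claim names on the right are illustrative.
AUTH_ADFS = {
    # ... server/client settings ...
    'CLAIM_MAPPING': {
        'first_name': 'given_name',
        'last_name': 'family_name',
        'email': 'upn',
    },
}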
def update_user_groups(self, user, claims):
if settings.GROUPS_CLAIM is not None:<EOL><INDENT>django_groups = [group.name for group in user.groups.all()]<EOL>if settings.GROUPS_CLAIM in claims:<EOL><INDENT>claim_groups = claims[settings.GROUPS_CLAIM]<EOL>if not isinstance(claim_groups, list):<EOL><INDENT>claim_groups = [claim_groups, ]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>logger.debug(<EOL>"<STR_LIT>".format(settings.GROUPS_CLAIM))<EOL>claim_groups = []<EOL><DEDENT>groups_to_remove = set(django_groups) - set(claim_groups)<EOL>groups_to_add = set(claim_groups) - set(django_groups)<EOL>for group_name in groups_to_remove:<EOL><INDENT>group = Group.objects.get(name=group_name)<EOL>user.groups.remove(group)<EOL>logger.debug("<STR_LIT>".format(group_name))<EOL><DEDENT>for group_name in groups_to_add:<EOL><INDENT>try:<EOL><INDENT>if settings.MIRROR_GROUPS:<EOL><INDENT>group, _ = Group.objects.get_or_create(name=group_name)<EOL>logger.debug("<STR_LIT>".format(group_name))<EOL><DEDENT>else:<EOL><INDENT>group = Group.objects.get(name=group_name)<EOL><DEDENT>user.groups.add(group)<EOL>logger.debug("<STR_LIT>".format(group_name))<EOL><DEDENT>except ObjectDoesNotExist:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>
Updates user group memberships based on the GROUPS_CLAIM setting. Args: user (django.contrib.auth.models.User): User model instance claims (dict): Claims from the access token
f625:c0:m5
def update_user_flags(self, user, claims):
if settings.GROUPS_CLAIM is not None:<EOL><INDENT>if settings.GROUPS_CLAIM in claims:<EOL><INDENT>access_token_groups = claims[settings.GROUPS_CLAIM]<EOL>if not isinstance(access_token_groups, list):<EOL><INDENT>access_token_groups = [access_token_groups, ]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>logger.debug("<STR_LIT>")<EOL>access_token_groups = []<EOL><DEDENT>for flag, group in settings.GROUP_TO_FLAG_MAPPING.items():<EOL><INDENT>if hasattr(user, flag):<EOL><INDENT>if group in access_token_groups:<EOL><INDENT>value = True<EOL><DEDENT>else:<EOL><INDENT>value = False<EOL><DEDENT>setattr(user, flag, value)<EOL>logger.debug("<STR_LIT>".format(user, flag, value))<EOL><DEDENT>else:<EOL><INDENT>msg = "<STR_LIT>"<EOL>raise ImproperlyConfigured(msg.format(flag))<EOL><DEDENT><DEDENT><DEDENT>for field, claim in settings.BOOLEAN_CLAIM_MAPPING.items():<EOL><INDENT>if hasattr(user, field):<EOL><INDENT>bool_val = False<EOL>if claim in claims and str(claims[claim]).lower() in ['<STR_LIT:y>', '<STR_LIT:yes>', '<STR_LIT:t>', '<STR_LIT:true>', '<STR_LIT>', '<STR_LIT:1>']:<EOL><INDENT>bool_val = True<EOL><DEDENT>setattr(user, field, bool_val)<EOL>logger.debug('<STR_LIT>'.format(user, field, bool_val))<EOL><DEDENT>else:<EOL><INDENT>msg = "<STR_LIT>"<EOL>raise ImproperlyConfigured(msg.format(field))<EOL><DEDENT><DEDENT>
Updates user boolean attributes based on the BOOLEAN_CLAIM_MAPPING setting. Args: user (django.contrib.auth.models.User): User model instance claims (dict): Claims from the access token
f625:c0:m6
def get_queryset(self):
return Question.objects.filter(<EOL>pub_date__lte=timezone.now()<EOL>).order_by('<STR_LIT>')[:<NUM_LIT:5>]<EOL>
Return the last five published questions (not including those set to be published in the future).
f628:c0:m0
def get_queryset(self):
return Question.objects.filter(pub_date__lte=timezone.now())<EOL>
Excludes any questions that aren't published yet.
f628:c1:m0
def get_queryset(self):
return Question.objects.filter(pub_date__lte=timezone.now())<EOL>
Excludes any questions that aren't published yet.
f628:c2:m0
@action(methods=["<STR_LIT>"], detail=True, permission_classes=[IsAuthenticated])<EOL><INDENT>def vote(self, request, pk=None):<DEDENT>
choice = self.get_object()<EOL>choice.vote()<EOL>serializer = self.get_serializer(choice)<EOL>return Response(serializer.data)<EOL>
post: A description of the post method on the custom action.
f631:c1:m0
def __cmp__(self, other):
if hasattr(other, '<STR_LIT>'):<EOL><INDENT>return cmp(self.content_type.lower(), other.content_type.lower())<EOL><DEDENT>elif isinstance(other, basestring):<EOL><INDENT>return cmp(self.simplified, self.simplify(str(other)))<EOL><DEDENT>else:<EOL><INDENT>return cmp(self.content_type.lower(), other.lower())<EOL><DEDENT>
Compares the MIME::Type against the exact content type or the simplified type (the simplified type will be used if comparing against something that can be treated as a String). In comparisons, this is done against the lowercase version of the MIME::Type.
f649:c1:m3
def __eq__(self, other):
return isinstance(other, self.__class__) and cmp(self, other) == <NUM_LIT:0><EOL>
Returns +true+ if the other object is a MIME::Type and the content types match.
f649:c1:m6
def priority_compare(self, other):
pc = cmp(self.simplified, other.simplified)<EOL>if pc == <NUM_LIT:0>:<EOL><INDENT>if self.is_registered != other.is_registered:<EOL><INDENT>pc = -<NUM_LIT:1> if self.is_registered else <NUM_LIT:1><EOL><DEDENT>elif self.platform != other.platform:<EOL><INDENT>pc = <NUM_LIT:1> if self.platform else -<NUM_LIT:1><EOL><DEDENT>elif self.is_complete != other.is_complete:<EOL><INDENT>pc = -<NUM_LIT:1> if self.is_complete else <NUM_LIT:1><EOL><DEDENT>elif self.is_obsolete != other.is_obsolete:<EOL><INDENT>pc = <NUM_LIT:1> if self.is_obsolete else -<NUM_LIT:1><EOL><DEDENT>if pc == <NUM_LIT:0> and self.is_obsolete and (self.use_instead != other.use_instead):<EOL><INDENT>if self.use_instead is None:<EOL><INDENT>pc = -<NUM_LIT:1><EOL><DEDENT>elif other.use_instead is None:<EOL><INDENT>pc = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>pc = cmp(self.use_instead, other.use_instead)<EOL><DEDENT><DEDENT><DEDENT>return pc<EOL>
Compares the MIME::Type based on how reliable it is before doing a normal <=> comparison. Used by MIME::Types#[] to sort types. The comparisons involved are: 1. self.simplified <=> other.simplified (ensures that we don't try to compare different types) 2. IANA-registered definitions < other definitions. 3. Generic definitions < platform definitions. 4. Complete definitions < incomplete definitions. 5. Current definitions < obsolete definitions. 6. Obsolete with use-instead references < obsolete without. 7. Obsolete use-instead definitions are compared.
f649:c1:m8
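The comparison methods above rely on the Python 2 built-in cmp(), which was removed in Python 3. A shim under which they run unchanged:

def cmp(a, b):
    # Python 2 semantics: -1, 0, or 1.
    return (a > b) - (a < b)

assert cmp('a', 'b') == -1 and cmp(1, 1) == 0 and cmp(2, 1) == 1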
@property<EOL><INDENT>def urls(self):<DEDENT>
def _url(el):<EOL><INDENT>if el == '<STR_LIT>':<EOL><INDENT>return IANA_URL % (self.media_type, self.sub_type)<EOL><DEDENT>elif el == '<STR_LIT>':<EOL><INDENT>return LTSW_URL % self.media_type<EOL><DEDENT>match = re.compile('<STR_LIT>').match(el)<EOL>if match:<EOL><INDENT>return match.group(<NUM_LIT:1>, <NUM_LIT:2>)<EOL><DEDENT>match = re.compile('<STR_LIT>').match(el)<EOL>if match:<EOL><INDENT>return [match.group(<NUM_LIT:1>), CONTACT_URL % match.group(<NUM_LIT:2>)]<EOL><DEDENT>for regex in REGEX_URLS:<EOL><INDENT>match = re.compile(regex).match(el)<EOL>if match:<EOL><INDENT>return REGEX_URLS[regex] % match.group(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>return el<EOL><DEDENT>return map(_url, self.url)<EOL>
The decoded URL list for this MIME::Type. The special URL value IANA will be translated into: http://www.iana.org/assignments/media-types/<mediatype>/<subtype> The special URL value RFC### will be translated into: http://www.rfc-editor.org/rfc/rfc###.txt The special URL value DRAFT:name will be translated into: https://datatracker.ietf.org/public/idindex.cgi? command=id_detail&filename=<name> The special URL value LTSW will be translated into: http://www.ltsw.se/knbase/internet/<mediatype>.htp The special URL value [token] will be translated into: http://www.iana.org/assignments/contact-people.htm#<token> These values will be accessible through #urls, which always returns an array.
f649:c1:m16
@classmethod<EOL><INDENT>def simplify(cls, content_type):<DEDENT>
matchdata = MEDIA_TYPE_RE.match(content_type)<EOL>if matchdata is None:<EOL><INDENT>return None<EOL><DEDENT>wrap = lambda s: re.sub(UNREG_RE, '<STR_LIT>', s.lower())<EOL>(media_type, subtype) = matchdata.groups()<EOL>return '<STR_LIT>' % (wrap(media_type), wrap(subtype))<EOL>
The MIME type's main- and sub-label can both start with <tt>x-</tt>, which indicates that it is a non-registered name. Of course, after registration this flag can disappear, which adds to the confusing proliferation of MIME types. The simplified string has the <tt>x-</tt> removed and is translated to lowercase.
f649:c1:m31
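Since MEDIA_TYPE_RE and UNREG_RE are masked above, here is a sketch of simplify() with plausible regexes filled in by assumption (split on the slash, strip a leading x-):

import re

MEDIA_TYPE_RE = re.compile(r'([\w.+-]+)/([\w.+-]+)')  # assumed pattern
UNREG_RE = re.compile(r'^x-', re.IGNORECASE)          # assumed pattern

def simplify(content_type):
    match = MEDIA_TYPE_RE.match(content_type)
    if match is None:
        return None
    wrap = lambda s: UNREG_RE.sub('', s.lower())
    media_type, subtype = match.groups()
    return '%s/%s' % (wrap(media_type), wrap(subtype))

print(simplify('x-chemical/x-pdb'))  # chemical/pdb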
@classmethod<EOL><INDENT>def from_array(cls, content_type,<EOL>extensions=[], encoding=None, system=None,<EOL>is_obsolete=False, docs=None, url=None, is_registered=False):<DEDENT>
mt = cls(content_type)<EOL>mt.extensions = extensions<EOL>mt.encoding = encoding<EOL>mt.system = system<EOL>mt.is_obsolete = is_obsolete<EOL>mt.docs = docs<EOL>mt.url = url<EOL>mt.registered = is_registered<EOL>return mt<EOL>
Creates a MIME::Type from an array in the form of: [type-name, [extensions], encoding, system] +extensions+, +encoding+, and +system+ are optional. Type.from_array("application/x-ruby", ['rb'], '8bit') # Type.from_array(["application/x-ruby", ['rb'], '8bit']) These are equivalent to: type = Type('application/x-ruby') type.extensions = ['rb'] type.encoding = '8bit'
f649:c1:m32
@classmethod<EOL><INDENT>def from_hash(cls, hash):<DEDENT>
wrap_key = lambda k: k.lower().replace('<STR_LIT:->', '<STR_LIT:_>')<EOL>type_hash = dict([(wrap_key(k), v) for k, v in hash.items()])<EOL>mt = cls(type_hash['<STR_LIT>'])<EOL>mt.extensions = type_hash.get('<STR_LIT>', [])<EOL>mt.encoding = type_hash.get('<STR_LIT>', '<STR_LIT:default>')<EOL>mt.system = type_hash.get('<STR_LIT>')<EOL>mt.is_obsolete = type_hash.get('<STR_LIT>', False)<EOL>mt.docs = type_hash.get('<STR_LIT>')<EOL>mt.url = type_hash.get('<STR_LIT:url>')<EOL>mt.registered = type_hash.get('<STR_LIT>', False)<EOL>return mt<EOL>
Creates a MIME::Type from a hash. Keys are case-insensitive, dashes may be replaced with underscores, and the internal Symbol of the lowercase-underscore version can be used as well. That is, Content-Type can be provided as content-type, Content_Type, content_type, or :content_type. Known keys are <tt>Content-Type</tt>, <tt>Content-Transfer-Encoding</tt>, <tt>Extensions</tt>, and <tt>System</tt>. Type.from_hash({'Content-Type': 'text/x-yaml', 'Content-Transfer-Encoding': '8bit', 'System': 'linux', 'Extensions': ['yaml', 'yml']}) This is equivalent to: t = Type.new('text/x-yaml') t.encoding = '8bit' t.system = 'linux' t.extensions = ['yaml', 'yml']
f649:c1:m33
@classmethod<EOL><INDENT>def from_mime_type(cls, mime_type):<DEDENT>
mt = cls(deepcopy(mime_type.content_type))<EOL>mt.extensions = map(deepcopy, mime_type.extensions)<EOL>mt.url = mime_type.url and map(deepcopy, mime_type.url) or None<EOL>mt.system = deepcopy(mime_type.system)<EOL>mt.encoding = deepcopy(mime_type.encoding)<EOL>mt.docs = deepcopy(mime_type.docs)<EOL>mt.is_obsolete = mime_type.is_obsolete<EOL>mt.registered = mime_type.is_registered<EOL>return mt<EOL>
Essentially a copy constructor. Type.from_mime_type(plaintext) is equivalent to: t = Type.new(plaintext.content_type.dup) t.extensions = plaintext.extensions.dup t.system = plaintext.system.dup t.encoding = plaintext.encoding.dup
f649:c1:m34
def _predict(distribution, offset, kernel):
N = len(distribution)<EOL>kN = len(kernel)<EOL>width = int((kN - <NUM_LIT:1>) / <NUM_LIT:2>)<EOL>prior = np.zeros(N)<EOL>for i in range(N):<EOL><INDENT>for k in range (kN):<EOL><INDENT>index = (i + (width-k) - offset) % N<EOL>prior[i] += distribution[index] * kernel[k]<EOL><DEDENT><DEDENT>return prior<EOL>
Explicit convolution with wraparound.
f654:m0
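A sanity-check sketch: the explicit double loop above should agree with shifting via np.roll and convolving with mode='wrap', which is how the library-level predict() below implements the same step.

import numpy as np
from scipy.ndimage import convolve

def predict_loop(distribution, offset, kernel):
    # Explicit convolution with wraparound, placeholders filled in.
    N, kN = len(distribution), len(kernel)
    width = (kN - 1) // 2
    prior = np.zeros(N)
    for i in range(N):
        for k in range(kN):
            prior[i] += distribution[(i + (width - k) - offset) % N] * kernel[k]
    return prior

belief = np.array([.05, .05, .05, .05, .55, .05, .05, .05, .05, .05])
kernel = np.array([.1, .8, .1])
assert np.allclose(predict_loop(belief, 2, kernel),
                   convolve(np.roll(belief, 2), kernel, mode='wrap'))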
def normalize(pdf):
pdf /= sum(np.asarray(pdf, dtype=float))<EOL>return pdf<EOL>
Normalize distribution `pdf` in-place so it sums to 1.0. Returns pdf for convenience, so you can write things like: >>> kernel = normalize(randn(7)) Parameters ---------- pdf : ndarray discrete distribution that needs to be converted to a pdf. Converted in-place, i.e., this is modified. Returns ------- pdf : ndarray The converted pdf.
f656:m0
def update(likelihood, prior):
posterior = prior * likelihood<EOL>return normalize(posterior)<EOL>
Computes the posterior of a discrete random variable given a discrete likelihood and prior. In a typical application the likelihood will be the likelihood of a measurement matching your current environment, and the prior comes from discrete_bayes.predict(). Parameters ---------- likelihood : ndarray, dtype=float array of likelihood values prior : ndarray, dtype=float prior pdf. Returns ------- posterior : ndarray, dtype=float Returns array representing the posterior. Examples -------- .. code-block:: Python # self driving car. Sensor returns values that can be equated to positions # on the road. A real likelihood computation would be much more complicated # than this example. likelihood = np.ones(len(road)) likelihood[road==z] *= scale_factor prior = predict(posterior, velocity, kernel) posterior = update(likelihood, prior)
f656:m1
def predict(pdf, offset, kernel, mode='<STR_LIT>', cval=<NUM_LIT:0.>):
if mode == '<STR_LIT>':<EOL><INDENT>return convolve(np.roll(pdf, offset), kernel, mode='<STR_LIT>')<EOL><DEDENT>return convolve(shift(pdf, offset, cval=cval), kernel,<EOL>cval=cval, mode='<STR_LIT>')<EOL>
Performs the discrete Bayes filter prediction step, generating the prior. `pdf` is a discrete probability distribution expressing our initial belief. `offset` is an integer specifying how much we want to move to the right (negative values means move to the left) We assume there is some noise in that offset, which we express in `kernel`. For example, if offset=3 and kernel=[.1, .7., .2], that means we think there is a 70% chance of moving right by 3, a 10% chance of moving 2 spaces, and a 20% chance of moving by 4. It returns the resulting distribution. If `mode='wrap'`, then the probability distribution is wrapped around the array. If `mode='constant'`, or any other value the pdf is shifted, with `cval` used to fill in missing elements. Examples -------- .. code-block:: Python belief = [.05, .05, .05, .05, .55, .05, .05, .05, .05, .05] prior = predict(belief, offset=2, kernel=[.1, .8, .1])
f656:m2
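An end-to-end sketch of one predict/update cycle using the functions above; it assumes they are importable from filterpy.discrete_bayes, as in filterpy releases.

import numpy as np
from filterpy.discrete_bayes import predict, update

belief = np.array([.05, .05, .05, .05, .55, .05, .05, .05, .05, .05])
prior = predict(belief, offset=2, kernel=[.1, .8, .1], mode='wrap')

likelihood = np.ones(len(belief))
likelihood[3] *= 3.              # the sensor favours position 3
posterior = update(likelihood, prior)
assert np.isclose(posterior.sum(), 1.0)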
def update(self, z):
if z is None:<EOL><INDENT>return<EOL><DEDENT>I = self._I<EOL>gamma = self.gamma<EOL>Q = self.Q<EOL>H = self.H<EOL>P = self.P<EOL>x = self.x<EOL>V_inv = self._V_inv<EOL>F = self.F<EOL>W = self.W<EOL>HTVI = dot(H.T, V_inv)<EOL>L = linalg.inv(I - gamma * dot(Q, P) + dot(HTVI, H).dot(P))<EOL>PL = dot(P, L)<EOL>K = dot(F, PL).dot(HTVI)<EOL>self.y = z - dot(H, x)<EOL>self.x = self.x + dot(K, self.y)<EOL>self.P = dot(F, PL).dot(F.T) + W<EOL>self.P = (self.P + self.P.T) / <NUM_LIT:2><EOL>try:<EOL><INDENT>self.z = np.copy(z)<EOL><DEDENT>except:<EOL><INDENT>self.z = copy.deepcopy(z)<EOL><DEDENT>
Add a new measurement `z` to the H-Infinity filter. If `z` is None, nothing is changed. Parameters ---------- z : ndarray measurement for this update.
f659:c0:m1
def predict(self, u=<NUM_LIT:0>):
<EOL>self.x = dot(self.F, self.x) + dot(self.B, u)<EOL>
Predict next position. Parameters ---------- u : ndarray Optional control vector. If non-zero, it is multiplied by `B` to create the control input into the system.
f659:c0:m2
def batch_filter(self, Zs, update_first=False, saver=None):
n = np.size(Zs, <NUM_LIT:0>)<EOL>means = zeros((n, self.dim_x, <NUM_LIT:1>))<EOL>covariances = zeros((n, self.dim_x, self.dim_x))<EOL>if update_first:<EOL><INDENT>for i, z in enumerate(Zs):<EOL><INDENT>self.update(z)<EOL>means[i, :] = self.x<EOL>covariances[i, :, :] = self.P<EOL>self.predict()<EOL>if saver is not None:<EOL><INDENT>saver.save()<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for i, z in enumerate(Zs):<EOL><INDENT>self.predict()<EOL>self.update(z)<EOL>means[i, :] = self.x<EOL>covariances[i, :, :] = self.P<EOL>if saver is not None:<EOL><INDENT>saver.save()<EOL><DEDENT><DEDENT><DEDENT>return (means, covariances)<EOL>
Batch processes a sequence of measurements. Parameters ---------- Zs : list-like list of measurements at each time step `self.dt` Missing measurements must be represented by 'None'. update_first : bool, default=False, optional, controls whether the order of operations is update followed by predict, or predict followed by update. saver : filterpy.common.Saver, optional filterpy.common.Saver object. If provided, saver.save() will be called after every epoch Returns ------- means: ndarray ((n, dim_x, 1)) array of the state for each time step. Each entry is an np.array. In other words `means[k,:]` is the state at step `k`. covariance: ndarray((n, dim_x, dim_x)) array of the covariances for each time step. In other words `covariance[k, :, :]` is the covariance at step `k`.
f659:c0:m3
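A hedged driver sketch for the predict-then-update ordering implemented by batch_filter(); it duck-types against any filter exposing predict(), update(z) and x, since the H-Infinity constructor arguments are masked in this corpus.

import numpy as np

def run_filter(f, zs):
    # Predict, then update, collecting the state after each measurement.
    means = []
    for z in zs:
        f.predict()
        f.update(z)
        means.append(np.copy(f.x))
    return np.array(means)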
def get_prediction(self, u=<NUM_LIT:0>):
return dot(self.F, self.x) + dot(self.B, u)<EOL>
Predicts the next state of the filter and returns it. Does not alter the state of the filter. Parameters ---------- u : ndarray optional control input Returns ------- x : ndarray State vector of the prediction.
f659:c0:m4
def residual_of(self, z):
return z - dot(self.H, self.x)<EOL>
Returns the residual for the given measurement (z). Does not alter the state of the filter.
f659:c0:m5
def measurement_of_state(self, x):
return dot(self.H, x)<EOL>
Helper function that converts a state into a measurement. Parameters ---------- x : ndarray H-Infinity state vector Returns ------- z : ndarray measurement corresponding to the given state
f659:c0:m6
@property<EOL><INDENT>def V(self):<DEDENT>
return self._V<EOL>
measurement noise matrix
f659:c0:m7
@V.setter<EOL><INDENT>def V(self, value):<DEDENT>
if np.isscalar(value):<EOL><INDENT>self._V = np.array([[value]], dtype=float)<EOL><DEDENT>else:<EOL><INDENT>self._V = value<EOL><DEDENT>self._V_inv = linalg.inv(self._V)<EOL>
measurement noise matrix
f659:c0:m8
def residual_resample(weights):
N = len(weights)<EOL>indexes = np.zeros(N, '<STR_LIT:i>')<EOL>num_copies = (np.floor(N*np.asarray(weights))).astype(int)<EOL>k = <NUM_LIT:0><EOL>for i in range(N):<EOL><INDENT>for _ in range(num_copies[i]): <EOL><INDENT>indexes[k] = i<EOL>k += <NUM_LIT:1><EOL><DEDENT><DEDENT>residual = weights - num_copies <EOL>residual /= sum(residual) <EOL>cumulative_sum = np.cumsum(residual)<EOL>cumulative_sum[-<NUM_LIT:1>] = <NUM_LIT:1.> <EOL>indexes[k:N] = np.searchsorted(cumulative_sum, random(N-k))<EOL>return indexes<EOL>
Performs the residual resampling algorithm used by particle filters. Based on the observation that we don't need to use random numbers to select most of the weights. Take int(N*w^i) samples of each particle i, and then resample any remaining using a standard resampling algorithm [1] Parameters ---------- weights : list-like of float list of weights as floats Returns ------- indexes : ndarray of ints array of indexes into the weights defining the resample. i.e. the index of the zeroth resample is indexes[0], etc. References ---------- .. [1] J. S. Liu and R. Chen. Sequential Monte Carlo methods for dynamic systems. Journal of the American Statistical Association, 93(443):1032–1044, 1998.
f661:m0
def stratified_resample(weights):
N = len(weights)<EOL>positions = (random(N) + range(N)) / N<EOL>indexes = np.zeros(N, '<STR_LIT:i>')<EOL>cumulative_sum = np.cumsum(weights)<EOL>i, j = <NUM_LIT:0>, <NUM_LIT:0><EOL>while i < N:<EOL><INDENT>if positions[i] < cumulative_sum[j]:<EOL><INDENT>indexes[i] = j<EOL>i += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>j += <NUM_LIT:1><EOL><DEDENT><DEDENT>return indexes<EOL>
Performs the stratified resampling algorithm used by particle filters. This algorithm aims to make selections relatively uniformly across the particles. It divides the cumulative sum of the weights into N equal divisions, and then selects one particle randomly from each division. This guarantees that each sample is between 0 and 2/N apart. Parameters ---------- weights : list-like of float list of weights as floats Returns ------- indexes : ndarray of ints array of indexes into the weights defining the resample. i.e. the index of the zeroth resample is indexes[0], etc.
f661:m1
def systematic_resample(weights):
N = len(weights)<EOL>positions = (random() + np.arange(N)) / N<EOL>indexes = np.zeros(N, '<STR_LIT:i>')<EOL>cumulative_sum = np.cumsum(weights)<EOL>i, j = <NUM_LIT:0>, <NUM_LIT:0><EOL>while i < N:<EOL><INDENT>if positions[i] < cumulative_sum[j]:<EOL><INDENT>indexes[i] = j<EOL>i += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>j += <NUM_LIT:1><EOL><DEDENT><DEDENT>return indexes<EOL>
Performs the systematic resampling algorithm used by particle filters. This algorithm separates the sample space into N divisions. A single random offset is used to choose where to sample from for all divisions. This guarantees that every sample is exactly 1/N apart. Parameters ---------- weights : list-like of float list of weights as floats Returns ------- indexes : ndarray of ints array of indexes into the weights defining the resample. i.e. the index of the zeroth resample is indexes[0], etc.
f661:m2
def multinomial_resample(weights):
cumulative_sum = np.cumsum(weights)<EOL>cumulative_sum[-<NUM_LIT:1>] = <NUM_LIT:1.> <EOL>return np.searchsorted(cumulative_sum, random(len(weights)))<EOL>
This is the naive form of roulette sampling where we compute the cumulative sum of the weights and then use binary search to select the resampled point based on a uniformly distributed random number. Run time is O(n log n). You do not want to use this algorithm in practice; for some reason it is popular in blogs and online courses so I included it for reference. Parameters ---------- weights : list-like of float list of weights as floats Returns ------- indexes : ndarray of ints array of indexes into the weights defining the resample. i.e. the index of the zeroth resample is indexes[0], etc.
f661:m3
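A usage sketch exercising all four resamplers on the same weight vector; the module path filterpy.monte_carlo is assumed from filterpy's layout. Heavily weighted particles should appear several times in the returned indexes.

import numpy as np
from filterpy.monte_carlo import (multinomial_resample, residual_resample,
                                  stratified_resample, systematic_resample)

weights = np.array([.02, .02, .02, .02, .40, .40, .04, .04, .02, .02])
for resample in (residual_resample, stratified_resample,
                 systematic_resample, multinomial_resample):
    indexes = resample(weights)
    print(resample.__name__, np.bincount(indexes, minlength=len(weights)))
# particles[indexes] would then give the resampled particle set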
def predict(self, u=<NUM_LIT:0>):
for f in self.filters:<EOL><INDENT>f.predict(u)<EOL><DEDENT>self.x_prior = self.x.copy()<EOL>self.P_prior = self.P.copy()<EOL>
Predict next position using the Kalman filter state propagation equations for each filter in the bank. Parameters ---------- u : np.array Optional control vector. If non-zero, it is multiplied by B to create the control input into the system.
f662:c0:m1
def update(self, z, R=None, H=None):
if H is None:<EOL><INDENT>H = self.H<EOL><DEDENT>for i, f in enumerate(self.filters):<EOL><INDENT>f.update(z, R, H)<EOL>self.p[i] *= f.likelihood<EOL><DEDENT>self.p /= sum(self.p) <EOL>self.P = np.zeros(self.filters[<NUM_LIT:0>].P.shape)<EOL>is_row_vector = (self.filters[<NUM_LIT:0>].x.ndim == <NUM_LIT:1>)<EOL>if is_row_vector:<EOL><INDENT>self.x = np.zeros(self.dim_x)<EOL>for f, p in zip(self.filters, self.p):<EOL><INDENT>self.x += np.dot(f.x, p)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.x = np.zeros((self.dim_x, <NUM_LIT:1>))<EOL>for f, p in zip(self.filters, self.p):<EOL><INDENT>self.x += np.dot(f.x, p)<EOL><DEDENT><DEDENT>for x, f, p in zip(self.x, self.filters, self.p):<EOL><INDENT>y = f.x - x<EOL>self.P += p*(np.outer(y, y) + f.P)<EOL><DEDENT>self.z = deepcopy(z)<EOL>self.x_post = self.x.copy()<EOL>self.P_post = self.P.copy()<EOL>
Add a new measurement (z) to the Kalman filter. If z is None, nothing is changed. Parameters ---------- z : np.array measurement for this update. R : np.array, scalar, or None Optionally provide R to override the measurement noise for this one call, otherwise self.R will be used. H : np.array, or None Optionally provide H to override the measurement function for this one call, otherwise self.H will be used.
f662:c0:m2
def num_sigmas(self):
return <NUM_LIT:2>*self.n + <NUM_LIT:1><EOL>
Number of sigma points for each variable in the state x
f663:c0:m1
def sigma_points(self, x, P):
if self.n != np.size(x):<EOL><INDENT>raise ValueError("<STR_LIT>".format(<EOL>self.n, np.size(x)))<EOL><DEDENT>n = self.n<EOL>if np.isscalar(x):<EOL><INDENT>x = np.asarray([x])<EOL><DEDENT>if np.isscalar(P):<EOL><INDENT>P = np.eye(n)*P<EOL><DEDENT>else:<EOL><INDENT>P = np.atleast_2d(P)<EOL><DEDENT>lambda_ = self.alpha**<NUM_LIT:2> * (n + self.kappa) - n<EOL>U = self.sqrt((lambda_ + n)*P)<EOL>sigmas = np.zeros((<NUM_LIT:2>*n+<NUM_LIT:1>, n))<EOL>sigmas[<NUM_LIT:0>] = x<EOL>for k in range(n):<EOL><INDENT>sigmas[k+<NUM_LIT:1>] = self.subtract(x, -U[k])<EOL>sigmas[n+k+<NUM_LIT:1>] = self.subtract(x, U[k])<EOL><DEDENT>return sigmas<EOL>
Computes the sigma points for an unscented Kalman filter given the mean (x) and covariance(P) of the filter. Returns the sigma points. Works with both scalar and array inputs: sigma_points (5, 9, 2) # mean 5, covariance 9 sigma_points ([5, 2], 9*eye(2), 2) # means 5 and 2, covariance 9I Parameters ---------- x : An array-like object of the means of length n Can be a scalar if 1D. examples: 1, [1,2], np.array([1,2]) P : scalar, or np.array Covariance of the filter. If scalar, is treated as eye(n)*P. Returns ------- sigmas : np.array, of size (2n+1, n) Two dimensional array of sigma points. Each column contains all of the sigmas for one dimension in the problem space. Ordered by Xi_0, Xi_{1..n}, Xi_{n+1..2n}
f663:c0:m2
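A usage sketch for the scaled (Van der Merwe) formulation above, assuming filterpy's MerweScaledSigmaPoints class; parameter values are illustrative.

import numpy as np
from filterpy.kalman import MerweScaledSigmaPoints

points = MerweScaledSigmaPoints(n=2, alpha=.1, beta=2., kappa=1.)
sigmas = points.sigma_points(np.array([0., 1.]), np.eye(2) * 9.)
print(sigmas.shape)                      # (5, 2): 2n+1 points for n=2
assert np.isclose(points.Wm.sum(), 1.0)  # mean weights sum to 1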
def _compute_weights(self):
n = self.n<EOL>lambda_ = self.alpha**<NUM_LIT:2> * (n + self.kappa) - n<EOL>c = <NUM_LIT> / (n + lambda_)<EOL>self.Wc = np.full(<NUM_LIT:2>*n + <NUM_LIT:1>, c)<EOL>self.Wm = np.full(<NUM_LIT:2>*n + <NUM_LIT:1>, c)<EOL>self.Wc[<NUM_LIT:0>] = lambda_ / (n + lambda_) + (<NUM_LIT:1> - self.alpha**<NUM_LIT:2> + self.beta)<EOL>self.Wm[<NUM_LIT:0>] = lambda_ / (n + lambda_)<EOL>
Computes the weights for the scaled unscented Kalman filter.
f663:c0:m3
def num_sigmas(self):
return <NUM_LIT:2>*self.n + <NUM_LIT:1><EOL>
Number of sigma points for each variable in the state x
f663:c1:m1
def sigma_points(self, x, P):
if self.n != np.size(x):<EOL><INDENT>raise ValueError("<STR_LIT>".format(<EOL>self.n, np.size(x)))<EOL><DEDENT>n = self.n<EOL>if np.isscalar(x):<EOL><INDENT>x = np.asarray([x])<EOL><DEDENT>n = np.size(x) <EOL>if np.isscalar(P):<EOL><INDENT>P = np.eye(n) * P<EOL><DEDENT>else:<EOL><INDENT>P = np.atleast_2d(P)<EOL><DEDENT>sigmas = np.zeros((<NUM_LIT:2>*n+<NUM_LIT:1>, n))<EOL>U = self.sqrt((n + self.kappa) * P)<EOL>sigmas[<NUM_LIT:0>] = x<EOL>for k in range(n):<EOL><INDENT>sigmas[k+<NUM_LIT:1>] = self.subtract(x, -U[k])<EOL>sigmas[n+k+<NUM_LIT:1>] = self.subtract(x, U[k])<EOL><DEDENT>return sigmas<EOL>
r""" Computes the sigma points for an unscented Kalman filter given the mean (x) and covariance(P) of the filter. kappa is an arbitrary constant. Returns sigma points. Works with both scalar and array inputs: sigma_points (5, 9, 2) # mean 5, covariance 9 sigma_points ([5, 2], 9*eye(2), 2) # means 5 and 2, covariance 9I Parameters ---------- x : array-like object of the means of length n Can be a scalar if 1D. examples: 1, [1,2], np.array([1,2]) P : scalar, or np.array Covariance of the filter. If scalar, is treated as eye(n)*P. kappa : float Scaling factor. Returns ------- sigmas : np.array, of size (n, 2n+1) 2D array of sigma points :math:`\chi`. Each column contains all of the sigmas for one dimension in the problem space. They are ordered as: .. math:: :nowrap: \begin{eqnarray} \chi[0] = &x \\ \chi[1..n] = &x + [\sqrt{(n+\kappa)P}]_k \\ \chi[n+1..2n] = &x - [\sqrt{(n+\kappa)P}]_k \end{eqnarray}
f663:c1:m2
def _compute_weights(self):
n = self.n<EOL>k = self.kappa<EOL>self.Wm = np.full(<NUM_LIT:2>*n+<NUM_LIT:1>, <NUM_LIT> / (n + k))<EOL>self.Wm[<NUM_LIT:0>] = k / (n+k)<EOL>self.Wc = self.Wm<EOL>
Computes the weights for the unscented Kalman filter. In this formulation the weights for the mean and covariance are the same.
f663:c1:m3
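A matching sketch for the Julier formulation, assuming filterpy's JulierSigmaPoints; note the shared weights established by _compute_weights() above.

import numpy as np
from filterpy.kalman import JulierSigmaPoints

points = JulierSigmaPoints(n=2, kappa=1.)  # kappa = 3 - n is a common choice
sigmas = points.sigma_points(np.array([5., 2.]), np.eye(2) * 9.)
print(sigmas.shape)             # (5, 2)
assert points.Wm is points.Wc   # mean and covariance share one weight vector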
def num_sigmas(self):
return self.n + <NUM_LIT:1><EOL>
Number of sigma points for each variable in the state x
f663:c2:m1
def sigma_points(self, x, P):
if self.n != np.size(x):<EOL><INDENT>raise ValueError("<STR_LIT>".format(<EOL>self.n, np.size(x)))<EOL><DEDENT>n = self.n<EOL>if np.isscalar(x):<EOL><INDENT>x = np.asarray([x])<EOL><DEDENT>x = x.reshape(-<NUM_LIT:1>, <NUM_LIT:1>)<EOL>if np.isscalar(P):<EOL><INDENT>P = np.eye(n) * P<EOL><DEDENT>else:<EOL><INDENT>P = np.atleast_2d(P)<EOL><DEDENT>U = self.sqrt(P)<EOL>lambda_ = n / (n + <NUM_LIT:1>)<EOL>Istar = np.array([[-<NUM_LIT:1>/np.sqrt(<NUM_LIT:2>*lambda_), <NUM_LIT:1>/np.sqrt(<NUM_LIT:2>*lambda_)]])<EOL>for d in range(<NUM_LIT:2>, n+<NUM_LIT:1>):<EOL><INDENT>row = np.ones((<NUM_LIT:1>, Istar.shape[<NUM_LIT:1>] + <NUM_LIT:1>)) * <NUM_LIT:1.> / np.sqrt(lambda_*d*(d + <NUM_LIT:1>))<EOL>row[<NUM_LIT:0>, -<NUM_LIT:1>] = -d / np.sqrt(lambda_ * d * (d + <NUM_LIT:1>))<EOL>Istar = np.r_[np.c_[Istar, np.zeros((Istar.shape[<NUM_LIT:0>]))], row]<EOL><DEDENT>I = np.sqrt(n)*Istar<EOL>scaled_unitary = U.dot(I)<EOL>sigmas = self.subtract(x, -scaled_unitary)<EOL>return sigmas.T<EOL>
Computes the simplex sigma points for an unscented Kalman filter given the mean (x) and covariance(P) of the filter. Returns the sigma points. Works with both scalar and array inputs: sigma_points (5, 9, 2) # mean 5, covariance 9 sigma_points ([5, 2], 9*eye(2), 2) # means 5 and 2, covariance 9I Parameters ---------- x : An array-like object of the means of length n Can be a scalar if 1D. examples: 1, [1,2], np.array([1,2]) P : scalar, or np.array Covariance of the filter. If scalar, is treated as eye(n)*P. Returns ------- sigmas : np.array, of size (n+1, n) Two dimensional array of sigma points. Each column contains all of the sigmas for one dimension in the problem space. Ordered by Xi_0, Xi_{1..n}
f663:c2:m2
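Finally, a sketch for the simplex formulation, assuming filterpy's SimplexSigmaPoints; it needs only n + 1 sigma points instead of 2n + 1.

import numpy as np
from filterpy.kalman import SimplexSigmaPoints

points = SimplexSigmaPoints(n=2)
sigmas = points.sigma_points(np.array([5., 2.]), np.eye(2) * 9.)
print(sigmas.shape)  # (3, 2): n + 1 points for n = 2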