def _compute_weights(self):
    n = self.n
    c = 1. / (n + 1)
    self.Wm = np.full(n + 1, c)
    self.Wc = self.Wm
Computes the weights for the scaled unscented Kalman filter.
f663:c2:m3
def spherical_radial_sigmas(x, P):
    n, _ = P.shape
    x = x.flatten()

    sigmas = np.empty((2*n, n))
    U = cholesky(P) * sqrt(n)

    for k in range(n):
        sigmas[k] = x + U[k]
        sigmas[n+k] = x - U[k]

    return sigmas
r""" Creates cubature points for the the specified state and covariance according to [1]. Parameters ---------- x: ndarray (column vector) examples: np.array([[1.], [2.]]) P : scalar, or np.array Covariance of the filter. References ---------- .. [1] Arasaratnam, I, Haykin, S. "Cubature Kalman Filters," IEEE Transactions on Automatic Control, 2009, pp 1254-1269, vol 54, No 6
f664:m0
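The cubature rule above is exact for the first two moments: the 2n points have sample mean x and sample covariance P. A self-contained sketch verifying this (the state and covariance values are illustrative, not from the source):

.. code-block:: Python

    import numpy as np
    from math import sqrt
    from scipy.linalg import cholesky

    x = np.array([1., 2.])
    P = np.array([[4., 1.],
                  [1., 3.]])
    n = x.shape[0]

    # generate the 2n cubature points, mirroring spherical_radial_sigmas()
    U = cholesky(P) * sqrt(n)
    sigmas = np.empty((2*n, n))
    for k in range(n):
        sigmas[k] = x + U[k]
        sigmas[n+k] = x - U[k]

    mean = sigmas.mean(axis=0)          # recovers x exactly
    diff = sigmas - mean
    cov = diff.T @ diff / (2*n)         # recovers P exactly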
def ckf_transform(Xs, Q):
    m, n = Xs.shape
    x = sum(Xs, 0)[:, None] / m

    P = np.zeros((n, n))
    xf = x.flatten()
    for k in range(m):
        P += np.outer(Xs[k], Xs[k]) - np.outer(xf, xf)

    P *= 1 / m
    P += Q

    return x, P
Compute mean and covariance of array of cubature points.

Parameters
----------
Xs : ndarray
    Cubature points
Q : ndarray
    Noise covariance

Returns
-------
mean : ndarray
    mean of the cubature points
variance : ndarray
    covariance matrix of the cubature points
f664:m1
def predict(self, dt=None, fx_args=()):
    if dt is None:
        dt = self._dt

    if not isinstance(fx_args, tuple):
        fx_args = (fx_args,)

    sigmas = spherical_radial_sigmas(self.x, self.P)

    # evaluate the state transition on each cubature point
    for k in range(self._num_sigmas):
        self.sigmas_f[k] = self.fx(sigmas[k], dt, *fx_args)

    self.x, self.P = ckf_transform(self.sigmas_f, self.Q)

    # save prior
    self.x_prior = self.x.copy()
    self.P_prior = self.P.copy()
r""" Performs the predict step of the CKF. On return, self.x and self.P contain the predicted state (x) and covariance (P). Important: this MUST be called before update() is called for the first time. Parameters ---------- dt : double, optional If specified, the time step to be used for this prediction. self._dt is used if this is not provided. fx_args : tuple, optional, default (,) optional arguments to be passed into fx() after the required state variable.
f664:c0:m1
def update(self, z, R=None, hx_args=()):
    if z is None:
        self.z = np.array([[None]*self.dim_z]).T
        self.x_post = self.x.copy()
        self.P_post = self.P.copy()
        return

    if not isinstance(hx_args, tuple):
        hx_args = (hx_args,)

    if R is None:
        R = self.R
    elif isscalar(R):
        R = eye(self.dim_z) * R

    for k in range(self._num_sigmas):
        self.sigmas_h[k] = self.hx(self.sigmas_f[k], *hx_args)

    # mean and covariance of the measurement sigma points
    zp, self.S = ckf_transform(self.sigmas_h, R)
    self.SI = inv(self.S)

    # compute cross variance of the state and the measurements
    m = self._num_sigmas
    xf = self.x.flatten()
    zpf = zp.flatten()
    Pxz = outer_product_sum(self.sigmas_f - xf, self.sigmas_h - zpf) / m

    self.K = dot(Pxz, self.SI)         # Kalman gain
    self.y = self.residual_z(z, zp)    # residual

    self.x = self.x + dot(self.K, self.y)
    self.P = self.P - dot(self.K, self.S).dot(self.K.T)

    # save measurement and posterior state
    self.z = deepcopy(z)
    self.x_post = self.x.copy()
    self.P_post = self.P.copy()

    self._log_likelihood = None
    self._likelihood = None
    self._mahalanobis = None
Update the CKF with the given measurements. On return, self.x
and self.P contain the new mean and covariance of the filter.

Parameters
----------
z : numpy.array of shape (dim_z)
    measurement vector
R : numpy.array((dim_z, dim_z)), optional
    Measurement noise. If provided, overrides self.R for this
    function call.
hx_args : tuple, optional, default (,)
    arguments to be passed into Hx function after the required state
    variable.
f664:c0:m2
@property
def log_likelihood(self):
    if self._log_likelihood is None:
        self._log_likelihood = logpdf(x=self.y, cov=self.S)
    return self._log_likelihood
log-likelihood of the last measurement.
f664:c0:m3
@property
def likelihood(self):
    if self._likelihood is None:
        self._likelihood = exp(self.log_likelihood)
        if self._likelihood == 0:
            self._likelihood = sys.float_info.min
    return self._likelihood
Computed from the log-likelihood. The log-likelihood can be very small, meaning a large negative value such as -28000. Taking the exp() of that results in 0.0, which can break typical algorithms which multiply by this value, so by default we always return a number >= sys.float_info.min.
f664:c0:m4
@property
def mahalanobis(self):
    if self._mahalanobis is None:
        self._mahalanobis = sqrt(float(dot(dot(self.y.T, self.SI), self.y)))
    return self._mahalanobis
Mahalanobis distance of innovation. E.g. 3 means measurement was 3
standard deviations away from the predicted value.

Returns
-------
mahalanobis : float
f664:c0:m5
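Taken together, predict() and update() above form the cubature Kalman filter loop. A hedged usage sketch with filterpy's CubatureKalmanFilter; the motion model, measurement model, and numbers are illustrative assumptions:

.. code-block:: Python

    import numpy as np
    from filterpy.kalman import CubatureKalmanFilter

    def fx(x, dt):
        # constant velocity motion model (assumed for illustration)
        return np.array([x[0] + dt*x[1], x[1]])

    def hx(x):
        # measure position only
        return np.array([x[0]])

    ckf = CubatureKalmanFilter(dim_x=2, dim_z=1, dt=1., hx=hx, fx=fx)
    ckf.x = np.array([0., 1.])
    ckf.P *= 10.
    ckf.R *= 0.5

    for z in [1.2, 1.9, 3.1]:
        ckf.predict()
        ckf.update(np.array([z]))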
def update(x, P, z, R, H=None, return_all=False):
    if z is None:
        if return_all:
            return x, P, None, None, None, None
        return x, P

    if H is None:
        H = np.array([1])

    if np.isscalar(H):
        H = np.array([H])

    Hx = np.atleast_1d(dot(H, x))
    z = reshape_z(z, Hx.shape[0], x.ndim)

    # error (residual) between measurement and prediction
    y = z - Hx

    # project system uncertainty into measurement space
    S = dot(dot(H, P), H.T) + R

    # map system uncertainty into kalman gain
    try:
        K = dot(dot(P, H.T), linalg.inv(S))
    except:
        # can't invert a 1D array, annoyingly
        K = dot(dot(P, H.T), 1./S)

    # predict new x with residual scaled by the kalman gain
    x = x + dot(K, y)

    # P = (I-KH)P(I-KH)' + KRK'
    KH = dot(K, H)

    try:
        I_KH = np.eye(KH.shape[0]) - KH
    except:
        I_KH = np.array([1 - KH])
    P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T)

    if return_all:
        # compute log likelihood
        log_likelihood = logpdf(z, dot(H, x), S)
        return x, P, y, K, S, log_likelihood

    return x, P
Add a new measurement (z) to the Kalman filter. If z is None, nothing
is changed.

This can handle either the multidimensional or unidimensional case. If
all parameters are floats instead of arrays the filter will still work,
and return floats for x, P as the result.

update(1, 2, 1, 1, 1)      # univariate
update(x, P, z, R, H)      # multivariate

Parameters
----------
x : numpy.array(dim_x, 1), or float
    State estimate vector
P : numpy.array(dim_x, dim_x), or float
    Covariance matrix
z : (dim_z, 1): array_like
    measurement for this update. z can be a scalar if dim_z is 1,
    otherwise it must be convertible to a column vector.
R : numpy.array(dim_z, dim_z), or float
    Measurement noise matrix
H : numpy.array(dim_x, dim_x), or float, optional
    Measurement function. If not provided, a value of 1 is assumed.
return_all : bool, default False
    If true, y, K, S, and log_likelihood are returned, otherwise
    only x and P are returned.

Returns
-------
x : numpy.array
    Posterior state estimate vector
P : numpy.array
    Posterior covariance matrix
y : numpy.array or scalar
    Residual. Difference between measurement and state in measurement
    space
K : numpy.array
    Kalman gain
S : numpy.array
    System uncertainty in measurement space
log_likelihood : float
    log likelihood of the measurement
f665:m0
def update_steadystate(x, z, K, H=None):
    if z is None:
        return x

    if H is None:
        H = np.array([1])

    if np.isscalar(H):
        H = np.array([H])

    Hx = np.atleast_1d(dot(H, x))
    z = reshape_z(z, Hx.shape[0], x.ndim)

    # residual between measurement and prediction
    y = z - Hx

    return x + dot(K, y)
Add a new measurement (z) to the Kalman filter. If z is None, nothing
is changed.

Parameters
----------
x : numpy.array(dim_x, 1), or float
    State estimate vector
z : (dim_z, 1): array_like
    measurement for this update. z can be a scalar if dim_z is 1,
    otherwise it must be convertible to a column vector.
K : numpy.array, or float
    Kalman gain matrix
H : numpy.array(dim_x, dim_x), or float, optional
    Measurement function. If not provided, a value of 1 is assumed.

Returns
-------
x : numpy.array
    Posterior state estimate vector

Examples
--------
This can handle either the multidimensional or unidimensional case. If
all parameters are floats instead of arrays the filter will still work,
and return a float for x as the result.

>>> update_steadystate(1, 2, 1)      # univariate
>>> update_steadystate(x, z, K, H)
f665:m1
def predict(x, P, F=1, Q=0, u=0, B=1, alpha=1.):
    if np.isscalar(F):
        F = np.array(F)
    x = dot(F, x) + dot(B, u)
    P = (alpha * alpha) * dot(dot(F, P), F.T) + Q

    return x, P
Predict next state (prior) using the Kalman filter state propagation
equations.

Parameters
----------
x : numpy.array
    State estimate vector
P : numpy.array
    Covariance matrix
F : numpy.array()
    State Transition matrix
Q : numpy.array, Optional
    Process noise matrix
u : numpy.array, Optional, default 0.
    Control vector. If non-zero, it is multiplied by B to create the
    control input into the system.
B : numpy.array, optional, default 1.
    Control transition matrix.
alpha : float, Optional, default=1.0
    Fading memory setting. 1.0 gives the normal Kalman filter, and
    values slightly larger than 1.0 (such as 1.02) give a fading
    memory effect - previous measurements have less influence on the
    filter's estimates. This formulation of the Fading memory filter
    (there are many) is due to Dan Simon

Returns
-------
x : numpy.array
    Prior state estimate vector
P : numpy.array
    Prior covariance matrix
f665:m2
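The procedural update() and predict() pair into the usual filter loop without any filter object. A minimal sketch, assuming both are importable from filterpy.kalman; the matrices and measurements are illustrative:

.. code-block:: Python

    import numpy as np
    from filterpy.kalman import predict, update

    x = np.array([[0.], [0.]])              # state: position, velocity
    P = np.eye(2) * 500.                    # large initial uncertainty
    F = np.array([[1., 1.], [0., 1.]])      # constant velocity model
    H = np.array([[1., 0.]])                # measure position only
    R = np.array([[5.]])
    Q = np.eye(2) * 0.01

    for z in [1.0, 2.1, 2.9]:
        x, P = predict(x, P, F, Q)
        x, P = update(x, P, z, R, H)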
def predict_steadystate(x, F=1, u=0, B=1):
    if np.isscalar(F):
        F = np.array(F)
    x = dot(F, x) + dot(B, u)

    return x
Predict next state (prior) using the Kalman filter state propagation
equations. This steady state form only computes x, assuming that the
covariance is constant.

Parameters
----------
x : numpy.array
    State estimate vector
F : numpy.array()
    State Transition matrix
u : numpy.array, Optional, default 0.
    Control vector. If non-zero, it is multiplied by B to create the
    control input into the system.
B : numpy.array, optional, default 1.
    Control transition matrix.

Returns
-------
x : numpy.array
    Prior state estimate vector
f665:m3
def batch_filter(x, P, zs, Fs, Qs, Hs, Rs, Bs=None, us=None,
                 update_first=False, saver=None):
    n = np.size(zs, 0)
    dim_x = x.shape[0]

    # mean estimates from Kalman Filter
    if x.ndim == 1:
        means = zeros((n, dim_x))
        means_p = zeros((n, dim_x))
    else:
        means = zeros((n, dim_x, 1))
        means_p = zeros((n, dim_x, 1))

    # state covariances from Kalman Filter
    covariances = zeros((n, dim_x, dim_x))
    covariances_p = zeros((n, dim_x, dim_x))

    if us is None:
        us = [0.] * n
        Bs = [0.] * n

    if update_first:
        for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)):
            x, P = update(x, P, z, R=R, H=H)
            means[i, :] = x
            covariances[i, :, :] = P

            x, P = predict(x, P, u=u, B=B, F=F, Q=Q)
            means_p[i, :] = x
            covariances_p[i, :, :] = P

            if saver is not None:
                saver.save()
    else:
        for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)):
            x, P = predict(x, P, u=u, B=B, F=F, Q=Q)
            means_p[i, :] = x
            covariances_p[i, :, :] = P

            x, P = update(x, P, z, R=R, H=H)
            means[i, :] = x
            covariances[i, :, :] = P

            if saver is not None:
                saver.save()

    return (means, covariances, means_p, covariances_p)
Batch processes a sequence of measurements.

Parameters
----------
zs : list-like
    list of measurements at each time step. Missing measurements must
    be represented by None.
Fs : list-like
    list of values to use for the state transition matrix.
Qs : list-like
    list of values to use for the process error covariance.
Hs : list-like
    list of values to use for the measurement matrix.
Rs : list-like
    list of values to use for the measurement error covariance.
Bs : list-like, optional
    list of values to use for the control transition matrix;
    if not provided the control input is ignored.
us : list-like, optional
    list of values to use for the control input vector;
    if not provided 0 is used for every time step.
update_first : bool, optional
    controls whether the order of operations is update followed by
    predict, or predict followed by update. Default is predict->update.
saver : filterpy.common.Saver, optional
    filterpy.common.Saver object. If provided, saver.save() will be
    called after every epoch

Returns
-------
means : np.array((n,dim_x,1))
    array of the state for each time step after the update. Each entry
    is an np.array. In other words `means[k,:]` is the state at step
    `k`.
covariance : np.array((n,dim_x,dim_x))
    array of the covariances for each time step after the update.
    In other words `covariance[k,:,:]` is the covariance at step `k`.
means_predictions : np.array((n,dim_x,1))
    array of the state for each time step after the predictions. Each
    entry is an np.array. In other words `means[k,:]` is the state at
    step `k`.
covariance_predictions : np.array((n,dim_x,dim_x))
    array of the covariances for each time step after the prediction.
    In other words `covariance[k,:,:]` is the covariance at step `k`.

Examples
--------
.. code-block:: Python

    zs = [t + random.randn()*4 for t in range (40)]
    Fs = [kf.F for t in range (40)]
    Hs = [kf.H for t in range (40)]

    (mu, cov, _, _) = kf.batch_filter(zs, Rs=R_list, Fs=Fs, Hs=Hs,
                                      Qs=None, Bs=None, us=None,
                                      update_first=False)
    (xs, Ps, Ks) = kf.rts_smoother(mu, cov, Fs=Fs, Qs=None)
f665:m4
def rts_smoother(Xs, Ps, Fs, Qs):
    if len(Xs) != len(Ps):
        raise ValueError('length of Xs and Ps must be the same')

    n = Xs.shape[0]
    dim_x = Xs.shape[1]

    # smoother gain
    K = zeros((n, dim_x, dim_x))
    x, P, pP = Xs.copy(), Ps.copy(), Ps.copy()

    for k in range(n-2, -1, -1):
        pP[k] = dot(dot(Fs[k], P[k]), Fs[k].T) + Qs[k]

        K[k] = dot(dot(P[k], Fs[k].T), linalg.inv(pP[k]))
        x[k] += dot(K[k], x[k+1] - dot(Fs[k], x[k]))
        P[k] += dot(dot(K[k], P[k+1] - pP[k]), K[k].T)

    return (x, P, K, pP)
Runs the Rauch-Tung-Striebel Kalman smoother on a set of
means and covariances computed by a Kalman filter. The usual input
would come from the output of `KalmanFilter.batch_filter()`.

Parameters
----------
Xs : numpy.array
    array of the means (state variable x) of the output of a Kalman
    filter.
Ps : numpy.array
    array of the covariances of the output of a kalman filter.
Fs : list-like collection of numpy.array
    State transition matrix of the Kalman filter at each time step.
Qs : list-like collection of numpy.array, optional
    Process noise of the Kalman filter at each time step.

Returns
-------
x : numpy.ndarray
    smoothed means
P : numpy.ndarray
    smoothed state covariances
K : numpy.ndarray
    smoother gain at each step
pP : numpy.ndarray
    predicted state covariances

Examples
--------
.. code-block:: Python

    zs = [t + random.randn()*4 for t in range (40)]

    (mu, cov, _, _) = kalman.batch_filter(zs)
    (x, P, K, pP) = rts_smoother(mu, cov, kf.F, kf.Q)
f665:m5
def predict(self, u=None, B=None, F=None, Q=None):
    if B is None:
        B = self.B
    if F is None:
        F = self.F
    if Q is None:
        Q = self.Q
    elif isscalar(Q):
        Q = eye(self.dim_x) * Q

    # x = Fx + Bu
    if B is not None and u is not None:
        self.x = dot(F, self.x) + dot(B, u)
    else:
        self.x = dot(F, self.x)

    # P = FPF' + Q
    self.P = self._alpha_sq * dot(dot(F, self.P), F.T) + Q

    # save prior
    self.x_prior = self.x.copy()
    self.P_prior = self.P.copy()
Predict next state (prior) using the Kalman filter state propagation
equations.

Parameters
----------
u : np.array
    Optional control vector. If not `None`, it is multiplied by B
    to create the control input into the system.
B : np.array(dim_x, dim_z), or None
    Optional control transition matrix; a value of None
    will cause the filter to use `self.B`.
F : np.array(dim_x, dim_x), or None
    Optional state transition matrix; a value of None
    will cause the filter to use `self.F`.
Q : np.array(dim_x, dim_x), scalar, or None
    Optional process noise matrix; a value of None will cause the
    filter to use `self.Q`.
f665:c0:m1
def update(self, z, R=None, H=None):
    # reset to None to force recompute
    self._log_likelihood = None
    self._likelihood = None
    self._mahalanobis = None

    if z is None:
        self.z = np.array([[None]*self.dim_z]).T
        self.x_post = self.x.copy()
        self.P_post = self.P.copy()
        self.y = zeros((self.dim_z, 1))
        return

    z = reshape_z(z, self.dim_z, self.x.ndim)

    if R is None:
        R = self.R
    elif isscalar(R):
        R = eye(self.dim_z) * R

    if H is None:
        H = self.H

    # y = z - Hx: error (residual) between measurement and prediction
    self.y = z - dot(H, self.x)

    # common subexpression for speed
    PHT = dot(self.P, H.T)

    # S = HPH' + R: project system uncertainty into measurement space
    self.S = dot(H, PHT) + R
    self.SI = self.inv(self.S)

    # K = PH'inv(S): map system uncertainty into kalman gain
    self.K = dot(PHT, self.SI)

    # x = x + Ky: predict new x with residual scaled by the kalman gain
    self.x = self.x + dot(self.K, self.y)

    # P = (I-KH)P(I-KH)' + KRK', which is more numerically stable
    # than the simpler P = (I-KH)P
    I_KH = self._I - dot(self.K, H)
    self.P = dot(dot(I_KH, self.P), I_KH.T) + dot(dot(self.K, R), self.K.T)

    # save measurement and posterior state
    self.z = deepcopy(z)
    self.x_post = self.x.copy()
    self.P_post = self.P.copy()
Add a new measurement (z) to the Kalman filter.

If z is None, nothing is computed. However, x_post and P_post are
updated with the prior (x_prior, P_prior), and self.z is set to None.

Parameters
----------
z : (dim_z, 1): array_like
    measurement for this update. z can be a scalar if dim_z is 1,
    otherwise it must be convertible to a column vector.
R : np.array, scalar, or None
    Optionally provide R to override the measurement noise for this
    one call, otherwise self.R will be used.
H : np.array, or None
    Optionally provide H to override the measurement function for this
    one call, otherwise self.H will be used.
f665:c0:m2
def predict_steadystate(self, u=0, B=None):
    if B is None:
        B = self.B

    # x = Fx + Bu
    if B is not None:
        self.x = dot(self.F, self.x) + dot(B, u)
    else:
        self.x = dot(self.F, self.x)

    # save prior
    self.x_prior = self.x.copy()
    self.P_prior = self.P.copy()
Predict state (prior) using the Kalman filter state propagation
equations. Only x is updated, P is left unchanged. See
update_steadystate() for a longer explanation of when to use this
method.

Parameters
----------
u : np.array
    Optional control vector. If non-zero, it is multiplied by B to
    create the control input into the system.
B : np.array(dim_x, dim_z), or None
    Optional control transition matrix; a value of None
    will cause the filter to use `self.B`.
f665:c0:m3
def update_steadystate(self, z):
    # reset to None to force recompute
    self._log_likelihood = None
    self._likelihood = None
    self._mahalanobis = None

    if z is None:
        self.z = np.array([[None]*self.dim_z]).T
        self.x_post = self.x.copy()
        self.P_post = self.P.copy()
        self.y = zeros((self.dim_z, 1))
        return

    z = reshape_z(z, self.dim_z, self.x.ndim)

    # residual scaled by the precomputed steady-state gain
    self.y = z - dot(self.H, self.x)
    self.x = self.x + dot(self.K, self.y)

    # save measurement and posterior state
    self.z = deepcopy(z)
    self.x_post = self.x.copy()
    self.P_post = self.P.copy()
Add a new measurement (z) to the Kalman filter without recomputing the
Kalman gain K, the state covariance P, or the system uncertainty S.

You can use this for LTI systems since the Kalman gain and covariance
converge to a fixed value. Precompute these and assign them explicitly,
or run the Kalman filter using the normal predict()/update() cycle
until they converge.

The main advantage of this call is speed. We do significantly less
computation, notably avoiding a costly matrix inversion.

Use in conjunction with predict_steadystate(), otherwise P will grow
without bound.

Parameters
----------
z : (dim_z, 1): array_like
    measurement for this update. z can be a scalar if dim_z is 1,
    otherwise it must be convertible to a column vector.

Examples
--------
>>> cv = kinematic_kf(dim=3, order=2) # 3D const velocity filter
>>> # let filter converge on representative data, then save K and P
>>> for i in range(100):
>>>     cv.predict()
>>>     cv.update([i, i, i])
>>> saved_K = np.copy(cv.K)
>>> saved_P = np.copy(cv.P)

later on:

>>> cv = kinematic_kf(dim=3, order=2) # 3D const velocity filter
>>> cv.K = np.copy(saved_K)
>>> cv.P = np.copy(saved_P)
>>> for i in range(100):
>>>     cv.predict_steadystate()
>>>     cv.update_steadystate([i, i, i])
f665:c0:m4
def update_correlated(self, z, R=None, H=None):
    # reset to None to force recompute
    self._log_likelihood = None
    self._likelihood = None
    self._mahalanobis = None

    if z is None:
        self.z = np.array([[None]*self.dim_z]).T
        self.x_post = self.x.copy()
        self.P_post = self.P.copy()
        self.y = zeros((self.dim_z, 1))
        return

    z = reshape_z(z, self.dim_z, self.x.ndim)

    if R is None:
        R = self.R
    elif isscalar(R):
        R = eye(self.dim_z) * R

    if H is None:
        H = self.H

    # handle special case: if z is in form [[z]] but x is not a column
    # vector dimensions will not match
    if self.x.ndim == 1 and shape(z) == (1, 1):
        z = z[0]

    if shape(z) == ():  # is it scalar, e.g. z=3 or z=np.array(3)
        z = np.asarray([z])

    # y = z - Hx: error (residual) between measurement and prediction
    self.y = z - dot(H, self.x)

    # common subexpression for speed
    PHT = dot(self.P, H.T)

    # project system uncertainty into measurement space, including the
    # process/measurement correlation matrix M
    self.S = dot(H, PHT) + dot(H, self.M) + dot(self.M.T, H.T) + R
    self.SI = self.inv(self.S)

    self.K = dot(PHT + self.M, self.SI)

    self.x = self.x + dot(self.K, self.y)
    self.P = self.P - dot(self.K, dot(H, self.P) + self.M.T)

    # save measurement and posterior state
    self.z = deepcopy(z)
    self.x_post = self.x.copy()
    self.P_post = self.P.copy()
Add a new measurement (z) to the Kalman filter assuming that
process noise and measurement noise are correlated as defined in
the `self.M` matrix.

If z is None, nothing is changed.

Parameters
----------
z : (dim_z, 1): array_like
    measurement for this update. z can be a scalar if dim_z is 1,
    otherwise it must be convertible to a column vector.
R : np.array, scalar, or None
    Optionally provide R to override the measurement noise for this
    one call, otherwise self.R will be used.
H : np.array, or None
    Optionally provide H to override the measurement function for this
    one call, otherwise self.H will be used.
f665:c0:m5
def batch_filter(self, zs, Fs=None, Qs=None, Hs=None,
                 Rs=None, Bs=None, us=None, update_first=False,
                 saver=None):
    n = np.size(zs, 0)
    if Fs is None:
        Fs = [self.F] * n
    if Qs is None:
        Qs = [self.Q] * n
    if Hs is None:
        Hs = [self.H] * n
    if Rs is None:
        Rs = [self.R] * n
    if Bs is None:
        Bs = [self.B] * n
    if us is None:
        us = [0] * n

    # mean estimates from Kalman Filter
    if self.x.ndim == 1:
        means = zeros((n, self.dim_x))
        means_p = zeros((n, self.dim_x))
    else:
        means = zeros((n, self.dim_x, 1))
        means_p = zeros((n, self.dim_x, 1))

    # state covariances from Kalman Filter
    covariances = zeros((n, self.dim_x, self.dim_x))
    covariances_p = zeros((n, self.dim_x, self.dim_x))

    if update_first:
        for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)):
            self.update(z, R=R, H=H)
            means[i, :] = self.x
            covariances[i, :, :] = self.P

            self.predict(u=u, B=B, F=F, Q=Q)
            means_p[i, :] = self.x
            covariances_p[i, :, :] = self.P

            if saver is not None:
                saver.save()
    else:
        for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)):
            self.predict(u=u, B=B, F=F, Q=Q)
            means_p[i, :] = self.x
            covariances_p[i, :, :] = self.P

            self.update(z, R=R, H=H)
            means[i, :] = self.x
            covariances[i, :, :] = self.P

            if saver is not None:
                saver.save()

    return (means, covariances, means_p, covariances_p)
Batch processes a sequence of measurements.

Parameters
----------
zs : list-like
    list of measurements at each time step `self.dt`. Missing
    measurements must be represented by `None`.
Fs : None, list-like, default=None
    optional value or list of values to use for the state transition
    matrix F. If Fs is None then self.F is used for all epochs.
    Otherwise it must contain a list-like list of F's, one for each
    epoch. This allows you to have varying F per epoch.
Qs : None, np.array or list-like, default=None
    optional value or list of values to use for the process error
    covariance Q. If Qs is None then self.Q is used for all epochs.
    Otherwise it must contain a list-like list of Q's, one for each
    epoch. This allows you to have varying Q per epoch.
Hs : None, np.array or list-like, default=None
    optional list of values to use for the measurement matrix H. If Hs
    is None then self.H is used for all epochs. If Hs contains a single
    matrix, then it is used as H for all epochs. Otherwise it must
    contain a list-like list of H's, one for each epoch. This allows
    you to have varying H per epoch.
Rs : None, np.array or list-like, default=None
    optional list of values to use for the measurement error
    covariance R. If Rs is None then self.R is used for all epochs.
    Otherwise it must contain a list-like list of R's, one for each
    epoch. This allows you to have varying R per epoch.
Bs : None, np.array or list-like, default=None
    optional list of values to use for the control transition matrix
    B. If Bs is None then self.B is used for all epochs. Otherwise it
    must contain a list-like list of B's, one for each epoch. This
    allows you to have varying B per epoch.
us : None, np.array or list-like, default=None
    optional list of values to use for the control input vector. If us
    is None then None is used for all epochs (equivalent to 0, or no
    control input). Otherwise it must contain a list-like list of u's,
    one for each epoch.
update_first : bool, optional, default=False
    controls whether the order of operations is update followed by
    predict, or predict followed by update. Default is predict->update.
saver : filterpy.common.Saver, optional
    filterpy.common.Saver object. If provided, saver.save() will be
    called after every epoch

Returns
-------
means : np.array((n,dim_x,1))
    array of the state for each time step after the update. Each entry
    is an np.array. In other words `means[k,:]` is the state at step
    `k`.
covariance : np.array((n,dim_x,dim_x))
    array of the covariances for each time step after the update.
    In other words `covariance[k,:,:]` is the covariance at step `k`.
means_predictions : np.array((n,dim_x,1))
    array of the state for each time step after the predictions. Each
    entry is an np.array. In other words `means[k,:]` is the state at
    step `k`.
covariance_predictions : np.array((n,dim_x,dim_x))
    array of the covariances for each time step after the prediction.
    In other words `covariance[k,:,:]` is the covariance at step `k`.

Examples
--------
.. code-block:: Python

    # this example demonstrates tracking a measurement where the time
    # between measurements varies, as stored in dts. This requires
    # that F be recomputed for each epoch. The output is then smoothed
    # with an RTS smoother.

    zs = [t + random.randn()*4 for t in range (40)]
    Fs = [np.array([[1., dt], [0, 1]]) for dt in dts]

    (mu, cov, _, _) = kf.batch_filter(zs, Fs=Fs)
    (xs, Ps, Ks) = kf.rts_smoother(mu, cov, Fs=Fs)
f665:c0:m6
def rts_smoother(self, Xs, Ps, Fs=None, Qs=None, inv=np.linalg.inv):
    if len(Xs) != len(Ps):
        raise ValueError('length of Xs and Ps must be the same')

    n = Xs.shape[0]
    dim_x = Xs.shape[1]

    if Fs is None:
        Fs = [self.F] * n
    if Qs is None:
        Qs = [self.Q] * n

    # smoother gain
    K = zeros((n, dim_x, dim_x))

    x, P, Pp = Xs.copy(), Ps.copy(), Ps.copy()
    for k in range(n-2, -1, -1):
        Pp[k] = dot(dot(Fs[k+1], P[k]), Fs[k+1].T) + Qs[k+1]

        K[k] = dot(dot(P[k], Fs[k+1].T), inv(Pp[k]))
        x[k] += dot(K[k], x[k+1] - dot(Fs[k+1], x[k]))
        P[k] += dot(dot(K[k], P[k+1] - Pp[k]), K[k].T)

    return (x, P, K, Pp)
Runs the Rauch-Tung-Striebel Kalman smoother on a set of
means and covariances computed by a Kalman filter. The usual input
would come from the output of `KalmanFilter.batch_filter()`.

Parameters
----------
Xs : numpy.array
    array of the means (state variable x) of the output of a Kalman
    filter.
Ps : numpy.array
    array of the covariances of the output of a kalman filter.
Fs : list-like collection of numpy.array, optional
    State transition matrix of the Kalman filter at each time step.
    Optional, if not provided the filter's self.F will be used
Qs : list-like collection of numpy.array, optional
    Process noise of the Kalman filter at each time step. Optional,
    if not provided the filter's self.Q will be used
inv : function, default numpy.linalg.inv
    If you prefer another inverse function, such as the Moore-Penrose
    pseudo inverse, set it to that instead: kf.inv = np.linalg.pinv

Returns
-------
x : numpy.ndarray
    smoothed means
P : numpy.ndarray
    smoothed state covariances
K : numpy.ndarray
    smoother gain at each step
Pp : numpy.ndarray
    Predicted state covariances

Examples
--------
.. code-block:: Python

    zs = [t + random.randn()*4 for t in range (40)]

    (mu, cov, _, _) = kalman.batch_filter(zs)
    (x, P, K, Pp) = kalman.rts_smoother(mu, cov)
f665:c0:m7
def get_prediction(self, u=0):
    x = dot(self.F, self.x) + dot(self.B, u)
    P = self._alpha_sq * dot(dot(self.F, self.P), self.F.T) + self.Q
    return (x, P)
Predicts the next state of the filter and returns it without
altering the state of the filter.

Parameters
----------
u : np.array
    optional control input

Returns
-------
(x, P) : tuple
    State vector and covariance array of the prediction.
f665:c0:m8
def get_update(self, z=None):
    if z is None:
        return self.x, self.P

    z = reshape_z(z, self.dim_z, self.x.ndim)

    R = self.R
    H = self.H
    P = self.P
    x = self.x

    # error (residual) between measurement and prediction
    y = z - dot(H, x)

    # common subexpression for speed
    PHT = dot(P, H.T)

    # project system uncertainty into measurement space
    S = dot(H, PHT) + R

    # map system uncertainty into kalman gain
    K = dot(PHT, self.inv(S))

    # predict new x with residual scaled by the kalman gain
    x = x + dot(K, y)

    # P = (I-KH)P(I-KH)' + KRK'
    I_KH = self._I - dot(K, H)
    P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T)

    return x, P
Computes the new estimate based on measurement `z` and returns it
without altering the state of the filter.

Parameters
----------
z : (dim_z, 1): array_like
    measurement for this update. z can be a scalar if dim_z is 1,
    otherwise it must be convertible to a column vector.

Returns
-------
(x, P) : tuple
    State vector and covariance array of the update.
f665:c0:m9
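get_prediction() and get_update() are useful for lookahead: they return what predict()/update() would produce without mutating the filter. A hedged sketch with illustrative matrices (note the version of get_prediction() above computes dot(self.B, u) unconditionally, so B is given a scalar value here):

.. code-block:: Python

    import numpy as np
    from filterpy.kalman import KalmanFilter

    kf = KalmanFilter(dim_x=2, dim_z=1)
    kf.x = np.array([[0.], [0.]])
    kf.F = np.array([[1., 1.], [0., 1.]])
    kf.H = np.array([[1., 0.]])
    kf.B = 0.                                 # avoids dot(None, u)

    x_pred, P_pred = kf.get_prediction()      # one-step lookahead
    x_upd, P_upd = kf.get_update(np.array([[1.2]]))
    # kf.x and kf.P are unchanged by both calls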
def residual_of(self, z):
    return z - dot(self.H, self.x_prior)
Returns the residual for the given measurement (z). Does not alter the state of the filter.
f665:c0:m10
def measurement_of_state(self, x):
    return dot(self.H, x)
Helper function that converts a state into a measurement.

Parameters
----------
x : np.array
    kalman state vector

Returns
-------
z : (dim_z, 1): array_like
    measurement for this update. z can be a scalar if dim_z is 1,
    otherwise it must be convertible to a column vector.
f665:c0:m11
@property
def log_likelihood(self):
    if self._log_likelihood is None:
        self._log_likelihood = logpdf(x=self.y, cov=self.S)
    return self._log_likelihood
log-likelihood of the last measurement.
f665:c0:m12
@property
def likelihood(self):
    if self._likelihood is None:
        self._likelihood = exp(self.log_likelihood)
        if self._likelihood == 0:
            self._likelihood = sys.float_info.min
    return self._likelihood
Computed from the log-likelihood. The log-likelihood can be very small, meaning a large negative value such as -28000. Taking the exp() of that results in 0.0, which can break typical algorithms which multiply by this value, so by default we always return a number >= sys.float_info.min.
f665:c0:m13
@property
def mahalanobis(self):
    if self._mahalanobis is None:
        self._mahalanobis = sqrt(float(dot(dot(self.y.T, self.SI), self.y)))
    return self._mahalanobis
Mahalanobis distance of measurement. E.g. 3 means measurement was 3
standard deviations away from the predicted value.

Returns
-------
mahalanobis : float
f665:c0:m14
@property
def alpha(self):
    return self._alpha_sq**.5
Fading memory setting. 1.0 gives the normal Kalman filter, and values slightly larger than 1.0 (such as 1.02) give a fading memory effect - previous measurements have less influence on the filter's estimates. This formulation of the Fading memory filter (there are many) is due to Dan Simon [1]_.
f665:c0:m15
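Enabling fading memory is a one-line assignment through this property's setter. A brief sketch with illustrative matrices:

.. code-block:: Python

    import numpy as np
    from filterpy.kalman import KalmanFilter

    kf = KalmanFilter(dim_x=2, dim_z=1)
    kf.F = np.array([[1., 1.], [0., 1.]])
    kf.H = np.array([[1., 0.]])

    # slightly > 1: each predict() scales P by alpha**2, so older
    # measurements are progressively discounted
    kf.alpha = 1.02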
def log_likelihood_of(self, z):
    if z is None:
        return log(sys.float_info.min)
    return logpdf(z, dot(self.H, self.x), self.S)
log likelihood of the measurement `z`. This should only be called after a call to update(). Calling after predict() will yield an incorrect result.
f665:c0:m16
def const_vel_filter(dt, x0=0, x_ndim=1, P_diag=(1., 1.), R_std=1.,
                     Q_var=0.0001):
    f = KalmanFilter(dim_x=2, dim_z=1)

    if x_ndim == 1:
        f.x = np.array([x0, 0.])
    else:
        f.x = np.array([[x0, 0.]]).T

    f.F = np.array([[1., dt],
                    [0., 1.]])

    f.H = np.array([[1., 0.]])
    f.P = np.diag(P_diag)
    f.R = np.eye(1) * (R_std**2)
    f.Q = Q_discrete_white_noise(2, dt, Q_var)

    return f
helper, constructs 1d, constant velocity filter
f667:m0
def const_vel_filter_2d(dt, x_ndim=1, P_diag=(1., 1, 1, 1), R_std=1.,
                        Q_var=0.0001):
    kf = KalmanFilter(dim_x=4, dim_z=2)

    kf.x = np.array([[0., 0., 0., 0.]]).T
    kf.P *= np.diag(P_diag)
    kf.F = np.array([[1., dt, 0., 0.],
                     [0., 1., 0., 0.],
                     [0., 0., 1., dt],
                     [0., 0., 0., 1.]])

    kf.H = np.array([[1., 0, 0, 0],
                     [0., 0, 1, 0]])

    kf.R *= np.eye(2) * (R_std**2)
    q = Q_discrete_white_noise(dim=2, dt=dt, var=Q_var)
    kf.Q = block_diag(q, q)

    return kf
helper, constructs 2d, constant velocity filter
f667:m1
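Assuming the helper above is in scope along with the module-level names it relies on (KalmanFilter, Q_discrete_white_noise, block_diag), usage would look like this sketch; the measurements are illustrative:

.. code-block:: Python

    import numpy as np

    kf = const_vel_filter_2d(dt=1., R_std=2., Q_var=0.02)
    for px, py in [(1.0, 1.1), (2.1, 1.9), (2.9, 3.2)]:
        kf.predict()
        kf.update(np.array([[px], [py]]))   # measurement is (x, y) position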
def proc_form():
    dt = 1.
    std_z = 10.      # literal elided in source; representative value
    x = np.array([[0.], [0.]])
    F = np.array([[1., dt], [0., 1.]])
    H = np.array([[1., 0.]])
    P = np.eye(2)
    R = np.eye(1)*std_z**2
    Q = Q_discrete_white_noise(2, dt, 0.001)   # variance elided in source

    pos = 0.
    for t in range(2000):   # iteration count elided in source
        z = pos + random.randn() * std_z
        pos += 100

        # perform kalman filtering
        x, P = predict(x, P, F, Q)
        x, P, _ = update(z, R, x, P, H)
This is for me to run against the class_form() function to see which,
if either, runs faster. They are within a few ms of each other on my
machine with Python 3.5.1.
f667:m10
def update(self, z):
    # run update on each filter, and save the likelihood
    for i, f in enumerate(self.filters):
        f.update(z)
        self.likelihood[i] = f.likelihood

    # update mode probabilities from total probability * likelihood
    self.mu = self.cbar * self.likelihood
    self.mu /= np.sum(self.mu)  # normalize

    self._compute_mixing_probabilities()

    # compute mixed IMM state and covariance and save posterior estimate
    self._compute_state_estimate()
    self.x_post = self.x.copy()
    self.P_post = self.P.copy()
Add a new measurement (z) to the Kalman filter. If z is None, nothing
is changed.

Parameters
----------
z : np.array
    measurement for this update.
f679:c0:m1
def predict(self, u=None):
    # compute mixed initial conditions
    xs, Ps = [], []
    for i, (f, w) in enumerate(zip(self.filters, self.omega.T)):
        x = zeros(self.x.shape)
        for kf, wj in zip(self.filters, w):
            x += kf.x * wj
        xs.append(x)

        P = zeros(self.P.shape)
        for kf, wj in zip(self.filters, w):
            y = kf.x - x
            P += wj * (outer(y, y) + kf.P)
        Ps.append(P)

    # compute each filter's prior using the mixed initial conditions
    for i, f in enumerate(self.filters):
        f.x = xs[i].copy()
        f.P = Ps[i].copy()
        f.predict(u)

    # compute mixed IMM state and covariance and save prior estimate
    self._compute_state_estimate()
    self.x_prior = self.x.copy()
    self.P_prior = self.P.copy()
Predict next state (prior) using the IMM state propagation equations.

Parameters
----------
u : np.array, optional
    Control vector. If not `None`, it is multiplied by B to create
    the control input into the system.
f679:c0:m2
def _compute_state_estimate(self):
    self.x.fill(0)
    for f, mu in zip(self.filters, self.mu):
        self.x += f.x * mu

    self.P.fill(0)
    for f, mu in zip(self.filters, self.mu):
        y = f.x - self.x
        self.P += mu * (outer(y, y) + f.P)
Computes the IMM's mixed state estimate from each filter using
the mode probability self.mu to weight the estimates.
f679:c0:m3
def _compute_mixing_probabilities(self):
    self.cbar = dot(self.mu, self.M)
    for i in range(self.N):
        for j in range(self.N):
            self.omega[i, j] = (self.M[i, j]*self.mu[i]) / self.cbar[j]
Compute the mixing probability for each filter.
f679:c0:m4
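The four IMM methods above combine into the usual predict/update cycle. A hedged sketch using filterpy's IMMEstimator with two constant-velocity models that differ only in process noise; all numbers are illustrative:

.. code-block:: Python

    import numpy as np
    from filterpy.kalman import KalmanFilter, IMMEstimator

    def make_cv(q):
        kf = KalmanFilter(dim_x=2, dim_z=1)
        kf.x = np.array([[0.], [0.]])
        kf.F = np.array([[1., 1.], [0., 1.]])
        kf.H = np.array([[1., 0.]])
        kf.R *= 10.
        kf.Q *= q
        return kf

    filters = [make_cv(0.001), make_cv(1.0)]   # low / high maneuver noise
    mu = np.array([0.5, 0.5])                  # initial mode probabilities
    M = np.array([[0.97, 0.03],                # Markov mode transitions
                  [0.03, 0.97]])

    imm = IMMEstimator(filters, mu, M)
    for z in [1.0, 2.1, 3.0, 4.2]:
        imm.predict()
        imm.update(np.array([[z]]))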
def update(self, z, R_inv=None):
    if z is None:
        self.z = None
        self.x_post = self.x.copy()
        self.P_inv_post = self.P_inv.copy()
        return

    if R_inv is None:
        R_inv = self.R_inv
    elif np.isscalar(R_inv):
        R_inv = eye(self.dim_z) * R_inv

    # rename for readability and speed
    H = self.H
    H_T = H.T
    P_inv = self.P_inv
    x = self.x

    if self._no_information:
        self.x = dot(P_inv, x) + dot(H_T, R_inv).dot(z)
        self.P_inv = P_inv + dot(H_T, R_inv).dot(H)
        self.log_likelihood = math.log(sys.float_info.min)
        self.likelihood = sys.float_info.min
    else:
        # y = z - Hx: error (residual) between measurement and prediction
        self.y = z - dot(H, x)

        # project system uncertainty into measurement space
        self.S = P_inv + dot(H_T, R_inv).dot(H)
        self.K = dot(self.inv(self.S), H_T).dot(R_inv)

        # x = x + Ky: predict new x with residual scaled by the kalman gain
        self.x = x + dot(self.K, self.y)
        self.P_inv = P_inv + dot(H_T, R_inv).dot(H)

        self.z = np.copy(reshape_z(z, self.dim_z, np.ndim(self.x)))

        if self.compute_log_likelihood:
            self.log_likelihood = logpdf(x=self.y, cov=self.S)
            self.likelihood = math.exp(self.log_likelihood)
            if self.likelihood == 0:
                self.likelihood = sys.float_info.min

    # save measurement and posterior state
    self.z = deepcopy(z)
    self.x_post = self.x.copy()
    self.P_inv_post = self.P_inv.copy()
Add a new measurement (z) to the kalman filter. If z is None, nothing
is changed.

Parameters
----------
z : np.array
    measurement for this update.
R_inv : np.array, scalar, or None
    Optionally provide the inverse of R to override the measurement
    noise for this one call, otherwise self.R_inv will be used.
f680:c0:m1
def predict(self, u=0):
    # try to invert into covariance form; if A is singular fall back
    # to the no-information form of the equations
    A = dot(self._F_inv.T, self.P_inv).dot(self._F_inv)

    try:
        AI = self.inv(A)
        invertable = True

        if self._no_information:
            try:
                self.x = dot(self.inv(self.P_inv), self.x)
            except:
                self.x = dot(0, self.x)
            self._no_information = False
    except:
        invertable = False
        self._no_information = True

    if invertable:
        self.x = dot(self._F, self.x) + dot(self.B, u)
        self.P_inv = self.inv(AI + self.Q)

        self.P_inv_prior = np.copy(self.P_inv)
        self.x_prior = np.copy(self.x)
    else:
        I_PF = self._I - dot(self.P_inv, self._F_inv)
        FTI = self.inv(self._F.T)
        FTIX = dot(FTI, self.x)
        AQI = self.inv(A + self.Q)
        self.x = dot(FTI, dot(I_PF, AQI).dot(FTIX))

        self.x_prior = np.copy(self.x)
        self.P_inv_prior = np.copy(AQI)
Predict next position.

Parameters
----------
u : ndarray
    Optional control vector. If non-zero, it is multiplied by B to
    create the control input into the system.
f680:c0:m2
def batch_filter(self, zs, Rs=None, update_first=False, saver=None):
    raise NotImplementedError("batch_filter is not implemented")  # message elided in source

    # unreachable until the raise above is removed
    n = np.size(zs, 0)
    if Rs is None:
        Rs = [None] * n

    # mean estimates from Kalman Filter
    means = zeros((n, self.dim_x, 1))

    # state covariances from Kalman Filter
    covariances = zeros((n, self.dim_x, self.dim_x))

    if update_first:
        for i, (z, r) in enumerate(zip(zs, Rs)):
            self.update(z, r)
            means[i, :] = self.x
            covariances[i, :, :] = self._P

            self.predict()

            if saver is not None:
                saver.save()
    else:
        for i, (z, r) in enumerate(zip(zs, Rs)):
            self.predict()
            self.update(z, r)

            means[i, :] = self.x
            covariances[i, :, :] = self._P

            if saver is not None:
                saver.save()

    return (means, covariances)
Batch processes a sequence of measurements.

Parameters
----------
zs : list-like
    list of measurements at each time step `self.dt`. Missing
    measurements must be represented by None.
Rs : list-like, optional
    optional list of values to use for the measurement error
    covariance; a value of None in any position will cause the filter
    to use `self.R` for that time step.
update_first : bool, optional
    controls whether the order of operations is update followed by
    predict, or predict followed by update. Default is predict->update.
saver : filterpy.common.Saver, optional
    filterpy.common.Saver object. If provided, saver.save() will be
    called after every epoch

Returns
-------
means : np.array((n,dim_x,1))
    array of the state for each time step. Each entry is an np.array.
    In other words `means[k,:]` is the state at step `k`.
covariances : np.array((n,dim_x,dim_x))
    array of the covariances for each time step. In other words
    `covariance[k,:,:]` is the covariance at step `k`.
f680:c0:m3
@property
def F(self):
    return self._F
State Transition matrix
f680:c0:m4
@F.setter
def F(self, value):
    self._F = value
    self._F_inv = self.inv(self._F)
State Transition matrix
f680:c0:m5
@property
def P(self):
    return self.inv(self.P_inv)
State covariance matrix
f680:c0:m6
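The information filter above works with inverse covariances, so configuration differs slightly from the standard KalmanFilter. A hedged sketch, assuming filterpy's InformationFilter stores the inverses as R_inv and P_inv and that x is a column vector; all values are illustrative:

.. code-block:: Python

    import numpy as np
    from filterpy.kalman import InformationFilter

    f = InformationFilter(dim_x=2, dim_z=1)
    f.x = np.array([[0.], [0.]])
    f.F = np.array([[1., 1.], [0., 1.]])   # the setter also caches inv(F)
    f.H = np.array([[1., 0.]])
    f.R_inv *= 1. / 5.                     # information form stores inv(R)
    f.Q *= 0.01

    for z in [1.0, 2.0, 3.1]:
        f.predict()
        f.update(z)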
def __init__(self, dim_x, dim_z, dt, hx, fx, points,
             sqrt_fn=None, x_mean_fn=None, z_mean_fn=None,
             residual_x=None,
             residual_z=None):
    self.x = zeros(dim_x)
    self.P = eye(dim_x)
    self.x_prior = np.copy(self.x)
    self.P_prior = np.copy(self.P)
    self.Q = eye(dim_x)
    self.R = eye(dim_z)
    self._dim_x = dim_x
    self._dim_z = dim_z
    self.points_fn = points
    self._dt = dt
    self._num_sigmas = points.num_sigmas()
    self.hx = hx
    self.fx = fx
    self.x_mean = x_mean_fn
    self.z_mean = z_mean_fn

    self._log_likelihood = log(sys.float_info.min)
    self._likelihood = sys.float_info.min
    self._mahalanobis = None

    if sqrt_fn is None:
        self.msqrt = cholesky
    else:
        self.msqrt = sqrt_fn

    # weights for the means and covariances
    self.Wm, self.Wc = points.Wm, points.Wc

    if residual_x is None:
        self.residual_x = np.subtract
    else:
        self.residual_x = residual_x

    if residual_z is None:
        self.residual_z = np.subtract
    else:
        self.residual_z = residual_z

    # sigma points transformed through f(x) and h(x)
    self.sigmas_f = zeros((self._num_sigmas, self._dim_x))
    self.sigmas_h = zeros((self._num_sigmas, self._dim_z))

    self.K = np.zeros((dim_x, dim_z))    # Kalman gain
    self.y = np.zeros((dim_z))           # residual
    self.z = np.array([[None]*dim_z]).T  # measurement
    self.S = np.zeros((dim_z, dim_z))    # system uncertainty
    self.SI = np.zeros((dim_z, dim_z))   # inverse system uncertainty

    self.inv = np.linalg.inv

    # these will always be a copy of x, P after predict() is called
    self.x_prior = self.x.copy()
    self.P_prior = self.P.copy()

    # these will always be a copy of x, P after update() is called
    self.x_post = self.x.copy()
    self.P_post = self.P.copy()
Create a Kalman filter. You are responsible for setting the various state variables to reasonable values; the defaults below will not give you a functional filter.
f681:c0:m0
def predict(self, dt=None, UT=None, fx=None, **fx_args):
    if dt is None:
        dt = self._dt

    if UT is None:
        UT = unscented_transform

    # calculate sigma points for given mean and covariance
    self.compute_process_sigmas(dt, fx, **fx_args)

    # and pass sigmas through the unscented transform to compute prior
    self.x, self.P = UT(self.sigmas_f, self.Wm, self.Wc, self.Q,
                        self.x_mean, self.residual_x)

    # save prior
    self.x_prior = np.copy(self.x)
    self.P_prior = np.copy(self.P)
r""" Performs the predict step of the UKF. On return, self.x and self.P contain the predicted state (x) and covariance (P). ' Important: this MUST be called before update() is called for the first time. Parameters ---------- dt : double, optional If specified, the time step to be used for this prediction. self._dt is used if this is not provided. fx : callable f(x, **fx_args), optional State transition function. If not provided, the default function passed in during construction will be used. UT : function(sigmas, Wm, Wc, noise_cov), optional Optional function to compute the unscented transform for the sigma points passed through hx. Typically the default function will work - you can use x_mean_fn and z_mean_fn to alter the behavior of the unscented transform. **fx_args : keyword arguments optional keyword arguments to be passed into f(x).
f681:c0:m1
def update(self, z, R=None, UT=None, hx=None, **hx_args):
    if z is None:
        self.z = np.array([[None]*self._dim_z]).T
        self.x_post = self.x.copy()
        self.P_post = self.P.copy()
        return

    if hx is None:
        hx = self.hx

    if UT is None:
        UT = unscented_transform

    if R is None:
        R = self.R
    elif isscalar(R):
        R = eye(self._dim_z) * R

    # pass prior sigmas through h(x) to get measurement sigmas
    sigmas_h = []
    for s in self.sigmas_f:
        sigmas_h.append(hx(s, **hx_args))
    self.sigmas_h = np.atleast_2d(sigmas_h)

    # mean and covariance of prediction passed through unscented transform
    zp, self.S = UT(self.sigmas_h, self.Wm, self.Wc, R,
                    self.z_mean, self.residual_z)
    self.SI = self.inv(self.S)

    # compute cross variance of the state and the measurements
    Pxz = self.cross_variance(self.x, zp, self.sigmas_f, self.sigmas_h)

    self.K = dot(Pxz, self.SI)        # Kalman gain
    self.y = self.residual_z(z, zp)   # residual

    # update Gaussian state estimate (x, P)
    self.x = self.x + dot(self.K, self.y)
    self.P = self.P - dot(self.K, dot(self.S, self.K.T))

    # save measurement and posterior state
    self.z = deepcopy(z)
    self.x_post = self.x.copy()
    self.P_post = self.P.copy()

    self._log_likelihood = None
    self._likelihood = None
    self._mahalanobis = None
Update the UKF with the given measurements. On return, self.x
and self.P contain the new mean and covariance of the filter.

Parameters
----------
z : numpy.array of shape (dim_z)
    measurement vector
R : numpy.array((dim_z, dim_z)), optional
    Measurement noise. If provided, overrides self.R for this
    function call.
UT : function(sigmas, Wm, Wc, noise_cov), optional
    Optional function to compute the unscented transform for the sigma
    points passed through hx. Typically the default function will
    work - you can use x_mean_fn and z_mean_fn to alter the behavior
    of the unscented transform.
**hx_args : keyword argument
    arguments to be passed into h(x) after x -> h(x, **hx_args)
f681:c0:m2
def cross_variance(self, x, z, sigmas_f, sigmas_h):
    Pxz = zeros((sigmas_f.shape[1], sigmas_h.shape[1]))
    N = sigmas_f.shape[0]
    for i in range(N):
        dx = self.residual_x(sigmas_f[i], x)
        dz = self.residual_z(sigmas_h[i], z)
        Pxz += self.Wc[i] * outer(dx, dz)
    return Pxz
Compute cross variance of the state `x` and measurement `z`.
f681:c0:m3
def compute_process_sigmas(self, dt, fx=None, **fx_args):
    if fx is None:
        fx = self.fx

    # calculate sigma points for the given mean and covariance
    sigmas = self.points_fn.sigma_points(self.x, self.P)

    for i, s in enumerate(sigmas):
        self.sigmas_f[i] = fx(s, dt, **fx_args)
computes the values of sigmas_f. Normally a user would not call this, but it is useful if you need to call update more than once between calls to predict (to update for multiple simultaneous measurements), so the sigmas correctly reflect the updated state x, P.
f681:c0:m4
def batch_filter(self, zs, Rs=None, dts=None, UT=None, saver=None):
    try:
        z = zs[0]
    except TypeError:
        raise TypeError('zs must be list-like')

    if self._dim_z == 1:
        if not(isscalar(z) or (z.ndim == 1 and len(z) == 1)):
            raise TypeError('zs must be a list of scalars or 1D, 1 element arrays')
    else:
        if len(z) != self._dim_z:
            raise TypeError(
                'each element in zs must be a 1D array of length {}'.format(self._dim_z))

    z_n = np.size(zs, 0)
    if Rs is None:
        Rs = [self.R] * z_n

    if dts is None:
        dts = [self._dt] * z_n

    # mean estimates from Kalman Filter
    if self.x.ndim == 1:
        means = zeros((z_n, self._dim_x))
    else:
        means = zeros((z_n, self._dim_x, 1))

    # state covariances from Kalman Filter
    covariances = zeros((z_n, self._dim_x, self._dim_x))

    for i, (z, r, dt) in enumerate(zip(zs, Rs, dts)):
        self.predict(dt=dt, UT=UT)
        self.update(z, r, UT=UT)
        means[i, :] = self.x
        covariances[i, :, :] = self.P

        if saver is not None:
            saver.save()

    return (means, covariances)
Performs the UKF filter over the list of measurements in `zs`.

Parameters
----------
zs : list-like
    list of measurements at each time step `self._dt`. Missing
    measurements must be represented by None.
Rs : None, np.array or list-like, default=None
    optional list of values to use for the measurement error
    covariance R. If Rs is None then self.R is used for all epochs.
    If it is a list of matrices or a 3D array where
    len(Rs) == len(zs), then it is treated as a list of R values, one
    per epoch. This allows you to have varying R per epoch.
dts : None, scalar or list-like, default=None
    optional value or list of delta time to be passed into predict.
    If dts is None then self.dt is used for all epochs. If it is a
    list where len(dts) == len(zs), then it is treated as a list of dt
    values, one per epoch. This allows you to have varying epoch
    durations.
UT : function(sigmas, Wm, Wc, noise_cov), optional
    Optional function to compute the unscented transform for the sigma
    points passed through hx. Typically the default function will
    work - you can use x_mean_fn and z_mean_fn to alter the behavior
    of the unscented transform.
saver : filterpy.common.Saver, optional
    filterpy.common.Saver object. If provided, saver.save() will be
    called after every epoch

Returns
-------
means : ndarray((n,dim_x,1))
    array of the state for each time step after the update. Each entry
    is an np.array. In other words `means[k,:]` is the state at step
    `k`.
covariance : ndarray((n,dim_x,dim_x))
    array of the covariances for each time step after the update.
    In other words `covariance[k,:,:]` is the covariance at step `k`.

Examples
--------
.. code-block:: Python

    # this example demonstrates tracking a measurement where the time
    # between measurements varies, as stored in dts. The output is
    # then smoothed with an RTS smoother.

    zs = [t + random.randn()*4 for t in range (40)]

    (mu, cov) = ukf.batch_filter(zs, dts=dts)
    (xs, Ps, Ks) = ukf.rts_smoother(mu, cov)
f681:c0:m5
def rts_smoother(self, Xs, Ps, Qs=None, dts=None, UT=None):
    if len(Xs) != len(Ps):
        raise ValueError('Xs and Ps must have the same length')

    n, dim_x = Xs.shape

    if dts is None:
        dts = [self._dt] * n
    elif isscalar(dts):
        dts = [dts] * n

    if Qs is None:
        Qs = [self.Q] * n

    if UT is None:
        UT = unscented_transform

    # smoother gain
    Ks = zeros((n, dim_x, dim_x))

    num_sigmas = self._num_sigmas

    xs, ps = Xs.copy(), Ps.copy()
    sigmas_f = zeros((num_sigmas, dim_x))

    for k in reversed(range(n-1)):
        # create sigma points from state estimate,
        # then pass through state function
        sigmas = self.points_fn.sigma_points(xs[k], ps[k])
        for i in range(num_sigmas):
            sigmas_f[i] = self.fx(sigmas[i], dts[k])

        xb, Pb = UT(
            sigmas_f, self.Wm, self.Wc, self.Q,
            self.x_mean, self.residual_x)

        # compute cross variance
        Pxb = 0
        for i in range(num_sigmas):
            y = self.residual_x(sigmas_f[i], xb)
            z = self.residual_x(sigmas[i], Xs[k])
            Pxb += self.Wc[i] * outer(z, y)

        # compute gain
        K = dot(Pxb, self.inv(Pb))

        # update the smoothed estimates
        xs[k] += dot(K, self.residual_x(xs[k+1], xb))
        ps[k] += dot(K, ps[k+1] - Pb).dot(K.T)
        Ks[k] = K

    return (xs, ps, Ks)
Runs the Rauch-Tung-Striebel Kalman smoother on a set of
means and covariances computed by the UKF. The usual input
would come from the output of `batch_filter()`.

Parameters
----------
Xs : numpy.array
    array of the means (state variable x) of the output of a Kalman
    filter.
Ps : numpy.array
    array of the covariances of the output of a kalman filter.
Qs : list-like collection of numpy.array, optional
    Process noise of the Kalman filter at each time step. Optional,
    if not provided the filter's self.Q will be used
dts : optional, float or array-like of float
    If provided, specifies the time step of each step of the filter.
    If float, then the same time step is used for all steps. If an
    array, then each element k contains the time at step k. Units are
    seconds.
UT : function(sigmas, Wm, Wc, noise_cov), optional
    Optional function to compute the unscented transform for the sigma
    points passed through hx. Typically the default function will
    work - you can use x_mean_fn and z_mean_fn to alter the behavior
    of the unscented transform.

Returns
-------
x : numpy.ndarray
    smoothed means
P : numpy.ndarray
    smoothed state covariances
K : numpy.ndarray
    smoother gain at each step

Examples
--------
.. code-block:: Python

    zs = [t + random.randn()*4 for t in range (40)]

    (mu, cov) = kalman.batch_filter(zs)
    (x, P, K) = kalman.rts_smoother(mu, cov)
f681:c0:m6
@property
def log_likelihood(self):
    if self._log_likelihood is None:
        self._log_likelihood = logpdf(x=self.y, cov=self.S)
    return self._log_likelihood
log-likelihood of the last measurement.
f681:c0:m7
@property
def likelihood(self):
    if self._likelihood is None:
        self._likelihood = exp(self.log_likelihood)
        if self._likelihood == 0:
            self._likelihood = sys.float_info.min
    return self._likelihood
Computed from the log-likelihood. The log-likelihood can be very small, meaning a large negative value such as -28000. Taking the exp() of that results in 0.0, which can break typical algorithms which multiply by this value, so by default we always return a number >= sys.float_info.min.
f681:c0:m8
@property
def mahalanobis(self):
    if self._mahalanobis is None:
        self._mahalanobis = sqrt(float(dot(dot(self.y.T, self.SI), self.y)))
    return self._mahalanobis
Mahalanobis distance of measurement. E.g. 3 means measurement was 3
standard deviations away from the predicted value.

Returns
-------
mahalanobis : float
f681:c0:m9
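A hedged end-to-end sketch of the UKF methods above, using filterpy's UnscentedKalmanFilter and MerweScaledSigmaPoints; the models and numbers are illustrative assumptions:

.. code-block:: Python

    import numpy as np
    from filterpy.kalman import UnscentedKalmanFilter, MerweScaledSigmaPoints

    def fx(x, dt):
        # constant velocity motion model
        return np.array([x[0] + dt*x[1], x[1]])

    def hx(x):
        # measure position only
        return np.array([x[0]])

    points = MerweScaledSigmaPoints(n=2, alpha=.1, beta=2., kappa=0.)
    ukf = UnscentedKalmanFilter(dim_x=2, dim_z=1, dt=1.,
                                fx=fx, hx=hx, points=points)
    ukf.x = np.array([0., 1.])
    ukf.R *= 0.5

    for z in [1.2, 1.9, 3.1]:
        ukf.predict()
        ukf.update(np.array([z]))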
def update(self, z, R2=None):
    if z is None:
        self.z = np.array([[None]*self.dim_z]).T
        self.x_post = self.x.copy()
        self._P1_2_post = np.copy(self._P1_2)
        return

    if R2 is None:
        R2 = self._R1_2
    elif np.isscalar(R2):
        R2 = eye(self.dim_z) * R2

    # rename for readability
    dim_z = self.dim_z
    M = self.M

    M[0:dim_z, 0:dim_z] = R2.T
    M[dim_z:, 0:dim_z] = dot(self.H, self._P1_2).T
    M[dim_z:, dim_z:] = self._P1_2.T

    _, self.S = qr(M)
    self.K = self.S[0:dim_z, dim_z:].T
    N = self.S[0:dim_z, 0:dim_z].T

    # y = z - Hx: residual between measurement and prediction
    self.y = z - dot(self.H, self.x)

    # x = x + Ky
    self.x += dot(self.K, pinv(N)).dot(self.y)
    self._P1_2 = self.S[dim_z:, dim_z:].T

    # save measurement and posterior state
    self.z = deepcopy(z)
    self.x_post = self.x.copy()
    self._P1_2_post = np.copy(self._P1_2)
Add a new measurement (z) to the kalman filter. If z is None, nothing
is changed.

Parameters
----------
z : np.array
    measurement for this update.
R2 : np.array, scalar, or None
    Sqrt of measurement noise. Optionally provide to override the
    measurement noise for this one call, otherwise self.R2 will be
    used.
f682:c0:m1
def predict(self, u=0):
    # x = Fx + Bu
    self.x = dot(self.F, self.x) + dot(self.B, u)

    # update the square-root covariance factor via QR decomposition
    _, P2 = qr(np.hstack([dot(self.F, self._P1_2), self._Q1_2]).T)
    self._P1_2 = P2[:self.dim_x, :self.dim_x].T

    # save prior
    self.x_prior = np.copy(self.x)
    self._P1_2_prior = np.copy(self._P1_2)
Predict next state (prior) using the Kalman filter state propagation
equations.

Parameters
----------
u : np.array, optional
    Optional control vector. If non-zero, it is multiplied by B to
    create the control input into the system.
f682:c0:m2
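A minimal predict/update loop for the square-root filter above. The class name SquareRootKalmanFilter and its filterpy.kalman import path are assumptions; only the attributes and methods shown in these records are relied upon:

.. code-block:: Python

    # Constant-velocity track from noisy position measurements.
    import numpy as np
    from filterpy.kalman import SquareRootKalmanFilter

    srkf = SquareRootKalmanFilter(dim_x=2, dim_z=1)
    srkf.x = np.array([[0.], [0.]])
    srkf.F = np.array([[1., 1.], [0., 1.]])
    srkf.H = np.array([[1., 0.]])
    srkf.R = np.array([[9.]])      # setter stores the Cholesky factor
    srkf.Q = np.eye(2) * 0.001     # likewise for the process noise

    for z in [1.0, 2.1, 2.9, 4.2]:
        srkf.predict()
        srkf.update(np.array([[z]]))
        # P and Q are reconstructed from their square roots on demand
        print(srkf.x.T, np.diag(srkf.P))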
def residual_of(self, z):
return z - dot(self.H, self.x)<EOL>
returns the residual for the given measurement (z). Does not alter the state of the filter.
f682:c0:m3
def measurement_of_state(self, x):
return dot(self.H, x)<EOL>
Helper function that converts a state into a measurement. Parameters ---------- x : np.array Kalman state vector Returns ------- z : np.array measurement corresponding to the given state
f682:c0:m4
@property<EOL><INDENT>def Q(self):<DEDENT>
return dot(self._Q1_2.T, self._Q1_2)<EOL>
Process uncertainty
f682:c0:m5
@property<EOL><INDENT>def Q1_2(self):<DEDENT>
return self._Q1_2<EOL>
Sqrt Process uncertainty
f682:c0:m6
@Q.setter<EOL><INDENT>def Q(self, value):<DEDENT>
self._Q = value<EOL>self._Q1_2 = cholesky(self._Q, lower=True)<EOL>
Process uncertainty
f682:c0:m7
@property<EOL><INDENT>def P(self):<DEDENT>
return dot(self._P1_2.T, self._P1_2)<EOL>
covariance matrix
f682:c0:m8
@property<EOL><INDENT>def P_prior(self):<DEDENT>
return dot(self._P1_2_prior.T, self._P1_2_prior)<EOL>
covariance matrix of the prior
f682:c0:m9
@property<EOL><INDENT>def P_post(self):<DEDENT>
return dot(self._P1_2_post.T, self._P1_2_post)<EOL>
covariance matrix of the posterior
f682:c0:m10
@property<EOL><INDENT>def P1_2(self):<DEDENT>
return self._P1_2<EOL>
sqrt of covariance matrix
f682:c0:m11
@P.setter<EOL><INDENT>def P(self, value):<DEDENT>
self._P = value<EOL>self._P1_2 = cholesky(self._P, lower=True)<EOL>
covariance matrix
f682:c0:m12
@property<EOL><INDENT>def R(self):<DEDENT>
return dot(self._R1_2.T, self._R1_2)<EOL>
measurement uncertainty
f682:c0:m13
@property<EOL><INDENT>def R1_2(self):<DEDENT>
return self._R1_2<EOL>
sqrt of measurement uncertainty
f682:c0:m14
@R.setter<EOL><INDENT>def R(self, value):<DEDENT>
self._R = value<EOL>self._R1_2 = cholesky(self._R, lower=True)<EOL>
measurement uncertainty
f682:c0:m15
def initialize(self, x, P):
if x.ndim != <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self.sigmas = multivariate_normal(mean=x, cov=P, size=self.N)<EOL>self.x = x<EOL>self.P = P<EOL>self.x_prior = self.x.copy()<EOL>self.P_prior = self.P.copy()<EOL>self.x_post = self.x.copy()<EOL>self.P_post = self.P.copy()<EOL>
Initializes the filter with the specified mean and covariance. You only need to call this if you are using the filter on more than one set of data; it is called by __init__. Parameters ---------- x : np.array(dim_x) state mean P : np.array((dim_x, dim_x)) covariance of the state
f684:c0:m1
def update(self, z, R=None):
if z is None:<EOL><INDENT>self.z = array([[None]*self.dim_z]).T<EOL>self.x_post = self.x.copy()<EOL>self.P_post = self.P.copy()<EOL>return<EOL><DEDENT>if R is None:<EOL><INDENT>R = self.R<EOL><DEDENT>if np.isscalar(R):<EOL><INDENT>R = eye(self.dim_z) * R<EOL><DEDENT>N = self.N<EOL>dim_z = len(z)<EOL>sigmas_h = zeros((N, dim_z))<EOL>for i in range(N):<EOL><INDENT>sigmas_h[i] = self.hx(self.sigmas[i])<EOL><DEDENT>z_mean = np.mean(sigmas_h, axis=<NUM_LIT:0>)<EOL>P_zz = (outer_product_sum(sigmas_h - z_mean) / (N-<NUM_LIT:1>)) + R<EOL>P_xz = outer_product_sum(<EOL>self.sigmas - self.x, sigmas_h - z_mean) / (N - <NUM_LIT:1>)<EOL>self.S = P_zz<EOL>self.SI = self.inv(self.S)<EOL>self.K = dot(P_xz, self.SI)<EOL>e_r = multivariate_normal(self._mean_z, R, N)<EOL>for i in range(N):<EOL><INDENT>self.sigmas[i] += dot(self.K, z + e_r[i] - sigmas_h[i])<EOL><DEDENT>self.x = np.mean(self.sigmas, axis=<NUM_LIT:0>)<EOL>self.P = self.P - dot(dot(self.K, self.S), self.K.T)<EOL>self.z = deepcopy(z)<EOL>self.x_post = self.x.copy()<EOL>self.P_post = self.P.copy()<EOL>
Add a new measurement (z) to the Kalman filter. If z is None, nothing is changed. Parameters ---------- z : np.array measurement for this update. R : np.array, scalar, or None Optionally provide R to override the measurement noise for this one call, otherwise self.R will be used.
f684:c0:m2
def predict(self):
N = self.N<EOL>for i, s in enumerate(self.sigmas):<EOL><INDENT>self.sigmas[i] = self.fx(s, self.dt)<EOL><DEDENT>e = multivariate_normal(self._mean, self.Q, N)<EOL>self.sigmas += e<EOL>self.x = np.mean(self.sigmas, axis=<NUM_LIT:0>)<EOL>self.P = outer_product_sum(self.sigmas - self.x) / (N - <NUM_LIT:1>)<EOL>self.x_prior = np.copy(self.x)<EOL>self.P_prior = np.copy(self.P)<EOL>
Predict next state (prior) by propagating the ensemble through fx and adding process noise.
f684:c0:m3
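Putting the three ensemble-filter records above together, a sketch of end-to-end use. The class name EnsembleKalmanFilter and its (x, P, dim_z, dt, N, hx, fx) constructor are assumptions consistent with initialize(), update() and predict():

.. code-block:: Python

    import numpy as np
    from filterpy.kalman import EnsembleKalmanFilter

    def fx(x, dt):
        # constant-velocity state transition
        return np.array([x[0] + x[1]*dt, x[1]])

    def hx(x):
        # only position is measured
        return np.array([x[0]])

    enkf = EnsembleKalmanFilter(x=np.array([0., 1.]), P=np.eye(2)*100.,
                                dim_z=1, dt=1., N=500, hx=hx, fx=fx)
    enkf.R *= 4.
    enkf.Q = np.eye(2) * 0.01

    for z in [1.2, 2.1, 2.9, 4.3]:
        enkf.predict()
        enkf.update(np.array([z]))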
def unscented_transform(sigmas, Wm, Wc, noise_cov=None,<EOL>mean_fn=None, residual_fn=None):
kmax, n = sigmas.shape<EOL>try:<EOL><INDENT>if mean_fn is None:<EOL><INDENT>x = np.dot(Wm, sigmas) <EOL><DEDENT>else:<EOL><INDENT>x = mean_fn(sigmas, Wm)<EOL><DEDENT><DEDENT>except:<EOL><INDENT>print(sigmas)<EOL>raise<EOL><DEDENT>if residual_fn is np.subtract or residual_fn is None:<EOL><INDENT>y = sigmas - x[np.newaxis, :]<EOL>P = np.dot(y.T, np.dot(np.diag(Wc), y))<EOL><DEDENT>else:<EOL><INDENT>P = np.zeros((n, n))<EOL>for k in range(kmax):<EOL><INDENT>y = residual_fn(sigmas[k], x)<EOL>P += Wc[k] * np.outer(y, y)<EOL><DEDENT><DEDENT>if noise_cov is not None:<EOL><INDENT>P += noise_cov<EOL><DEDENT>return (x, P)<EOL>
r""" Computes unscented transform of a set of sigma points and weights. returns the mean and covariance in a tuple. This works in conjunction with the UnscentedKalmanFilter class. Parameters ---------- sigmas: ndarray, of size (n, 2n+1) 2D array of sigma points. Wm : ndarray [# sigmas per dimension] Weights for the mean. Wc : ndarray [# sigmas per dimension] Weights for the covariance. noise_cov : ndarray, optional noise matrix added to the final computed covariance matrix. mean_fn : callable (sigma_points, weights), optional Function that computes the mean of the provided sigma points and weights. Use this if your state variable contains nonlinear values such as angles which cannot be summed. .. code-block:: Python def state_mean(sigmas, Wm): x = np.zeros(3) sum_sin, sum_cos = 0., 0. for i in range(len(sigmas)): s = sigmas[i] x[0] += s[0] * Wm[i] x[1] += s[1] * Wm[i] sum_sin += sin(s[2])*Wm[i] sum_cos += cos(s[2])*Wm[i] x[2] = atan2(sum_sin, sum_cos) return x residual_fn : callable (x, y), optional Function that computes the residual (difference) between x and y. You will have to supply this if your state variable cannot support subtraction, such as angles (359-1 degreees is 2, not 358). x and y are state vectors, not scalars. .. code-block:: Python def residual(a, b): y = a[0] - b[0] y = y % (2 * np.pi) if y > np.pi: y -= 2*np.pi return y Returns ------- x : ndarray [dimension] Mean of the sigma points after passing through the transform. P : ndarray covariance of the sigma points after passing throgh the transform. Examples -------- See my book Kalman and Bayesian Filters in Python https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
f685:m0
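A toy call of unscented_transform with hand-made points and equal weights, just to show the data shapes; real code would take sigmas, Wm and Wc from a sigma-point generator such as MerweScaledSigmaPoints:

.. code-block:: Python

    import numpy as np
    from filterpy.kalman import unscented_transform

    # five hand-made 2-D sigma points with equal weights
    sigmas = np.array([[0., 0.], [1., 0.], [-1., 0.],
                       [0., 1.], [0., -1.]])
    Wm = np.full(5, 0.2)
    Wc = np.full(5, 0.2)

    x, P = unscented_transform(sigmas, Wm, Wc, noise_cov=np.eye(2)*0.1)
    print(x)   # weighted mean of the points
    print(P)   # weighted covariance plus the additive noise term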
def __init__(self, dim_x, dim_z, N=None):
self.dim_x = dim_x<EOL>self.dim_z = dim_z<EOL>self.N = N<EOL>self.x = zeros((dim_x, <NUM_LIT:1>)) <EOL>self.x_s = zeros((dim_x, <NUM_LIT:1>)) <EOL>self.P = eye(dim_x) <EOL>self.Q = eye(dim_x) <EOL>self.F = eye(dim_x) <EOL>self.H = eye(dim_z, dim_x) <EOL>self.R = eye(dim_z) <EOL>self.K = zeros((dim_x, <NUM_LIT:1>)) <EOL>self.y = zeros((dim_z, <NUM_LIT:1>))<EOL>self.B = <NUM_LIT:0.><EOL>self.S = zeros((dim_z, dim_z))<EOL>self._I = np.eye(dim_x)<EOL>self.count = <NUM_LIT:0><EOL>if N is not None:<EOL><INDENT>self.xSmooth = []<EOL><DEDENT>
Create a fixed lag Kalman filter smoother. You are responsible for setting the various state variables to reasonable values; the defaults below will not give you a functional filter. Parameters ---------- dim_x : int Number of state variables for the Kalman filter. For example, if you are tracking the position and velocity of an object in two dimensions, dim_x would be 4. This is used to set the default size of P, Q, and u dim_z : int Number of measurement inputs. For example, if the sensor provides you with position in (x,y), dim_z would be 2. N : int, optional If provided, the size of the lag. Not needed if you are only using the smooth_batch() function. Required if calling smooth()
f686:c0:m0
def smooth(self, z, u=None):
<EOL>H = self.H<EOL>R = self.R<EOL>F = self.F<EOL>P = self.P<EOL>x = self.x<EOL>Q = self.Q<EOL>B = self.B<EOL>N = self.N<EOL>k = self.count<EOL>x_pre = dot(F, x)<EOL>if u is not None:<EOL><INDENT>x_pre += dot(B, u)<EOL><DEDENT>P = dot(F, P).dot(F.T) + Q<EOL>self.y = z - dot(H, x_pre)<EOL>self.S = dot(H, P).dot(H.T) + R<EOL>SI = inv(self.S)<EOL>K = dot(P, H.T).dot(SI)<EOL>x = x_pre + dot(K, self.y)<EOL>I_KH = self._I - dot(K, H)<EOL>P = dot(I_KH, P).dot(I_KH.T) + dot(K, R).dot(K.T)<EOL>self.xSmooth.append(x_pre.copy())<EOL>HTSI = dot(H.T, SI)<EOL>F_LH = (F - dot(K, H)).T<EOL>if k >= N:<EOL><INDENT>PS = P.copy() <EOL>for i in range(N):<EOL><INDENT>K = dot(PS, HTSI) <EOL>PS = dot(PS, F_LH) <EOL>si = k-i<EOL>self.xSmooth[si] = self.xSmooth[si] + dot(K, self.y)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.xSmooth[k] = x.copy()<EOL><DEDENT>self.count += <NUM_LIT:1><EOL>self.x = x<EOL>self.P = P<EOL>
Smooths the measurement using a fixed lag smoother. On return, self.xSmooth is populated with the N previous smoothed estimates, where self.xSmooth[k] is the kth time step. self.x merely contains the current Kalman filter output of the most recent measurement, and is not smoothed at all (beyond the normal Kalman filter processing). self.xSmooth grows in length on each call. If you run this 1 million times, it will contain 1 million elements. Sure, we could minimize this, but then this would make the caller's code much more cumbersome. This also means that you cannot use this filter to track more than one data set, as the data would be hopelessly intermingled. If you want to filter something else, create a new FixedLagSmoother object. Parameters ---------- z : ndarray or scalar measurement to be smoothed u : ndarray, optional If provided, control input to the filter
f686:c0:m1
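A streaming sketch of smooth(); the class name FixedLagSmoother and its filterpy.kalman location are assumptions matching the constructor above:

.. code-block:: Python

    import numpy as np
    from filterpy.kalman import FixedLagSmoother

    fls = FixedLagSmoother(dim_x=2, dim_z=1, N=8)   # 8-step lag
    fls.x = np.array([[0.], [0.]])
    fls.F = np.array([[1., 1.], [0., 1.]])
    fls.H = np.array([[1., 0.]])
    fls.R *= 5.

    zs = [t + np.random.randn()*2 for t in range(40)]
    for z in zs:
        fls.smooth(z)

    # fls.xSmooth[k] is the smoothed estimate of step k; the last N
    # entries have not yet received their full set of corrections
    xs = np.array(fls.xSmooth)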
def smooth_batch(self, zs, N, us=None):
<EOL>H = self.H<EOL>R = self.R<EOL>F = self.F<EOL>P = self.P<EOL>x = self.x<EOL>Q = self.Q<EOL>B = self.B<EOL>if x.ndim == <NUM_LIT:1>:<EOL><INDENT>xSmooth = zeros((len(zs), self.dim_x))<EOL>xhat = zeros((len(zs), self.dim_x))<EOL><DEDENT>else:<EOL><INDENT>xSmooth = zeros((len(zs), self.dim_x, <NUM_LIT:1>))<EOL>xhat = zeros((len(zs), self.dim_x, <NUM_LIT:1>))<EOL><DEDENT>for k, z in enumerate(zs):<EOL><INDENT>x_pre = dot(F, x)<EOL>if us is not None:<EOL><INDENT>x_pre += dot(B, us[k])<EOL><DEDENT>P = dot(F, P).dot(F.T) + Q<EOL>y = z - dot(H, x_pre)<EOL>S = dot(H, P).dot(H.T) + R<EOL>SI = inv(S)<EOL>K = dot(P, H.T).dot(SI)<EOL>x = x_pre + dot(K, y)<EOL>I_KH = self._I - dot(K, H)<EOL>P = dot(I_KH, P).dot(I_KH.T) + dot(K, R).dot(K.T)<EOL>xhat[k] = x.copy()<EOL>xSmooth[k] = x_pre.copy()<EOL>HTSI = dot(H.T, SI)<EOL>F_LH = (F - dot(K, H)).T<EOL>if k >= N:<EOL><INDENT>PS = P.copy() <EOL>for i in range(N):<EOL><INDENT>K = dot(PS, HTSI) <EOL>PS = dot(PS, F_LH) <EOL>si = k-i<EOL>xSmooth[si] = xSmooth[si] + dot(K, y)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>xSmooth[k] = xhat[k]<EOL><DEDENT><DEDENT>return xSmooth, xhat<EOL>
batch smooths the set of measurements using a fixed lag smoother. I consider this function a somewhat pedagogical exercise; why would you not use an RTS smoother if you are able to batch process your data? Hint: RTS is a much better smoother, and faster besides. Use it. This is a batch processor, so it does not alter any of the object's data. In particular, self.x is NOT modified. All data is returned by the function. Parameters ---------- zs : ndarray of measurements iterable list (usually ndarray, but whatever works for you) of measurements that you want to smooth, one per time step. N : int size of fixed lag in time steps us : ndarray, optional If provided, control input to the filter for each time step Returns ------- (xhat_smooth, xhat) : ndarray, ndarray xhat_smooth is the output of the N-step fixed-lag smoother xhat is the filter output of the standard Kalman filter
f686:c0:m2
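The batch form in use; per the docstring, the object's state is untouched, so the same smoother can be reused:

.. code-block:: Python

    import numpy as np
    from filterpy.kalman import FixedLagSmoother

    fls = FixedLagSmoother(dim_x=2, dim_z=1)   # N is passed to the call
    fls.x = np.array([[0.], [0.]])
    fls.F = np.array([[1., 1.], [0., 1.]])
    fls.H = np.array([[1., 0.]])
    fls.R *= 5.

    zs = [t + np.random.randn()*2 for t in range(40)]
    xs_smooth, xs_filtered = fls.smooth_batch(zs, N=8)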
def update(self, z, R=None):
if z is None:<EOL><INDENT>self.z = np.array([[None]*self.dim_z]).T<EOL>self.x_post = self.x.copy()<EOL>self.P_post = self.P.copy()<EOL>return<EOL><DEDENT>if R is None:<EOL><INDENT>R = self.R<EOL><DEDENT>elif np.isscalar(R):<EOL><INDENT>R = eye(self.dim_z) * R<EOL><DEDENT>self.y = z - dot(self.H, self.x)<EOL>PHT = dot(self.P, self.H.T)<EOL>self.S = dot(self.H, PHT) + R<EOL>self.SI = linalg.inv(self.S)<EOL>self.K = PHT.dot(self.SI)<EOL>self.x = self.x + dot(self.K, self.y)<EOL>I_KH = self.I - dot(self.K, self.H)<EOL>self.P = dot(I_KH, self.P).dot(I_KH.T) + dot(self.K, R).dot(self.K.T)<EOL>self.z = deepcopy(z)<EOL>self.x_post = self.x.copy()<EOL>self.P_post = self.P.copy()<EOL>self._log_likelihood = None<EOL>self._likelihood = None<EOL>self._mahalanobis = None<EOL>
Add a new measurement (z) to the Kalman filter. If z is None, nothing is changed. Parameters ---------- z : np.array measurement for this update. R : np.array, scalar, or None Optionally provide R to override the measurement noise for this one call, otherwise self.R will be used.
f687:c0:m1
def predict(self, u=<NUM_LIT:0>):
<EOL>self.x = dot(self.F, self.x) + dot(self.B, u)<EOL>self.P = self.alpha_sq * dot(self.F, self.P).dot(self.F.T) + self.Q<EOL>self.x_prior = self.x.copy()<EOL>self.P_prior = self.P.copy()<EOL>
Predict next state (prior) using the fading-memory state propagation equations. Parameters ---------- u : np.array Optional control vector. If non-zero, it is multiplied by B to create the control input into the system.
f687:c0:m2
def batch_filter(self, zs, Rs=None, update_first=False):
n = np.size(zs, <NUM_LIT:0>)<EOL>if Rs is None:<EOL><INDENT>Rs = [None] * n<EOL><DEDENT>means = zeros((n, self.dim_x, <NUM_LIT:1>))<EOL>means_p = zeros((n, self.dim_x, <NUM_LIT:1>))<EOL>covariances = zeros((n, self.dim_x, self.dim_x))<EOL>covariances_p = zeros((n, self.dim_x, self.dim_x))<EOL>if update_first:<EOL><INDENT>for i, (z, r) in enumerate(zip(zs, Rs)):<EOL><INDENT>self.update(z, r)<EOL>means[i, :] = self.x<EOL>covariances[i, :, :] = self.P<EOL>self.predict()<EOL>means_p[i, :] = self.x<EOL>covariances_p[i, :, :] = self.P<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for i, (z, r) in enumerate(zip(zs, Rs)):<EOL><INDENT>self.predict()<EOL>means_p[i, :] = self.x<EOL>covariances_p[i, :, :] = self.P<EOL>self.update(z, r)<EOL>means[i, :] = self.x<EOL>covariances[i, :, :] = self.P<EOL><DEDENT><DEDENT>return (means, covariances, means_p, covariances_p)<EOL>
Batch processes a sequences of measurements. Parameters ---------- zs : list-like list of measurements at each time step `self.dt` Missing measurements must be represented by 'None'. Rs : list-like, optional optional list of values to use for the measurement error covariance; a value of None in any position will cause the filter to use `self.R` for that time step. update_first : bool, optional, controls whether the order of operations is update followed by predict, or predict followed by update. Default is predict->update. Returns ------- means: np.array((n,dim_x,1)) array of the state for each time step after the update. Each entry is an np.array. In other words `means[k,:]` is the state at step `k`. covariance: np.array((n,dim_x,dim_x)) array of the covariances for each time step after the update. In other words `covariance[k,:,:]` is the covariance at step `k`. means_predictions: np.array((n,dim_x,1)) array of the state for each time step after the predictions. Each entry is an np.array. In other words `means[k,:]` is the state at step `k`. covariance_predictions: np.array((n,dim_x,dim_x)) array of the covariances for each time step after the prediction. In other words `covariance[k,:,:]` is the covariance at step `k`.
f687:c0:m3
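A batch sketch for this fading-memory filter. The class name FadingKalmanFilter and the (alpha, dim_x, dim_z) constructor are assumptions inferred from the alpha_sq attribute and may differ in your version; filterpy also exposes fading memory through KalmanFilter's alpha attribute:

.. code-block:: Python

    import numpy as np
    from filterpy.kalman import FadingKalmanFilter   # name assumed

    fkf = FadingKalmanFilter(1.05, dim_x=2, dim_z=1)  # alpha > 1 fades memory
    fkf.x = np.array([[0.], [0.]])
    fkf.F = np.array([[1., 1.], [0., 1.]])
    fkf.H = np.array([[1., 0.]])
    fkf.R *= 4.

    zs = [[t + np.random.randn()*2] for t in range(30)]
    means, covs, means_p, covs_p = fkf.batch_filter(zs)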
def get_prediction(self, u=<NUM_LIT:0>):
x = dot(self.F, self.x) + dot(self.B, u)<EOL>P = self.alpha_sq * dot(self.F, self.P).dot(self.F.T) + self.Q<EOL>return (x, P)<EOL>
Predicts the next state of the filter and returns it. Does not alter the state of the filter. Parameters ---------- u : np.array optional control input Returns ------- (x, P) State vector and covariance array of the prediction.
f687:c0:m4
def residual_of(self, z):
return z - dot(self.H, self.x)<EOL>
returns the residual for the given measurement (z). Does not alter the state of the filter.
f687:c0:m5
def measurement_of_state(self, x):
return dot(self.H, x)<EOL>
Helper function that converts a state into a measurement. Parameters ---------- x : np.array Kalman state vector Returns ------- z : np.array measurement corresponding to the given state
f687:c0:m6
@property<EOL><INDENT>def alpha(self):<DEDENT>
return sqrt(self.alpha_sq)<EOL>
scaling factor for fading memory
f687:c0:m7
@property<EOL><INDENT>def log_likelihood(self):<DEDENT>
if self._log_likelihood is None:<EOL><INDENT>self._log_likelihood = logpdf(x=self.y, cov=self.S)<EOL><DEDENT>return self._log_likelihood<EOL>
log-likelihood of the last measurement.
f687:c0:m8
@property<EOL><INDENT>def likelihood(self):<DEDENT>
if self._likelihood is None:<EOL><INDENT>self._likelihood = exp(self.log_likelihood)<EOL>if self._likelihood == <NUM_LIT:0>:<EOL><INDENT>self._likelihood = sys.float_info.min<EOL><DEDENT><DEDENT>return self._likelihood<EOL>
Computed from the log-likelihood. The log-likelihood can be very small, meaning a large negative value such as -28000. Taking the exp() of that results in 0.0, which can break typical algorithms which multiply by this value, so by default we always return a number >= sys.float_info.min.
f687:c0:m9
@property<EOL><INDENT>def mahalanobis(self):<DEDENT>
if self._mahalanobis is None:<EOL><INDENT>self._mahalanobis = sqrt(float(dot(dot(self.y.T, self.SI), self.y)))<EOL><DEDENT>return self._mahalanobis<EOL>
Mahalanobis distance of innovation. E.g. 3 means measurement was 3 standard deviations away from the predicted value. Returns ------- mahalanobis : float
f687:c0:m10
def predict_update(self, z, HJacobian, Hx, args=(), hx_args=(), u=<NUM_LIT:0>):
<EOL>if not isinstance(args, tuple):<EOL><INDENT>args = (args,)<EOL><DEDENT>if not isinstance(hx_args, tuple):<EOL><INDENT>hx_args = (hx_args,)<EOL><DEDENT>if np.isscalar(z) and self.dim_z == <NUM_LIT:1>:<EOL><INDENT>z = np.asarray([z], float)<EOL><DEDENT>F = self.F<EOL>B = self.B<EOL>P = self.P<EOL>Q = self.Q<EOL>R = self.R<EOL>x = self.x<EOL>H = HJacobian(x, *args)<EOL>x = dot(F, x) + dot(B, u)<EOL>P = dot(F, P).dot(F.T) + Q<EOL>self.x_prior = np.copy(x)<EOL>self.P_prior = np.copy(P)<EOL>PHT = dot(P, H.T)<EOL>self.S = dot(H, PHT) + R<EOL>self.SI = linalg.inv(self.S)<EOL>self.K = dot(PHT, self.SI)<EOL>self.y = z - Hx(x, *hx_args)<EOL>self.x = x + dot(self.K, self.y)<EOL>I_KH = self._I - dot(self.K, H)<EOL>self.P = dot(I_KH, P).dot(I_KH.T) + dot(self.K, R).dot(self.K.T)<EOL>self.z = deepcopy(z)<EOL>self.x_post = self.x.copy()<EOL>self.P_post = self.P.copy()<EOL>self._log_likelihood = None<EOL>self._likelihood = None<EOL>self._mahalanobis = None<EOL>
Performs the predict/update innovation of the extended Kalman filter. Parameters ---------- z : np.array measurement for this step. If `None`, only the predict step is performed. HJacobian : function function which computes the Jacobian of the H matrix (measurement function). Takes state variable (self.x) as input, along with the optional arguments in args, and returns H. Hx : function function which takes as input the state variable (self.x) along with the optional arguments in hx_args, and returns the measurement that would correspond to that state. args : tuple, optional, default (,) arguments to be passed into HJacobian after the required state variable. hx_args : tuple, optional, default (,) arguments to be passed into Hx after the required state variable. u : np.array or scalar optional control vector input to the filter.
f688:c0:m1
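A sketch of predict_update() with a toy nonlinear measurement (squared position), assuming the class is filterpy.kalman.ExtendedKalmanFilter; hx and HJacobian are local helpers written for this example:

.. code-block:: Python

    import numpy as np
    from filterpy.kalman import ExtendedKalmanFilter

    ekf = ExtendedKalmanFilter(dim_x=2, dim_z=1)
    ekf.x = np.array([10., 1.])                 # position, velocity
    ekf.F = np.array([[1., 1.], [0., 1.]])
    ekf.R *= 0.5

    def hx(x):
        return np.array([x[0]**2])              # z = position squared

    def HJacobian(x):
        return np.array([[2.*x[0], 0.]])        # dh/dx evaluated at x

    for z in [121., 146., 172.]:                # noisy squares of 11, 12, 13
        ekf.predict_update(z, HJacobian, hx)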
def update(self, z, HJacobian, Hx, R=None, args=(), hx_args=(),<EOL>residual=np.subtract):
if z is None:<EOL><INDENT>self.z = np.array([[None]*self.dim_z]).T<EOL>self.x_post = self.x.copy()<EOL>self.P_post = self.P.copy()<EOL>return<EOL><DEDENT>if not isinstance(args, tuple):<EOL><INDENT>args = (args,)<EOL><DEDENT>if not isinstance(hx_args, tuple):<EOL><INDENT>hx_args = (hx_args,)<EOL><DEDENT>if R is None:<EOL><INDENT>R = self.R<EOL><DEDENT>elif np.isscalar(R):<EOL><INDENT>R = eye(self.dim_z) * R<EOL><DEDENT>if np.isscalar(z) and self.dim_z == <NUM_LIT:1>:<EOL><INDENT>z = np.asarray([z], float)<EOL><DEDENT>H = HJacobian(self.x, *args)<EOL>PHT = dot(self.P, H.T)<EOL>self.S = dot(H, PHT) + R<EOL>self.K = PHT.dot(linalg.inv(self.S))<EOL>hx = Hx(self.x, *hx_args)<EOL>self.y = residual(z, hx)<EOL>self.x = self.x + dot(self.K, self.y)<EOL>I_KH = self._I - dot(self.K, H)<EOL>self.P = dot(I_KH, self.P).dot(I_KH.T) + dot(self.K, R).dot(self.K.T)<EOL>self._log_likelihood = None<EOL>self._likelihood = None<EOL>self._mahalanobis = None<EOL>self.z = deepcopy(z)<EOL>self.x_post = self.x.copy()<EOL>self.P_post = self.P.copy()<EOL>
Performs the update innovation of the extended Kalman filter. Parameters ---------- z : np.array measurement for this step. If `None`, posterior is not computed HJacobian : function function which computes the Jacobian of the H matrix (measurement function). Takes state variable (self.x) as input, returns H. Hx : function function which takes as input the state variable (self.x) along with the optional arguments in hx_args, and returns the measurement that would correspond to that state. R : np.array, scalar, or None Optionally provide R to override the measurement noise for this one call, otherwise self.R will be used. args : tuple, optional, default (,) arguments to be passed into HJacobian after the required state variable. for robot localization you might need to pass in information about the map and time of day, so you might have `args=(map_data, time)`, where the signature of HJacobian will be `def HJacobian(x, map, t)` hx_args : tuple, optional, default (,) arguments to be passed into Hx function after the required state variable. residual : function (z, z2), optional Optional function that computes the residual (difference) between the two measurement vectors. If you do not provide this, then the built in minus operator will be used. You will normally want to use the built in unless your residual computation is nonlinear (for example, if they are angles)
f688:c0:m2
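The residual hook in action for an angle measurement; hx, HJacobian and residual are local helpers written for this example:

.. code-block:: Python

    import numpy as np
    from filterpy.kalman import ExtendedKalmanFilter

    def residual(a, b):
        # wrap the angular difference into [-pi, pi)
        return (a - b + np.pi) % (2.*np.pi) - np.pi

    def hx(x):
        return np.array([x[0]])          # the angle is measured directly

    def HJacobian(x):
        return np.array([[1., 0.]])

    ekf = ExtendedKalmanFilter(dim_x=2, dim_z=1)
    ekf.x = np.array([0.1, 0.])
    ekf.F = np.eye(2)

    ekf.predict()
    # 6.2 rad wraps to about -0.08 rad, so the residual is ~-0.18,
    # not the ~6.1 a plain subtraction would give
    ekf.update(np.array([6.2]), HJacobian, hx, residual=residual)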
def predict_x(self, u=<NUM_LIT:0>):
self.x = dot(self.F, self.x) + dot(self.B, u)<EOL>
Predicts the next state of X. If you need to compute the next state yourself, override this function. You would need to do this, for example, if the usual Taylor expansion to generate F is not providing accurate results for you.
f688:c0:m3
def predict(self, u=<NUM_LIT:0>):
self.predict_x(u)<EOL>self.P = dot(self.F, self.P).dot(self.F.T) + self.Q<EOL>self.x_prior = np.copy(self.x)<EOL>self.P_prior = np.copy(self.P)<EOL>
Predict next state (prior) using the Kalman filter state propagation equations. Parameters ---------- u : np.array Optional control vector. If non-zero, it is multiplied by B to create the control input into the system.
f688:c0:m4
@property<EOL><INDENT>def log_likelihood(self):<DEDENT>
if self._log_likelihood is None:<EOL><INDENT>self._log_likelihood = logpdf(x=self.y, cov=self.S)<EOL><DEDENT>return self._log_likelihood<EOL>
log-likelihood of the last measurement.
f688:c0:m5
@property<EOL><INDENT>def likelihood(self):<DEDENT>
if self._likelihood is None:<EOL><INDENT>self._likelihood = exp(self.log_likelihood)<EOL>if self._likelihood == <NUM_LIT:0>:<EOL><INDENT>self._likelihood = sys.float_info.min<EOL><DEDENT><DEDENT>return self._likelihood<EOL>
Computed from the log-likelihood. The log-likelihood can be very small, meaning a large negative value such as -28000. Taking the exp() of that results in 0.0, which can break typical algorithms which multiply by this value, so by default we always return a number >= sys.float_info.min.
f688:c0:m6
@property<EOL><INDENT>def mahalanobis(self):<DEDENT>
if self._mahalanobis is None:<EOL><INDENT>self._mahalanobis = sqrt(float(dot(dot(self.y.T, self.SI), self.y)))<EOL><DEDENT>return self._mahalanobis<EOL>
Mahalanobis distance of innovation. E.g. 3 means measurement was 3 standard deviations away from the predicted value. Returns ------- mahalanobis : float
f688:c0:m7
def optimal_noise_smoothing(g):
h = ((<NUM_LIT:2>*g**<NUM_LIT:3> - <NUM_LIT:4>*g**<NUM_LIT:2>) + (<NUM_LIT:4>*g**<NUM_LIT:6> -<NUM_LIT:64>*g**<NUM_LIT:5> + <NUM_LIT:64>*g**<NUM_LIT:4>)**<NUM_LIT>) / (<NUM_LIT:8>*(<NUM_LIT:1>-g))<EOL>k = (h*(<NUM_LIT:2>-g) - g**<NUM_LIT:2>) / g<EOL>return (g, h, k)<EOL>
provides g,h,k parameters for optimal smoothing of noise for a given value of g. This is due to Polge and Bhagavan[1]. Parameters ---------- g : float value for g for which we will optimize for Returns ------- (g,h,k) : (float, float, float) values for g,h,k that provide optimal smoothing of noise Examples -------- .. code-block:: Python from filterpy.gh import GHKFilter, optimal_noise_smoothing g,h,k = optimal_noise_smoothing(g) f = GHKFilter(0,0,0,1,g,h,k) f.update(1.) References ---------- [1] Polge and Bhagavan. "A Study of the g-h-k Tracking Filter". Report No. RE-CR-76-1. University of Alabama in Huntsville. July, 1975
f691:m0
def least_squares_parameters(n):
den = (n+<NUM_LIT:2>)*(n+<NUM_LIT:1>)<EOL>g = (<NUM_LIT:2>*(<NUM_LIT:2>*n + <NUM_LIT:1>)) / den<EOL>h = <NUM_LIT:6> / den<EOL>return (g, h)<EOL>
An order 1 least squares filter can be computed by a g-h filter by varying g and h over time according to the formulas below, where the first measurement is at n=0, the second is at n=1, and so on: .. math:: h_n = \\frac{6}{(n+2)(n+1)} g_n = \\frac{2(2n+1)}{(n+2)(n+1)} Parameters ---------- n : int the nth measurement, starting at 0 (i.e. first measurement has n==0) Returns ------- (g,h) : (float, float) g and h parameters for this time step for the least-squares filter Examples -------- .. code-block:: Python from filterpy.gh import GHFilter, least_squares_parameters lsf = GHFilter (0, 0, 1, 0, 0) z = 10 for i in range(10): g,h = least_squares_parameters(i) lsf.update(z, g, h)
f691:m1
def critical_damping_parameters(theta, order=<NUM_LIT:2>):
if theta < <NUM_LIT:0> or theta > <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if order == <NUM_LIT:2>:<EOL><INDENT>return (<NUM_LIT:1.> - theta**<NUM_LIT:2>, (<NUM_LIT:1.> - theta)**<NUM_LIT:2>)<EOL><DEDENT>if order == <NUM_LIT:3>:<EOL><INDENT>return (<NUM_LIT:1.> - theta**<NUM_LIT:3>, <NUM_LIT>*(<NUM_LIT:1.>-theta**<NUM_LIT:2>)*(<NUM_LIT:1.>-theta), <NUM_LIT>*(<NUM_LIT:1> - theta)**<NUM_LIT:3>)<EOL><DEDENT>raise ValueError('<STR_LIT>'.format(order))<EOL>
Computes values for g and h (and k for the g-h-k filter) for a critically damped filter. The idea here is to create a filter that reduces the influence of old data as new data comes in. This allows the filter to track a moving target better. This goes by different names. It may be called the discounted least-squares g-h filter, a fading-memory polynomial filter of order 1, or a critically damped g-h filter. In a normal least-squares filter we compute the error for each point as .. math:: \\epsilon_t = (z-\\hat{x})^2 For a critically damped filter we reduce the influence of each error by .. math:: \\theta^{t-i} where .. math:: 0 <= \\theta <= 1 In other words the last error is scaled by theta, the next to last by theta squared, the next by theta cubed, and so on. Parameters ---------- theta : float, 0 <= theta <= 1 scaling factor for previous terms order : int, 2 (default) or 3 order of filter to create the parameters for. g and h will be calculated for order 2, and g, h, and k for order 3. Returns ------- g : scalar optimal value for g in the g-h or g-h-k filter h : scalar optimal value for h in the g-h or g-h-k filter k : scalar optimal value for k in the g-h-k filter Examples -------- .. code-block:: Python from filterpy.gh import GHFilter, critical_damping_parameters g,h = critical_damping_parameters(0.3) critical_filter = GHFilter(0, 0, 1, g, h) References ---------- Brookner, "Tracking and Kalman Filters Made Easy". John Wiley and Sons, 1998. Polge and Bhagavan. "A Study of the g-h-k Tracking Filter". Report No. RE-CR-76-1. University of Alabama in Huntsville. July, 1975
f691:m2
def benedict_bornder_constants(g, critical=False):
g_sqr = g**<NUM_LIT:2><EOL>if critical:<EOL><INDENT>return (g, <NUM_LIT> * (<NUM_LIT> - g_sqr - <NUM_LIT:2>*(<NUM_LIT:1>-g_sqr)**<NUM_LIT>) / g_sqr)<EOL><DEDENT>return (g, g_sqr / (<NUM_LIT>-g))<EOL>
Computes the g,h constants for a Benedict-Bordner filter, which minimizes transient errors for a g-h filter. Returns the values g,h for a specified g. Strictly speaking, only h is computed, g is returned unchanged. The default formula for the Benedict-Bordner allows ringing. We can "nearly" critically damp it; ringing will be reduced, but not entirely eliminated at the cost of reduced performance. Parameters ---------- g : float scaling factor g for the filter critical : boolean, default False Attempts to critically damp the filter. Returns ------- g : float scaling factor g (same as the g that was passed in) h : float scaling factor h that minimizes the transient errors Examples -------- .. code-block:: Python from filterpy.gh import GHFilter, benedict_bornder_constants g, h = benedict_bornder_constants(.855) f = GHFilter(0, 0, 1, g, h) References ---------- Brookner, "Tracking and Kalman Filters Made Easy". John Wiley and Sons, 1998.
f691:m3
def __init__(self, x0, dt, order, g, h=None, k=None):
if order < <NUM_LIT:0> or order > <NUM_LIT:2>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if np.isscalar(x0):<EOL><INDENT>self.x = np.zeros(order+<NUM_LIT:1>)<EOL>self.x[<NUM_LIT:0>] = x0<EOL><DEDENT>else:<EOL><INDENT>self.x = np.copy(x0.astype(float))<EOL><DEDENT>self.dt = dt<EOL>self.order = order<EOL>self.g = g<EOL>self.h = h<EOL>self.k = k<EOL>self.y = np.zeros(len(self.x)) <EOL>self.z = np.zeros(len(self.x))<EOL>
Creates a g-h filter of order 0, 1, or 2.
f691:c0:m0
def update(self, z, g=None, h=None, k=None):
if self.order == <NUM_LIT:0>:<EOL><INDENT>if g is None:<EOL><INDENT>g = self.g<EOL><DEDENT>self.y = z - self.x[<NUM_LIT:0>]<EOL>self.x += dot(g, self.y)<EOL><DEDENT>elif self.order == <NUM_LIT:1>:<EOL><INDENT>if g is None:<EOL><INDENT>g = self.g<EOL><DEDENT>if h is None:<EOL><INDENT>h = self.h<EOL><DEDENT>x = self.x[<NUM_LIT:0>]<EOL>dx = self.x[<NUM_LIT:1>]<EOL>dxdt = dot(dx, self.dt)<EOL>self.y = z - (x + dxdt)<EOL>self.x[<NUM_LIT:0>] = x + dxdt + g*self.y<EOL>self.x[<NUM_LIT:1>] = dx + h*self.y / self.dt<EOL>self.z = z<EOL><DEDENT>else: <EOL><INDENT>if g is None:<EOL><INDENT>g = self.g<EOL><DEDENT>if h is None:<EOL><INDENT>h = self.h<EOL><DEDENT>if k is None:<EOL><INDENT>k = self.k<EOL><DEDENT>x = self.x[<NUM_LIT:0>]<EOL>dx = self.x[<NUM_LIT:1>]<EOL>ddx = self.x[<NUM_LIT:2>]<EOL>dxdt = dot(dx, self.dt)<EOL>T2 = self.dt**<NUM_LIT><EOL>self.y = z -(x + dxdt +<NUM_LIT:0.5>*ddx*T2)<EOL>self.x[<NUM_LIT:0>] = x + dxdt + <NUM_LIT:0.5>*ddx*T2 + g*self.y<EOL>self.x[<NUM_LIT:1>] = dx + ddx*self.dt + h*self.y / self.dt<EOL>self.x[<NUM_LIT:2>] = ddx + <NUM_LIT:2>*k*self.y / (self.dt**<NUM_LIT:2>)<EOL><DEDENT>
Update the filter with measurement z. z must be the same type or treatable as the same type as self.x[0].
f691:c0:m1
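A short run of the order-2 (g-h-k) branch on a quadratic signal; the class name GHFilterOrder in filterpy.gh is an assumption matching this record's constructor:

.. code-block:: Python

    from filterpy.gh import GHFilterOrder

    # order 2 tracks position, velocity and acceleration
    f = GHFilterOrder(x0=0., dt=1., order=2, g=.5, h=.3, k=.1)
    for z in [1., 4., 9., 16., 25.]:     # samples of t**2
        f.update(z)
    print(f.x)   # [position, velocity, acceleration] estimates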
def update(self, z, g=None, h=None):
if g is None:<EOL><INDENT>g = self.g<EOL><DEDENT>if h is None:<EOL><INDENT>h = self.h<EOL><DEDENT>self.dx_prediction = self.dx<EOL>self.x_prediction = self.x + (self.dx*self.dt)<EOL>self.y = z - self.x_prediction<EOL>self.dx = self.dx_prediction + h * self.y / self.dt<EOL>self.x = self.x_prediction + g * self.y<EOL>return (self.x, self.dx)<EOL>
performs the g-h filter predict and update step on the measurement z. Modifies the member variables listed below, and returns the state of x and dx as a tuple as a convenience. **Modified Members** x filtered state variable dx derivative (velocity) of x y residual; difference between the measurement and the prediction for x x_prediction predicted value of x before incorporating the measurement z. dx_prediction predicted value of the derivative of x before incorporating the measurement z. Parameters ---------- z : any the measurement g : scalar (optional) Override the fixed self.g value for this update h : scalar (optional) Override the fixed self.h value for this update Returns ------- x filter output for x dx filter output for dx (derivative of x)
f691:c1:m1
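Per-step override of g, as the signature allows; GHFilter(x, dx, dt, g, h) is assumed to be the filterpy.gh class this record belongs to:

.. code-block:: Python

    from filterpy.gh import GHFilter

    f = GHFilter(x=0., dx=0., dt=1., g=.6, h=.1)
    x, dx = f.update(1.2)            # uses the stored g and h
    x, dx = f.update(2.3, g=.4)      # smaller g: trust this noisy
                                     # measurement less, just this step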
def batch_filter(self, data, save_predictions=False, saver=None):
x = self.x<EOL>dx = self.dx<EOL>n = len(data)<EOL>results = np.zeros((n+<NUM_LIT:1>, <NUM_LIT:2>))<EOL>results[<NUM_LIT:0>, <NUM_LIT:0>] = x<EOL>results[<NUM_LIT:0>, <NUM_LIT:1>] = dx<EOL>if save_predictions:<EOL><INDENT>predictions = np.zeros(n)<EOL><DEDENT>h_dt = self.h / self.dt<EOL>for i, z in enumerate(data):<EOL><INDENT>x_est = x + (dx * self.dt)<EOL>residual = z - x_est<EOL>dx = dx + h_dt * residual <EOL>x = x_est + self.g * residual<EOL>results[i+<NUM_LIT:1>, <NUM_LIT:0>] = x<EOL>results[i+<NUM_LIT:1>, <NUM_LIT:1>] = dx<EOL>if save_predictions:<EOL><INDENT>predictions[i] = x_est<EOL><DEDENT>if saver is not None:<EOL><INDENT>saver.save()<EOL><DEDENT><DEDENT>if save_predictions:<EOL><INDENT>return results, predictions<EOL><DEDENT>return results<EOL>
Given a sequence of data, performs g-h filtering with a fixed g and h. See update() if you need to vary g and/or h. Uses self.x and self.dx to initialize the filter, but DOES NOT alter self.x and self.dx during execution, allowing you to use this class multiple times without resetting self.x and self.dx. I'm not sure how often you would need to do that, but the capability is there. More exactly, none of the class member variables are modified by this function, in distinct contrast to update(), which changes most of them. Parameters ---------- data : list-like contains the data to be filtered. save_predictions : boolean the predictions will be saved and returned if this is true saver : filterpy.common.Saver, optional filterpy.common.Saver object. If provided, saver.save() will be called after every epoch Returns ------- results : np.array shape (n+1, 2), where n=len(data) contains the results of the filter, where results[i,0] is x, and results[i,1] is dx (derivative of x). The first entry is the initial values of x and dx as set by __init__. predictions : np.array shape (n,), optional the predictions for each step in the filter. Only returned if save_predictions == True
f691:c1:m2
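batch_filter() on a noisy ramp, illustrating the (n+1, 2) result layout described above:

.. code-block:: Python

    import numpy as np
    from filterpy.gh import GHFilter

    f = GHFilter(x=0., dx=0., dt=1., g=.8, h=.2)
    zs = np.arange(20) + np.random.randn(20)*.5
    results = f.batch_filter(zs)
    # row 0 holds the initial (x, dx); rows 1..n the filtered steps
    x_est, dx_est = results[1:, 0], results[1:, 1]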