Skip to content

Commit c9a9e48

Browse files
committed
remove np.matrix
1 parent 91d6f0e commit c9a9e48

File tree

2 files changed

+77
-81
lines changed

2 files changed

+77
-81
lines changed

SLAM/FastSLAM1/fast_slam1.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -132,7 +132,8 @@ def compute_jacobians(particle, xf, Pf, Q):
132132
d2 = dx**2 + dy**2
133133
d = math.sqrt(d2)
134134

135-
zp = np.array([[d, pi_2_pi(math.atan2(dy, dx) - particle.yaw)]]).T
135+
zp = np.array(
136+
[d, pi_2_pi(math.atan2(dy, dx) - particle.yaw)]).reshape(2, 1)
136137

137138
Hv = np.array([[-dx / d, -dy / d, 0.0],
138139
[dy / d2, -dx / d2, -1.0]])

SLAM/FastSLAM2/fast_slam2.py

Lines changed: 75 additions & 80 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@
2727
STATE_SIZE = 3 # State size [x,y,yaw]
2828
LM_SIZE = 2  # LM state size [x,y]
2929
N_PARTICLE = 100  # number of particles
30-
NTH = N_PARTICLE / 1.0 # Number of particle for re-sampling
30+
NTH = N_PARTICLE / 1.5  # Number of particles for re-sampling
3131

3232
show_animation = True
3333

@@ -41,9 +41,9 @@ def __init__(self, N_LM):
4141
self.yaw = 0.0
4242
self.P = np.eye(3)
4343
# landmark x-y positions
44-
self.lm = np.matrix(np.zeros((N_LM, LM_SIZE)))
44+
self.lm = np.zeros((N_LM, LM_SIZE))
4545
# landmark position covariance
46-
self.lmP = np.matrix(np.zeros((N_LM * LM_SIZE, LM_SIZE)))
46+
self.lmP = np.zeros((N_LM * LM_SIZE, LM_SIZE))
4747

4848

4949
def fast_slam2(particles, u, z):
@@ -97,7 +97,7 @@ def predict_particles(particles, u):
9797
px[0, 0] = particles[i].x
9898
px[1, 0] = particles[i].y
9999
px[2, 0] = particles[i].yaw
100-
ud = u + (np.matrix(np.random.randn(1, 2)) * R).T # add noise
100+
ud = u + (np.random.randn(1, 2) @ R).T # add noise
101101
px = motion_model(px, ud)
102102
particles[i].x = px[0, 0]
103103
particles[i].y = px[1, 0]
@@ -108,9 +108,9 @@ def predict_particles(particles, u):
108108

109109
def add_new_lm(particle, z, Q):
110110

111-
r = z[0, 0]
112-
b = z[0, 1]
113-
lm_id = int(z[0, 2])
111+
r = z[0]
112+
b = z[1]
113+
lm_id = int(z[2])
114114

115115
s = math.sin(pi_2_pi(particle.yaw + b))
116116
c = math.cos(pi_2_pi(particle.yaw + b))
@@ -119,10 +119,10 @@ def add_new_lm(particle, z, Q):
119119
particle.lm[lm_id, 1] = particle.y + r * s
120120

121121
# covariance
122-
Gz = np.matrix([[c, -r * s],
123-
[s, r * c]])
122+
Gz = np.array([[c, -r * s],
123+
[s, r * c]])
124124

125-
particle.lmP[2 * lm_id:2 * lm_id + 2] = Gz * Q * Gz.T
125+
particle.lmP[2 * lm_id:2 * lm_id + 2] = Gz @ Q @ Gz.T
126126

127127
return particle
128128

@@ -133,44 +133,45 @@ def compute_jacobians(particle, xf, Pf, Q):
133133
d2 = dx**2 + dy**2
134134
d = math.sqrt(d2)
135135

136-
zp = np.matrix([[d, pi_2_pi(math.atan2(dy, dx) - particle.yaw)]]).T
136+
zp = np.array(
137+
[d, pi_2_pi(math.atan2(dy, dx) - particle.yaw)]).reshape(2, 1)
137138

138-
Hv = np.matrix([[-dx / d, -dy / d, 0.0],
139-
[dy / d2, -dx / d2, -1.0]])
139+
Hv = np.array([[-dx / d, -dy / d, 0.0],
140+
[dy / d2, -dx / d2, -1.0]])
140141

141-
Hf = np.matrix([[dx / d, dy / d],
142-
[-dy / d2, dx / d2]])
142+
Hf = np.array([[dx / d, dy / d],
143+
[-dy / d2, dx / d2]])
143144

144-
Sf = Hf * Pf * Hf.T + Q
145+
Sf = Hf @ Pf @ Hf.T + Q
145146

146147
return zp, Hv, Hf, Sf
147148

148149

149150
def update_KF_with_cholesky(xf, Pf, v, Q, Hf):
150-
PHt = Pf * Hf.T
151-
S = Hf * PHt + Q
151+
PHt = Pf @ Hf.T
152+
S = Hf @ PHt + Q
152153

153154
S = (S + S.T) * 0.5
154155
SChol = np.linalg.cholesky(S).T
155156
SCholInv = np.linalg.inv(SChol)
156-
W1 = PHt * SCholInv
157-
W = W1 * SCholInv.T
157+
W1 = PHt @ SCholInv
158+
W = W1 @ SCholInv.T
158159

159-
x = xf + W * v
160-
P = Pf - W1 * W1.T
160+
x = xf + W @ v
161+
P = Pf - W1 @ W1.T
161162

162163
return x, P
163164

164165

165166
def update_landmark(particle, z, Q):
166167

167-
lm_id = int(z[0, 2])
168-
xf = np.matrix(particle.lm[lm_id, :]).T
169-
Pf = np.matrix(particle.lmP[2 * lm_id:2 * lm_id + 2, :])
168+
lm_id = int(z[2])
169+
xf = np.array(particle.lm[lm_id, :]).reshape(2, 1)
170+
Pf = np.array(particle.lmP[2 * lm_id:2 * lm_id + 2])
170171

171172
zp, Hv, Hf, Sf = compute_jacobians(particle, xf, Pf, Q)
172173

173-
dz = z[0, 0: 2].T - zp
174+
dz = z[0:2].reshape(2, 1) - zp
174175
dz[1, 0] = pi_2_pi(dz[1, 0])
175176

176177
xf, Pf = update_KF_with_cholesky(xf, Pf, dz, Q, Hf)
@@ -183,21 +184,20 @@ def update_landmark(particle, z, Q):
183184

184185
def compute_weight(particle, z, Q):
185186

186-
lm_id = int(z[0, 2])
187-
xf = np.matrix(particle.lm[lm_id, :]).T
188-
Pf = np.matrix(particle.lmP[2 * lm_id:2 * lm_id + 2])
187+
lm_id = int(z[2])
188+
xf = np.array(particle.lm[lm_id, :]).reshape(2, 1)
189+
Pf = np.array(particle.lmP[2 * lm_id:2 * lm_id + 2])
189190
zp, Hv, Hf, Sf = compute_jacobians(particle, xf, Pf, Q)
190191

191-
dz = z[0, 0: 2].T - zp
192+
dz = z[0:2].reshape(2, 1) - zp
192193
dz[1, 0] = pi_2_pi(dz[1, 0])
193194

194195
try:
195196
invS = np.linalg.inv(Sf)
196197
except np.linalg.linalg.LinAlgError:
197-
print("singuler")
198198
return 1.0
199199

200-
num = math.exp(-0.5 * dz.T * invS * dz)
200+
num = math.exp(-0.5 * dz.T @ invS @ dz)
201201
den = 2.0 * math.pi * math.sqrt(np.linalg.det(Sf))
202202

203203
w = num / den
@@ -207,22 +207,22 @@ def compute_weight(particle, z, Q):
207207

208208
def proposal_sampling(particle, z, Q):
209209

210-
lm_id = int(z[0, 2])
211-
xf = np.matrix(particle.lm[lm_id, :]).T
212-
Pf = np.matrix(particle.lmP[2 * lm_id:2 * lm_id + 2])
210+
lm_id = int(z[2])
211+
xf = particle.lm[lm_id, :].reshape(2, 1)
212+
Pf = particle.lmP[2 * lm_id:2 * lm_id + 2]
213213
# State
214-
x = np.matrix([[particle.x, particle.y, particle.yaw]]).T
214+
x = np.array([particle.x, particle.y, particle.yaw]).reshape(3, 1)
215215
P = particle.P
216216
zp, Hv, Hf, Sf = compute_jacobians(particle, xf, Pf, Q)
217217

218218
Sfi = np.linalg.inv(Sf)
219-
dz = z[0, 0: 2].T - zp
220-
dz[1, 0] = pi_2_pi(dz[1, 0])
219+
dz = z[0:2].reshape(2, 1) - zp
220+
dz[1] = pi_2_pi(dz[1])
221221

222222
Pi = np.linalg.inv(P)
223223

224-
particle.P = np.linalg.inv(Hv.T * Sfi * Hv + Pi) # proposal covariance
225-
x += particle.P * Hv.T * Sfi * dz # proposal mean
224+
particle.P = np.linalg.inv(Hv.T @ Sfi @ Hv + Pi) # proposal covariance
225+
x += particle.P @ Hv.T @ Sfi @ dz # proposal mean
226226

227227
particle.x = x[0, 0]
228228
particle.y = x[1, 0]
@@ -233,21 +233,20 @@ def proposal_sampling(particle, z, Q):
233233

234234
def update_with_observation(particles, z):
235235

236-
for iz in range(len(z[:, 0])):
237-
238-
lmid = int(z[iz, 2])
236+
for iz in range(len(z[0, :])):
237+
lmid = int(z[2, iz])
239238

240239
for ip in range(N_PARTICLE):
241240
# new landmark
242241
if abs(particles[ip].lm[lmid, 0]) <= 0.01:
243-
particles[ip] = add_new_lm(particles[ip], z[iz, :], Q)
242+
particles[ip] = add_new_lm(particles[ip], z[:, iz], Q)
244243
# known landmark
245244
else:
246-
w = compute_weight(particles[ip], z[iz, :], Q)
245+
w = compute_weight(particles[ip], z[:, iz], Q)
247246
particles[ip].w *= w
248247

249-
particles[ip] = update_landmark(particles[ip], z[iz, :], Q)
250-
particles[ip] = proposal_sampling(particles[ip], z[iz, :], Q)
248+
particles[ip] = update_landmark(particles[ip], z[:, iz], Q)
249+
particles[ip] = proposal_sampling(particles[ip], z[:, iz], Q)
251250

252251
return particles
253252

@@ -263,20 +262,20 @@ def resampling(particles):
263262
for i in range(N_PARTICLE):
264263
pw.append(particles[i].w)
265264

266-
pw = np.matrix(pw)
265+
pw = np.array(pw)
267266

268-
Neff = 1.0 / (pw * pw.T)[0, 0] # Effective particle number
267+
Neff = 1.0 / (pw @ pw.T) # Effective particle number
269268
# print(Neff)
270269

271270
if Neff < NTH: # resampling
272271
wcum = np.cumsum(pw)
273272
base = np.cumsum(pw * 0.0 + 1 / N_PARTICLE) - 1 / N_PARTICLE
274-
resampleid = base + np.random.rand(base.shape[1]) / N_PARTICLE
273+
resampleid = base + np.random.rand(base.shape[0]) / N_PARTICLE
275274

276275
inds = []
277276
ind = 0
278277
for ip in range(N_PARTICLE):
279-
while ((ind < wcum.shape[1] - 1) and (resampleid[0, ip] > wcum[0, ind])):
278+
while ((ind < wcum.shape[0] - 1) and (resampleid[ip] > wcum[ind])):
280279
ind += 1
281280
inds.append(ind)
282281

@@ -294,41 +293,42 @@ def resampling(particles):
294293

295294
def calc_input(time):
296295

297-
if time <= 3.0:
296+
if time <= 3.0: # wait at first
298297
v = 0.0
299298
yawrate = 0.0
300299
else:
301300
v = 1.0 # [m/s]
302301
yawrate = 0.1 # [rad/s]
303302

304-
u = np.matrix([v, yawrate]).T
303+
u = np.array([v, yawrate]).reshape(2, 1)
305304

306305
return u
307306

308307

309308
def observation(xTrue, xd, u, RFID):
310309

310+
# calc true state
311311
xTrue = motion_model(xTrue, u)
312312

313-
# add noise to gps x-y
314-
z = np.matrix(np.zeros((0, 3)))
313+
# add noise to range observation
314+
z = np.zeros((3, 0))
315315

316316
for i in range(len(RFID[:, 0])):
317317

318318
dx = RFID[i, 0] - xTrue[0, 0]
319319
dy = RFID[i, 1] - xTrue[1, 0]
320320
d = math.sqrt(dx**2 + dy**2)
321-
angle = math.atan2(dy, dx) - xTrue[2, 0]
321+
angle = pi_2_pi(math.atan2(dy, dx) - xTrue[2, 0])
322322
if d <= MAX_RANGE:
323323
dn = d + np.random.randn() * Qsim[0, 0] # add noise
324324
anglen = angle + np.random.randn() * Qsim[1, 1] # add noise
325-
zi = np.matrix([dn, pi_2_pi(anglen), i])
326-
z = np.vstack((z, zi))
325+
zi = np.array([dn, pi_2_pi(anglen), i]).reshape(3, 1)
326+
z = np.hstack((z, zi))
327327

328328
# add noise to input
329329
ud1 = u[0, 0] + np.random.randn() * Rsim[0, 0]
330330
ud2 = u[1, 0] + np.random.randn() * Rsim[1, 1] + OFFSET_YAWRATE_NOISE
331-
ud = np.matrix([ud1, ud2]).T
331+
ud = np.array([ud1, ud2]).reshape(2, 1)
332332

333333
xd = motion_model(xd, ud)
334334

@@ -337,15 +337,15 @@ def observation(xTrue, xd, u, RFID):
337337

338338
def motion_model(x, u):
339339

340-
F = np.matrix([[1.0, 0, 0],
341-
[0, 1.0, 0],
342-
[0, 0, 1.0]])
340+
F = np.array([[1.0, 0, 0],
341+
[0, 1.0, 0],
342+
[0, 0, 1.0]])
343343

344-
B = np.matrix([[DT * math.cos(x[2, 0]), 0],
345-
[DT * math.sin(x[2, 0]), 0],
346-
[0.0, DT]])
344+
B = np.array([[DT * math.cos(x[2, 0]), 0],
345+
[DT * math.sin(x[2, 0]), 0],
346+
[0.0, DT]])
347347

348-
x = F * x + B * u
348+
x = F @ x + B @ u
349349

350350
x[2, 0] = pi_2_pi(x[2, 0])
351351

@@ -374,10 +374,9 @@ def main():
374374
N_LM = RFID.shape[0]
375375

376376
# State Vector [x y yaw v]'
377-
xEst = np.matrix(np.zeros((STATE_SIZE, 1)))
378-
xTrue = np.matrix(np.zeros((STATE_SIZE, 1)))
379-
380-
xDR = np.matrix(np.zeros((STATE_SIZE, 1))) # Dead reckoning
377+
xEst = np.zeros((STATE_SIZE, 1)) # SLAM estimation
378+
xTrue = np.zeros((STATE_SIZE, 1)) # True state
379+
xDR = np.zeros((STATE_SIZE, 1)) # Dead reckoning
381380

382381
# history
383382
hxEst = xEst
@@ -408,21 +407,17 @@ def main():
408407
plt.plot(RFID[:, 0], RFID[:, 1], "*k")
409408

410409
for iz in range(len(z[:, 0])):
411-
lmid = int(z[iz, 2])
412-
plt.plot([xEst[0, 0], RFID[lmid, 0]], [
413-
xEst[1, 0], RFID[lmid, 1]], "-k")
410+
lmid = int(z[2, iz])
411+
plt.plot([xEst[0], RFID[lmid, 0]], [
412+
xEst[1], RFID[lmid, 1]], "-k")
414413

415414
for i in range(N_PARTICLE):
416415
plt.plot(particles[i].x, particles[i].y, ".r")
417416
plt.plot(particles[i].lm[:, 0], particles[i].lm[:, 1], "xb")
418417

419-
plt.plot(np.array(hxTrue[0, :]).flatten(),
420-
np.array(hxTrue[1, :]).flatten(), "-b")
421-
plt.plot(np.array(hxDR[0, :]).flatten(),
422-
np.array(hxDR[1, :]).flatten(), "-k")
423-
plt.plot(np.array(hxEst[0, :]).flatten(),
424-
np.array(hxEst[1, :]).flatten(), "-r")
425-
418+
plt.plot(hxTrue[0, :], hxTrue[1, :], "-b")
419+
plt.plot(hxDR[0, :], hxDR[1, :], "-k")
420+
plt.plot(hxEst[0, :], hxEst[1, :], "-r")
426421
plt.plot(xEst[0], xEst[1], "xk")
427422
plt.axis("equal")
428423
plt.grid(True)

0 commit comments

Comments
 (0)