Mercurial Hosting > traffic-intelligence
comparison scripts/test-compute-object-position-from-features.py @ 334:1d90e9080cb2
moved python scripts to the scripts directory
author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
---|---|
date | Fri, 14 Jun 2013 10:34:11 -0400 |
parents | python/compute-object-from-features.py@04a874e1f19f |
children | 56cc8a1f7082 |
Comparison between revisions 333:c9201f6b143a and 334:1d90e9080cb2 (lines shown as equal, deleted, inserted, or replaced).
1 #!/usr/bin/env python | |
2 | |
3 import sys | |
4 | |
5 import matplotlib.mlab as pylab | |
6 import matplotlib.pyplot as plt | |
7 import numpy as np | |
8 | |
9 import cv | |
10 import utils | |
11 import cvutils | |
12 import ubc_utils | |
13 import moving | |
14 | |
15 # use something like getopt to manage arguments if necessary | |
16 | |
# Parse command-line arguments (switch to getopt/argparse if more options are added)
if len(sys.argv) < 3:
    print('Usage: {0} <video-filename> <n-objects>'.format(sys.argv[0]))
    # fix: exit with a non-zero status on incorrect invocation
    # (plain sys.exit() reports success, which misleads calling scripts)
    sys.exit(1)

# Strip a trailing '.avi' extension so the derived data filenames can be built
# from the common prefix (<prefix>-objects.txt, <prefix>-features.txt, ...)
if sys.argv[1].endswith('.avi'):
    videoFilenamePrefix = utils.removeExtension(sys.argv[1], '.')
else:
    videoFilenamePrefix = sys.argv[1]

# index of the object of interest in the objects file
objectNum = int(sys.argv[2])

# Load the objects up to and including objectNum, the features it is made of,
# and the homography (presumably image-to-world -- TODO confirm)
objects = ubc_utils.loadTrajectories(videoFilenamePrefix + '-objects.txt', objectNum + 1)
obj = objects[objectNum]
features = ubc_utils.loadTrajectories(videoFilenamePrefix + '-features.txt', max(obj.featureNumbers) + 1)
h = np.loadtxt(videoFilenamePrefix + '-homography.txt')

# inverse homography, used below to project world trajectories back to image space
invh = cvutils.invertHomography(h)
def computeGroundTrajectory(features, homography, timeInterval = None):
    '''Computes a trajectory for the set of features as the closest point to the ground
    using the homography in image space.

    At each instant of timeInterval, selects the feature whose projected
    image-space y coordinate is largest, i.e. lowest in the image
    (assumes image y grows downward and is non-negative -- TODO confirm),
    and takes that feature's position for the returned moving.Trajectory.'''
    if not timeInterval:
        raise Exception('not implemented') # compute from the features

    # one row per feature, one column per instant of timeInterval;
    # -1 marks instants where the feature does not exist, so it can never
    # win the argmax below against a valid (non-negative) image y coordinate
    yCoordinates = -np.ones((len(features),int(timeInterval.length())))
    for i,f in enumerate(features):
        traj = f.getPositions().asArray()
        # project the feature positions with the homography (image space)
        imgTraj = cvutils.projectArray(homography, traj)
        # write the y row into the columns spanned by the feature's lifetime
        yCoordinates[i,f.getFirstInstant()-timeInterval.first:f.getLastInstant()+1-timeInterval.first] = imgTraj[1,:]

    # per-instant index of the feature with the largest image y (lowest point)
    indices = np.argmax(yCoordinates,0)
    newTraj = moving.Trajectory()
    for j,idx in enumerate(indices):
        newTraj.addPosition(features[idx].getPositionAtInstant(j+timeInterval.first))
        #newVelocities.addPosition(features[obj.featureNumbers[idx]].getVelocityAtInstant(j+obj.getFirstInstant()))

    return newTraj
54 | |
def computeMedianTrajectory(features, timeInterval = None):
    '''Returns a moving.Trajectory whose position at each instant of timeInterval
    is the coordinate-wise median of the positions of the features that exist
    at that instant.'''
    if not timeInterval:
        raise Exception('not implemented') # compute from the features

    medianTraj = moving.Trajectory()
    for instant in timeInterval:
        # positions (as [x, y] lists) of the features alive at this instant
        alivePoints = [f.getPositionAtInstant(instant).aslist()
                       for f in features if f.existsAtInstant(instant)]
        medianPoint = np.median(np.array(alivePoints), 0)
        medianTraj.addPositionXY(medianPoint[0], medianPoint[1])

    return medianTraj
69 | |
70 # TODO version median: conversion to large matrix will not work, have to do it frame by frame | |
71 | |
def kalmanFilter(positions, velocities, processNoiseCov, measurementNoiseCov):
    '''Smooths the measured positions and velocities with a constant-acceleration
    Kalman filter (legacy OpenCV `cv` API).

    State vector: (x, y, vx, vy, ax, ay); measurement vector: (x, y, vx, vy).
    processNoiseCov and measurementNoiseCov are scalars placed on the diagonal
    of the corresponding covariance matrices.
    Returns (filteredPositions, filteredVelocities) as moving.Trajectory objects.'''
    kalman=cv.CreateKalman(6, 4)
    # Constant-acceleration transition model with dt = 1 frame
    # (CreateKalman initializes transition_matrix to identity, so only the
    # off-diagonal coupling terms are set here -- TODO confirm for this cv version):
    #   x  += vx + ax/2,  y  += vy + ay/2,  vx += ax,  vy += ay
    kalman.transition_matrix[0,2]=1
    kalman.transition_matrix[0,4]=1./2
    kalman.transition_matrix[1,3]=1
    kalman.transition_matrix[1,5]=1./2
    kalman.transition_matrix[2,4]=1
    kalman.transition_matrix[3,5]=1

    # measurement directly observes the first four state components
    cv.SetIdentity(kalman.measurement_matrix, 1.)
    cv.SetIdentity(kalman.process_noise_cov, processNoiseCov)
    cv.SetIdentity(kalman.measurement_noise_cov, measurementNoiseCov)
    cv.SetIdentity(kalman.error_cov_post, 1.)

    # Initial state: first position/velocity, acceleration estimated by the
    # finite difference a = (v[2] - v[0]) / 2
    # NOTE(review): assumes at least 3 velocity samples -- verify at call sites
    p = positions[0]
    v = velocities[0]
    v2 = velocities[2]
    a = (v2-v).multiply(0.5)
    kalman.state_post[0,0]=p.x
    kalman.state_post[1,0]=p.y
    kalman.state_post[2,0]=v.x
    kalman.state_post[3,0]=v.y
    kalman.state_post[4,0]=a.x
    kalman.state_post[5,0]=a.y

    # predict/correct loop over every instant; collect the posterior state
    filteredPositions = moving.Trajectory()
    filteredVelocities = moving.Trajectory()
    measurement = cv.CreateMat(4,1,cv.CV_32FC1)
    for i in xrange(positions.length()):
        cv.KalmanPredict(kalman) # no control
        p = positions[i]
        v = velocities[i]
        measurement[0,0] = p.x
        measurement[1,0] = p.y
        measurement[2,0] = v.x
        measurement[3,0] = v.y
        cv.KalmanCorrect(kalman, measurement)
        filteredPositions.addPositionXY(kalman.state_post[0,0], kalman.state_post[1,0])
        filteredVelocities.addPositionXY(kalman.state_post[2,0], kalman.state_post[3,0])

    return (filteredPositions, filteredVelocities)
113 | |
# Lowest-point trajectory of the object's features, projected with the inverse
# homography, then Kalman-smoothed against the object's own velocities
groundTrajectory = computeGroundTrajectory([features[i] for i in obj.featureNumbers], invh, obj.getTimeInterval())
(filteredPositions, filteredVelocities) = kalmanFilter(groundTrajectory, obj.getVelocities(), 0.1, 0.1)

#medianTrajectory = computeMedianTrajectory([features[i] for i in obj.featureNumbers], obj.getTimeInterval())

# Median per-coordinate offset between the object's stored trajectory and the
# ground trajectory, over the object's whole time interval
delta = []
for t in obj.getTimeInterval():
    p1 = obj.getPositionAtInstant(t)
    p2 = groundTrajectory[t-obj.getFirstInstant()]
    delta.append((p1-p2).aslist())

delta = np.median(delta, 0)

# Object trajectory translated by the median offset
# NOTE(review): p1.x -= ... mutates the point returned by getPositionAtInstant;
# if that is a reference into obj's stored trajectory (not a copy), obj itself
# is modified here -- verify moving's copy semantics
translated = moving.Trajectory()
for t in obj.getTimeInterval():
    p1 = obj.getPositionAtInstant(t)
    p1.x -= delta[0]
    p1.y -= delta[1]
    translated.addPosition(p1)

# Plot everything for visual comparison: object (red), its features, ground
# trajectory (blue), Kalman-filtered (green), translated (black)
plt.clf()
obj.draw('rx-')
for fnum in obj.featureNumbers: features[fnum].draw()
groundTrajectory.draw('bx-')
filteredPositions.draw('gx-')
translated.draw('kx-')
#medianTrajectory.draw('kx-')
plt.axis('equal')