changeset 838:2918de3d40fc

first working version of the display of merged tracking
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Wed, 06 Jul 2016 12:30:08 -0400
parents e01cabca4c55
children 2c7b4e6a32dd
files python/metadata.py scripts/display-synced-trajectories.py
diffstat 2 files changed, 49 insertions(+), 19 deletions(-)
--- a/python/metadata.py	Wed Jul 06 10:58:18 2016 -0400
+++ b/python/metadata.py	Wed Jul 06 12:30:08 2016 -0400
@@ -11,6 +11,7 @@
 from sqlalchemy.ext.declarative import declarative_base
 
 from utils import datetimeFormat, removeExtension
+from cvutils import computeUndistortMaps
 from moving import TimeInterval
 
 Base = declarative_base()
@@ -154,7 +155,14 @@
             self.distortionCoefficients[4] = self.distortionCoefficients4
         else:
             self.distortionCoefficients = None
-        
+
+    def computeUndistortMaps(self):
+        if self.undistortedImageMultiplication is not None and self.intrinsicCameraMatrix is not None and self.distortionCoefficients is not None:
+            self.map1, self.map2 = computeUndistortMaps(self.resX, self.resY, self.undistortedImageMultiplication, self.intrinsicCameraMatrix, self.distortionCoefficients)
+        else:
+            self.map1 = None
+            self.map2 = None
+    
 class CameraView(Base):
     __tablename__ = 'camera_views'
     idx = Column(Integer, primary_key=True)
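
The cvutils.computeUndistortMaps helper imported above is defined outside this changeset. A minimal sketch of what such a helper can look like with OpenCV, assuming it enlarges the output image by undistortedImageMultiplication, recenters the principal point, and delegates to cv2.initUndistortRectifyMap (everything here besides the cv2 calls is an assumption, not code from this diff):

# Sketch of a cvutils.computeUndistortMaps-style helper (assumed, not from this changeset)
from copy import deepcopy
import numpy as np
import cv2

def computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients):
    '''Returns the pixel remapping maps (map1, map2) to pass to cv2.remap,
    for an undistorted image enlarged by undistortedImageMultiplication'''
    newImgSize = (int(round(width*undistortedImageMultiplication)), int(round(height*undistortedImageMultiplication)))
    # recenter the principal point in the enlarged undistorted image
    newCameraMatrix = deepcopy(intrinsicCameraMatrix)
    newCameraMatrix[0,2] = newImgSize[0]/2.
    newCameraMatrix[1,2] = newImgSize[1]/2.
    return cv2.initUndistortRectifyMap(intrinsicCameraMatrix, np.array(distortionCoefficients), np.identity(3), newCameraMatrix, newImgSize, cv2.CV_32FC1)

Computing the maps once per camera type, as the new CameraType.computeUndistortMaps method does, avoids redoing this work for every frame; cv2.remap then only performs the cheap per-frame lookup.
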
--- a/scripts/display-synced-trajectories.py	Wed Jul 06 10:58:18 2016 -0400
+++ b/scripts/display-synced-trajectories.py	Wed Jul 06 12:30:08 2016 -0400
@@ -15,6 +15,7 @@
 parser.add_argument('-t', dest = 'trajectoryType', help = 'type of trajectories to display', choices = ['feature', 'object'], default = 'object')
 parser.add_argument('-r', dest = 'rescale', help = 'rescaling factor for the displayed image', default = 1., type = float)
 parser.add_argument('-s', dest = 'step', help = 'display every s image', default = 1, type = int)
+parser.add_argument('-u', dest = 'undistort', help = 'undistort the video (because features have been extracted that way)', action = 'store_true')
 
 args = parser.parse_args()
 
@@ -27,20 +28,28 @@
 
 dirname = os.path.split(args.metadataFilename)[0]
 
+frameRate = mergedSequence.cameraView.cameraType.frameRate
 startTime = datetime.strptime(args.startTime, utils.datetimeFormat)
-if startTime > mergedSequence.startTime:
-    mergedFirstFrameNum = (startTime-mergedSequence.startTime).seconds*mergedSequence.cameraView.cameraType.frameRate
+mergedFirstFrameNum = utils.deltaFrames(mergedSequence.startTime, startTime, frameRate)
 
 cameraViews = session.query(CameraView).filter(CameraView.site == mergedSequence.cameraView.site).filter(CameraView.virtual == False).all()
-videoSequences = session.query(VideoSequence).filter(VideoSequence.virtual == False).filter(VideoSequence.startTime <= startTime).all()
+videoSequences = session.query(VideoSequence).filter(VideoSequence.virtual == False).all()
 #videoSequences.remove(mergedSequence)
-videoSequences = [v for v in videoSequences if v.cameraView in cameraViews and v.containsInstant(startTime)]
+videoSequences = [v for v in videoSequences if v.cameraView in cameraViews and (v.containsInstant(startTime) or v.startTime > startTime)]
 filenames = [dirname+os.path.sep+v.getVideoSequenceFilename() for v in videoSequences]
-firstFrameNums = [v.getFrameNum(startTime) for v in videoSequences]
+firstFrameNums = [utils.deltaFrames(v.startTime, startTime, frameRate) for v in videoSequences] # use pos/neg first frame nums
 windowNames = [v.cameraView.description for v in videoSequences]
-homographies = [np.inv(np.loadtxt(dirname+os.path.sep+v.cameraView.getHomographyFilename())) for v in videoSequences]
+
+# homography and undistort
+homographies = [np.linalg.inv(np.loadtxt(dirname+os.path.sep+v.cameraView.getHomographyFilename())) for v in videoSequences]
+if args.undistort:
+    cameraTypes = set([cv.cameraType for cv in cameraViews])
+    for cameraType in cameraTypes:
+        cameraType.computeUndistortMaps()
 
 objects = storage.loadTrajectoriesFromSqlite(dirname+os.path.sep+mergedSequence.getDatabaseFilename(), args.trajectoryType)
+for obj in objects:
+    obj.projectedPositions = {}
 
 #def playVideo(filenames, windowNames = None, firstFrameNums = None, frameRate = -1, interactive = False, printFrames = True, text = None, rescale = 1., step = 1):
 if len(filenames) == 0:
@@ -64,27 +73,40 @@
     key = -1
     ret = True
     nFramesShown = 0
-    if firstFrameNums is not None:
-        for i in xrange(len(captures)):
+    for i in xrange(len(captures)):
+        if firstFrameNums[i] > 0:
             captures[i].set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNums[i])
     while ret and not cvutils.quitKey(key):
         rets = []
         images = []
-        for cap in captures:
-            ret, img = cap.read()
-            rets.append(ret)
-            images.append(img)
-        if np.array(rets).all():
+        for i in xrange(len(captures)):
+            if firstFrameNums[i]+nFramesShown>=0:
+                ret, img = captures[i].read()
+                if ret and args.undistort:
+                    img = cv2.remap(img, videoSequences[i].cameraView.cameraType.map1, videoSequences[i].cameraView.cameraType.map2, interpolation=cv2.INTER_LINEAR)
+                rets.append(ret)
+                images.append(img)
+            else:
+                rets.append(False)
+                images.append(None)                
+        if np.array(rets).any():
             #if printFrames:
             print('frame shown {0}'.format(nFramesShown))
             for i in xrange(len(filenames)):
-                for obj in objects:
-                    
-                    if obj.existsAtInstant():
-                        
+                if rets[i]:#firstFrameNums[i]+nFramesShown>=0:
+                    for obj in objects:
+                        if obj.existsAtInstant(mergedFirstFrameNum+nFramesShown):
+                            #print obj.num, obj.timeInterval, mergedFirstFrameNum, nFramesShown
+                            if i not in obj.projectedPositions:
+                                if homographies[i] is not None:
+                                    obj.projectedPositions[i] = obj.positions.project(homographies[i])
+                                else:
+                                    obj.projectedPositions[i] = obj.positions
+                            cvutils.cvPlot(images[i], obj.projectedPositions[i], cvutils.cvColors[obj.getNum()], int(mergedFirstFrameNum+nFramesShown)-obj.getFirstInstant())
+
                     #if text is not None:
                     #    cv2.putText(images[i], text, (10,50), cv2.FONT_HERSHEY_PLAIN, 1, cvRed)
-                cvutils.cvImshow(windowNames[i], images[i], rescale) # cv2.imshow('frame', img)
+                    cvutils.cvImshow(windowNames[i], images[i], rescale) # cv2.imshow('frame', img)
             key = cv2.waitKey(wait)
             #if cvutils.saveKey(key):
             #    cv2.imwrite('image-{}.png'.format(frameNum), img)
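
utils.deltaFrames, used above to compute the possibly negative first frame numbers (see the "use pos/neg first frame nums" comment), is also defined outside this changeset. A plausible implementation consistent with its use here, returning the signed frame offset of an instant relative to a sequence start time:

# Sketch of a utils.deltaFrames-style helper (assumed, not from this changeset)
def deltaFrames(startTime, instant, frameRate):
    '''Returns the signed number of frames between startTime and instant
    at the given frame rate (negative if instant is before startTime)'''
    return int(round((instant-startTime).total_seconds()*frameRate))

A negative result means the video sequence starts after the requested display start time; the loop above skips reading from such a capture until firstFrameNums[i]+nFramesShown reaches 0.
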
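
The per-object projectedPositions dictionary initialized after loading the trajectories implements a lazy cache: an object's positions are projected into a camera view's image space with that view's inverse homography the first time the view has to draw the object, then reused for every later frame. A self-contained illustration of the same pattern, where projectHomography is a hypothetical stand-in for moving.Trajectory.project:

import numpy as np

def projectHomography(homography, points):
    '''Applies a 3x3 homography to a 2xN array of points
    (hypothetical stand-in for moving.Trajectory.project)'''
    homogeneous = np.vstack([points, np.ones((1, points.shape[1]))])
    projected = homography.dot(homogeneous)
    return projected[:2]/projected[2]

projectedPositions = {} # camera view index -> projected 2xN point array

def getProjectedPositions(viewIndex, positions, homographies):
    '''Projects positions into the view's image space only once per view'''
    if viewIndex not in projectedPositions:
        if homographies[viewIndex] is not None:
            projectedPositions[viewIndex] = projectHomography(homographies[viewIndex], positions)
        else:
            projectedPositions[viewIndex] = positions
    return projectedPositions[viewIndex]

Caching per view index matters here because the same merged objects are drawn in every synchronized video window on every displayed frame, while the projection itself only depends on the view.
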