changeset 902:c69a8defe5c3

changed workflow of classify-objects.py
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Thu, 22 Jun 2017 16:57:34 -0400
parents 753a081989e2
children 81ee5aaf213d
files scripts/classify-objects.py scripts/display-trajectories.py scripts/extract-appearance-images.py
diffstat 3 files changed, 51 insertions(+), 27 deletions(-)
--- a/scripts/classify-objects.py	Thu Jun 22 12:02:34 2017 -0400
+++ b/scripts/classify-objects.py	Thu Jun 22 16:57:34 2017 -0400
@@ -20,7 +20,6 @@
 
 args = parser.parse_args()
 params, videoFilename, databaseFilename, invHomography, intrinsicCameraMatrix, distortionCoefficients, undistortedImageMultiplication, undistort, firstFrameNum = storage.processVideoArguments(args)
-
 classifierParams = storage.ClassifierParameters(params.classifierFilename)
 classifierParams.convertToFrames(params.videoFrameRate, 3.6) # conversion from km/h to m/frame
 
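(Note on the conversion: the classifier speed thresholds are given in km/h, and dividing by 3.6 converts km/h to m/s, then dividing by the frame rate converts m/s to m/frame, so e.g. 36 km/h at 30 fps is 10/30 ≈ 0.33 m/frame.)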
@@ -66,18 +65,14 @@
     sys.exit()
 
 objects = storage.loadTrajectoriesFromSqlite(databaseFilename, 'object', args.nObjects, withFeatures = True)
-#features = storage.loadTrajectoriesFromSqlite(databaseFilename, 'feature')
-intervals = []
-for obj in objects:
-    #obj.setFeatures(features)
-    intervals.append(obj.getTimeInterval())
-timeInterval = moving.TimeInterval.unionIntervals(intervals)
+timeInterval = moving.TimeInterval.unionIntervals([obj.getTimeInterval() for obj in objects])
 
 capture = cv2.VideoCapture(videoFilename)
 width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
 height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
 
 pastObjects = []
+currentObjects = []
 if undistort: # setup undistortion
     [map1, map2] = cvutils.computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients)
 if capture.isOpened():
@@ -92,26 +87,23 @@
             if frameNum%50 == 0:
                 print('frame number: {}'.format(frameNum))
             if undistort:
                 img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
-            currentObjects = []
-            for obj in objects:
+            for obj in list(objects): # iterate over a copy, since started objects are removed below
-                inter = obj.getTimeInterval()
-                if inter.contains(frameNum):
-                    if inter.first == frameNum:
-                        obj.initClassifyUserTypeHoGSVM(speedAggregationFunc, pedBikeCarSVM, bikeCarSVM, classifierParams.maxPedestrianSpeed, classifierParams.maxCyclistSpeed, classifierParams.nFramesIgnoreAtEnds)
-                        currentObjects.append(obj)
-                    elif inter.last == frameNum:
-                        obj.classifyUserTypeHoGSVM(minSpeedEquiprobable = classifierParams.minSpeedEquiprobable, speedProbabilities = speedProbabilities, maxPercentUnknown = classifierParams.maxPercentUnknown)
-                        pastObjects.append(obj)
-                    else:
-                        obj.classifyUserTypeHoGSVMAtInstant(img, frameNum, invHomography, width, height, classifierParams.percentIncreaseCrop, classifierParams.percentIncreaseCrop, classifierParams.minNPixels, classifierParams.hogRescaleSize, classifierParams.hogNOrientations, classifierParams.hogNPixelsPerCell, classifierParams.hogNCellsPerBlock, classifierParams.hogBlockNorm)
-                        currentObjects.append(obj)
+                if obj.getFirstInstant() == frameNum:
+                    obj.initClassifyUserTypeHoGSVM(speedAggregationFunc, pedBikeCarSVM, bikeCarSVM, classifierParams.maxPedestrianSpeed, classifierParams.maxCyclistSpeed, classifierParams.nFramesIgnoreAtEnds)
+                    currentObjects.append(obj)
+                    objects.remove(obj)
+
+            for obj in list(currentObjects): # iterate over a copy, since finished objects are removed below
+                if obj.getLastInstant() == frameNum:
+                    obj.classifyUserTypeHoGSVM(minSpeedEquiprobable = classifierParams.minSpeedEquiprobable, speedProbabilities = speedProbabilities, maxPercentUnknown = classifierParams.maxPercentUnknown)
+                    pastObjects.append(obj)
+                    currentObjects.remove(obj)
                 else:
-                    currentObjects.append(obj)
-            objects = currentObjects
+                    obj.classifyUserTypeHoGSVMAtInstant(img, frameNum, invHomography, width, height, classifierParams.percentIncreaseCrop, classifierParams.percentIncreaseCrop, classifierParams.minNPixels, classifierParams.hogRescaleSize, classifierParams.hogNOrientations, classifierParams.hogNPixelsPerCell, classifierParams.hogNCellsPerBlock, classifierParams.hogBlockNorm)
         frameNum += 1
     
-    for obj in objects:
+    for obj in currentObjects:
         obj.classifyUserTypeHoGSVM(minSpeedEquiprobable = classifierParams.minSpeedEquiprobable, speedProbabilities = speedProbabilities, maxPercentUnknown = classifierParams.maxPercentUnknown)
         pastObjects.append(obj)
     print('Saving user types')
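The rewrite replaces the per-frame rebuild of the object list with three pools that objects move through as the video advances: objects (not yet started), currentObjects (being classified at each instant) and pastObjects (finished). A minimal sketch of that control flow, with the classifier calls stubbed out as comments and assuming only the getFirstInstant()/getLastInstant() accessors; both inner loops iterate over copies because they remove from the lists they traverse:

# sketch of the three-pool workflow, classifier calls stubbed out
waiting, current, done = list(objects), [], []
for frameNum in range(timeInterval.first, timeInterval.last + 1):
    for obj in list(waiting):              # copy: removing while iterating
        if obj.getFirstInstant() == frameNum:
            current.append(obj)            # initClassifyUserTypeHoGSVM here
            waiting.remove(obj)
    for obj in list(current):              # copy: removing while iterating
        if obj.getLastInstant() == frameNum:
            done.append(obj)               # final classifyUserTypeHoGSVM here
            current.remove(obj)
        else:
            pass                           # classifyUserTypeHoGSVMAtInstant here
done.extend(current)                       # objects still active when the video ends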
--- a/scripts/display-trajectories.py	Thu Jun 22 12:02:34 2017 -0400
+++ b/scripts/display-trajectories.py	Thu Jun 22 16:57:34 2017 -0400
@@ -9,8 +9,8 @@
 
 parser = argparse.ArgumentParser(description='The program displays feature or object trajectories overlaid over the video frames.', epilog = 'Either the configuration filename or the other parameters (at least video and database filenames) need to be provided.')
 parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the configuration file')
-parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file')
-parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file')
+parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file (overrides the configuration file)')
+parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file (overrides the configuration file)')
 parser.add_argument('-t', dest = 'trajectoryType', help = 'type of trajectories to display', choices = ['feature', 'object'], default = 'feature')
 parser.add_argument('-o', dest = 'homographyFilename', help = 'name of the image to world homography file')
 parser.add_argument('--intrinsic', dest = 'intrinsicCameraMatrixFilename', help = 'name of the intrinsic camera file')
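The amended help strings document the precedence implemented by storage.processVideoArguments: a value given on the command line wins over the one read from the configuration file. A sketch of that pattern (resolve is a hypothetical helper for illustration, not the actual implementation):

def resolve(cliValue, configValue):
    # prefer the command-line value; fall back to the configuration file
    return cliValue if cliValue is not None else configValue

# e.g. videoFilename = resolve(args.videoFilename, params.videoFilename)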
--- a/scripts/extract-appearance-images.py	Thu Jun 22 12:02:34 2017 -0400
+++ b/scripts/extract-appearance-images.py	Thu Jun 22 16:57:34 2017 -0400
@@ -9,14 +9,46 @@
 
 parser = argparse.ArgumentParser(description='The program extracts labeled image patches to train the HoG-SVM classifier, and optionally speed information')
 parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the configuration file', required = True)
+parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file (overrides the configuration file)')
+parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file (overrides the configuration file)')
+parser.add_argument('--gt', dest = 'classificationAnnotationFilename', help = 'name of the file containing the correct classes (user types)', required = True)
+parser.add_argument('-s', dest = 'nFramesStep', help = 'number of frames between each saved patch', default = 50, type = int)
+parser.add_argument('-n', dest = 'nObjects', help = 'number of objects to use to extract patches from', type = int, default = None)
+parser.add_argument('--compute-speed-distributions', dest = 'computeSpeedDistribution', help = 'computes the speed distribution of road users of each type and fits parameters to each', action = 'store_true')
+
 
 #parser.add_argument('-d', dest = 'directoryName', help = 'parent directory name for the directories containing the samples for the different road users', required = True)
 
 args = parser.parse_args()
-params = storage.ProcessParameters(args.configFilename)
+params, videoFilename, databaseFilename, invHomography, intrinsicCameraMatrix, distortionCoefficients, undistortedImageMultiplication, undistort, firstFrameNum = storage.processVideoArguments(args)
 classifierParams = storage.ClassifierParameters(params.classifierFilename)
 
-# need all info as for classification (image info)
+objects = storage.loadTrajectoriesFromSqlite(databaseFilename, 'object', args.nObjects, withFeatures = True)
+timeInterval = moving.TimeInterval.unionIntervals([obj.getTimeInterval() for obj in objects])
+
+capture = cv2.VideoCapture(videoFilename)
+width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
+height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
+
+if undistort: # setup undistortion
+    [map1, map2] = cvutils.computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients)
+if capture.isOpened():
+    ret = True
+    frameNum = timeInterval.first
+    capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum)
+    lastFrameNum = timeInterval.last
+    while ret and frameNum <= lastFrameNum:
+        ret, img = capture.read()
+        if ret:
+            if frameNum%50 == 0:
+                print('frame number: {}'.format(frameNum))
+            if undistort:
+                img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
+
+
+        frameNum += 1
+
+
 
 # todo speed info: distributions AND min speed equiprobable
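The new main loop in extract-appearance-images.py is scaffolding: it seeks to the first instant and reads (and optionally undistorts) each frame, but the body that actually crops and saves the labeled patches is still to be written, as the remaining todo indicates. A hedged sketch of what that body might do inside the "if ret:" block, sampling each active object every args.nFramesStep frames and assuming the usual existsAtInstant()/getNum() accessors (imagePatch and the output naming are assumptions for illustration, not code from this changeset):

            # hypothetical patch extraction, after the undistortion step
            for obj in objects:
                if obj.existsAtInstant(frameNum) and (frameNum - obj.getFirstInstant()) % args.nFramesStep == 0:
                    patch = imagePatch(img, obj, frameNum)  # hypothetical crop helper
                    cv2.imwrite('{}-{}.png'.format(obj.getNum(), frameNum), patch)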