changeset 946:e5970606066f

bug fixes for list filtering (cannot remove elements from a list while iterating over it) and for motion prediction, which now keeps the same features from one instant to the next
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Fri, 21 Jul 2017 11:25:20 -0400
parents 05d4302bf67e
children 053484e08947
files python/cvutils.py python/prediction.py scripts/classify-objects.py scripts/extract-appearance-images.py scripts/safety-analysis.py
diffstat 5 files changed, 18 insertions(+), 10 deletions(-)
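The list-filtering fixes below all address the same Python pitfall: calling remove() on a list while iterating over it shifts the remaining elements left, so the iterator skips the element that slides into the freed slot. A minimal standalone sketch (not part of the changeset) of the failure and of the in-place slice-assignment idiom the fix adopts:

    objects = [1, 2, 2, 3]
    for obj in objects:          # buggy: mutates the list being iterated
        if obj == 2:
            objects.remove(obj)  # the second 2 is skipped and survives
    print(objects)               # [1, 2, 3], not [1, 3]

    # fix used throughout this changeset: rebuild the list with a
    # comprehension and assign through [:], so existing references
    # to the list object still see the filtered result
    objects = [1, 2, 2, 3]
    objects[:] = [obj for obj in objects if obj != 2]
    print(objects)               # [1, 3]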
--- a/python/cvutils.py	Thu Jul 20 14:29:46 2017 -0400
+++ b/python/cvutils.py	Fri Jul 21 11:25:20 2017 -0400
@@ -344,8 +344,7 @@
                                 if frameNum not in toMatches[obj.getNum()]:
                                     objDescription += " FA"
                             cv2.putText(img, objDescription, obj.projectedPositions[frameNum-obj.getFirstInstant()].asint().astuple(), cv2.FONT_HERSHEY_PLAIN, 1, cvColors[colorType][obj.getNum()])
-                            if obj.getLastInstant() == frameNum:
-                                objects.remove(obj)
+                    objects[:] = [obj for obj in objects if obj.getLastInstant() != frameNum]
                     # plot object bounding boxes
                     if frameNum in boundingBoxes.keys():
                         for rect in boundingBoxes[frameNum]:
--- a/python/prediction.py	Thu Jul 20 14:29:46 2017 -0400
+++ b/python/prediction.py	Fri Jul 21 11:25:20 2017 -0400
@@ -575,10 +575,16 @@
         predictedTrajectories = []
         if instant-obj.getFirstInstant()+1 >= self.minFeatureTime:
             if self.useFeatures and obj.hasFeatures():
-                # get current features existing for the most time, sort on first instant of feature and take n first
-                firstInstants = [(f,f.getFirstInstant()) for f in obj.getFeatures() if f.existsAtInstant(instant)]
+                if not hasattr(obj, 'currentPredictionFeatures'):
+                    obj.currentPredictionFeatures = []
+                else:
+                    obj.currentPredictionFeatures[:] = [f for f in obj.currentPredictionFeatures if f.existsAtInstant(instant)]
+                firstInstants = [(f,f.getFirstInstant()) for f in obj.getFeatures() if f.existsAtInstant(instant) and f not in obj.currentPredictionFeatures]
                 firstInstants.sort(key = lambda t: t[1])
-                for f,t1 in firstInstants[:min(self.nPredictedTrajectories, len(firstInstants))]:
+                for f,t1 in firstInstants[:min(self.nPredictedTrajectories, len(firstInstants), self.nPredictedTrajectories-len(obj.currentPredictionFeatures))]:
+                    obj.currentPredictionFeatures.append(f)
+                #print len(obj.currentPredictionFeatures), self.nPredictedTrajectories
+                for f in obj.currentPredictionFeatures:
                     self.addPredictedTrajectories(predictedTrajectories, f, instant)
             else:
                 self.addPredictedTrajectories(predictedTrajectories, obj, instant)
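The motion-prediction fix caches the selected features on the object itself (currentPredictionFeatures), prunes the ones that no longer exist at the current instant, and only tops the set back up to nPredictedTrajectories with the longest-existing features not yet in it; previously the n longest-existing features were re-selected from scratch at every instant, so the feature set could change between instants. A standalone restatement of the hunk above (Feature objects exposing existsAtInstant and getFirstInstant are assumed):

    def selectPredictionFeatures(obj, instant, nPredictedTrajectories):
        if not hasattr(obj, 'currentPredictionFeatures'):
            obj.currentPredictionFeatures = []
        else:
            # keep only cached features that still exist at this instant
            obj.currentPredictionFeatures[:] = [f for f in obj.currentPredictionFeatures if f.existsAtInstant(instant)]
        # candidate features not yet cached, longest-existing first
        candidates = sorted([f for f in obj.getFeatures() if f.existsAtInstant(instant) and f not in obj.currentPredictionFeatures], key = lambda f: f.getFirstInstant())
        nMissing = nPredictedTrajectories - len(obj.currentPredictionFeatures)
        obj.currentPredictionFeatures.extend(candidates[:nMissing])
        return obj.currentPredictionFeatures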
--- a/scripts/classify-objects.py	Thu Jul 20 14:29:46 2017 -0400
+++ b/scripts/classify-objects.py	Fri Jul 21 11:25:20 2017 -0400
@@ -102,15 +102,15 @@
                 if obj.getFirstInstant() <= frameNum: # if images are skipped
                     obj.initClassifyUserTypeHoGSVM(speedAggregationFunc, pedBikeCarSVM, bikeCarSVM, classifierParams.maxPedestrianSpeed, classifierParams.maxCyclistSpeed, classifierParams.nFramesIgnoreAtEnds, invHomography, intrinsicCameraMatrix, distortionCoefficients)
                     currentObjects.append(obj)
-                    objects.remove(obj)
+            objects[:] = [obj for obj in objects if obj.getFirstInstant() > frameNum]
 
             for obj in currentObjects:
                 if obj.getLastInstant() <= frameNum:  # if images are skipped
                     obj.classifyUserTypeHoGSVM(minSpeedEquiprobable = classifierParams.minSpeedEquiprobable, speedProbabilities = speedProbabilities, maxPercentUnknown = classifierParams.maxPercentUnknown)
                     pastObjects.append(obj)
-                    currentObjects.remove(obj)
                 else:
                     obj.classifyUserTypeHoGSVMAtInstant(img, frameNum, width, height, classifierParams.percentIncreaseCrop, classifierParams.percentIncreaseCrop, classifierParams.minNPixels, classifierParams.hogRescaleSize, classifierParams.hogNOrientations, classifierParams.hogNPixelsPerCell, classifierParams.hogNCellsPerBlock, classifierParams.hogBlockNorm)
+            currentObjects[:] = [obj for obj in currentObjects if obj.getLastInstant() > frameNum]
         frameNum += 1
     
     for obj in currentObjects:
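classify-objects.py moves objects through three lists as frames advance: objects (not yet started), currentObjects (being classified frame by frame), and pastObjects (finished). The slice-assignment filter must run over the same list the finished objects were handed off from, with the complement of the hand-off condition as its predicate. A standalone sketch of that hand-off pattern (object methods mirror the ones above):

    def handOffFinished(currentObjects, pastObjects, frameNum):
        for obj in currentObjects:
            if obj.getLastInstant() <= frameNum:  # finished at this frame
                pastObjects.append(obj)
        # filter the list the finished objects came from, not another one
        currentObjects[:] = [obj for obj in currentObjects if obj.getLastInstant() > frameNum]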
--- a/scripts/extract-appearance-images.py	Thu Jul 20 14:29:46 2017 -0400
+++ b/scripts/extract-appearance-images.py	Fri Jul 21 11:25:20 2017 -0400
@@ -80,8 +80,9 @@
                             croppedImg = cvutils.imageBox(img, obj, frameNum, invHomography, width, height, classifierParams.percentIncreaseCrop, classifierParams.percentIncreaseCrop, classifierParams.minNPixels)
                             if croppedImg is not None:
                                 cv2.imwrite(args.directoryName+os.sep+moving.userTypeNames[obj.getUserType()]+os.sep+args.imagePrefix+'-{}-{}.png'.format(obj.getNum(), frameNum), croppedImg)
-                    elif obj.getLastInstant() == frameNum:
-                        objects.remove(obj)
+#                    elif obj.getLastInstant() == frameNum:
+#                        objects.remove(obj)
+            objects[:] = [obj for obj in objects if obj.getLastInstant() != frameNum]
         frameNum += 1
 
 # todo speed info: distributions AND min speed equiprobable
--- a/scripts/safety-analysis.py	Thu Jul 20 14:29:46 2017 -0400
+++ b/scripts/safety-analysis.py	Fri Jul 21 11:25:20 2017 -0400
@@ -16,6 +16,7 @@
 # TODO analyze only 
 parser.add_argument('--prediction-method', dest = 'predictionMethod', help = 'prediction method (constant velocity (cvd: vector computation (approximate); cve: equation solving; cv: discrete time (approximate)), normal adaptation, point set prediction)', choices = ['cvd', 'cve', 'cv', 'na', 'ps', 'mp'])
 parser.add_argument('--prototypeDatabaseFilename', dest = 'prototypeDatabaseFilename', help = 'name of the database containing the prototypes')
+parser.add_argument('--no-motion-prediction', dest = 'noMotionPrediction', help = 'do not compute indicators that depend on motion prediction, such as TTC', action = 'store_true')
 parser.add_argument('--pet', dest = 'computePET', help = 'computes PET', action = 'store_true')
 parser.add_argument('--display-cp', dest = 'displayCollisionPoints', help = 'display collision points', action = 'store_true')
 parser.add_argument('--nthreads', dest = 'nProcesses', help = 'number of processes to run in parallel', type = int, default = 1)
@@ -75,7 +76,8 @@
 interactions = events.createInteractions(objects)
 for inter in interactions:
     inter.computeIndicators()
-#    inter.computeCrossingsCollisions(predictionParameters, params.collisionDistance, params.predictionTimeHorizon, params.crossingZones, nProcesses = args.nProcesses, debug = True)
+    if not args.noMotionPrediction:
+        inter.computeCrossingsCollisions(predictionParameters, params.collisionDistance, params.predictionTimeHorizon, params.crossingZones, nProcesses = args.nProcesses, debug = True)
 
 if args.computePET:
     for inter in interactions:
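The new flag is a standard argparse store_true option: it defaults to False, so computeCrossingsCollisions (and with it the prediction-based indicators) still runs unless --no-motion-prediction is passed explicitly; combined with --pet, that computes PET while skipping the prediction-based indicators. A minimal self-contained check of the default behaviour (not taken from the script):

    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--no-motion-prediction', dest = 'noMotionPrediction', action = 'store_true')
    # flag absent: noMotionPrediction is False, motion prediction runs
    assert not parser.parse_args([]).noMotionPrediction
    # flag present: the computeCrossingsCollisions call is skipped
    assert parser.parse_args(['--no-motion-prediction']).noMotionPrediction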