Mercurial Hosting > traffic-intelligence
diff scripts/classify-objects.py @ 911:3dd5acfa1899
corrected potential issues with videos where one cannot reach a given frame from its number
author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
---|---|
date | Wed, 28 Jun 2017 16:46:45 -0400 |
parents | 0e017178f7ab |
children | fd057a6b04db |
line wrap: on
line diff
--- a/scripts/classify-objects.py Wed Jun 28 15:36:25 2017 -0400 +++ b/scripts/classify-objects.py Wed Jun 28 16:46:45 2017 -0400 @@ -15,6 +15,7 @@ parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file (overrides the configuration file)') parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file (overrides the configuration file)') parser.add_argument('-n', dest = 'nObjects', help = 'number of objects to classify', type = int, default = None) +parser.add_argument('--start-frame0', dest = 'startFrame0', help = 'starts with first frame for videos with index problem where frames cannot be reached', action = 'store_true') parser.add_argument('--plot-speed-distributions', dest = 'plotSpeedDistribution', help = 'simply plots the distributions used for each user type', action = 'store_true') parser.add_argument('--max-speed-distribution-plot', dest = 'maxSpeedDistributionPlot', help = 'if plotting the user distributions, the maximum speed to display (km/h)', type = float, default = 50.) 
@@ -66,6 +67,8 @@ objects = storage.loadTrajectoriesFromSqlite(databaseFilename, 'object', args.nObjects, withFeatures = True) timeInterval = moving.TimeInterval.unionIntervals([obj.getTimeInterval() for obj in objects]) +if args.startFrame0: + timeInterval.first = 0 capture = cv2.VideoCapture(videoFilename) width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)) @@ -80,7 +83,8 @@ if capture.isOpened(): ret = True frameNum = timeInterval.first - capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum) + if not args.startFrame0: + capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum) lastFrameNum = timeInterval.last while ret and frameNum <= lastFrameNum: @@ -91,13 +95,13 @@ if undistort: img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR) for obj in objects: - if obj.getFirstInstant() == frameNum: + if obj.getFirstInstant() >= frameNum: # if images are skipped obj.initClassifyUserTypeHoGSVM(speedAggregationFunc, pedBikeCarSVM, bikeCarSVM, classifierParams.maxPedestrianSpeed, classifierParams.maxCyclistSpeed, classifierParams.nFramesIgnoreAtEnds) currentObjects.append(obj) objects.remove(obj) for obj in currentObjects: - if obj.getLastInstant() == frameNum: + if obj.getLastInstant() <= frameNum: # if images are skipped obj.classifyUserTypeHoGSVM(minSpeedEquiprobable = classifierParams.minSpeedEquiprobable, speedProbabilities = speedProbabilities, maxPercentUnknown = classifierParams.maxPercentUnknown) pastObjects.append(obj) currentObjects.remove(obj)