traffic-intelligence: changeset 911:3dd5acfa1899
corrected potential issues with videos where one cannot reach a given frame from its number
| author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
|---|---|
| date | Wed, 28 Jun 2017 16:46:45 -0400 |
| parents | b58a1061a717 |
| children | fd057a6b04db |
| files | scripts/classify-objects.py scripts/extract-appearance-images.py |
| diffstat | 2 files changed, 12 insertions(+), 4 deletions(-) |
--- a/scripts/classify-objects.py	Wed Jun 28 15:36:25 2017 -0400
+++ b/scripts/classify-objects.py	Wed Jun 28 16:46:45 2017 -0400
@@ -15,6 +15,7 @@
 parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file (overrides the configuration file)')
 parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file (overrides the configuration file)')
 parser.add_argument('-n', dest = 'nObjects', help = 'number of objects to classify', type = int, default = None)
+parser.add_argument('--start-frame0', dest = 'startFrame0', help = 'starts with first frame for videos with index problem where frames cannot be reached', action = 'store_true')
 parser.add_argument('--plot-speed-distributions', dest = 'plotSpeedDistribution', help = 'simply plots the distributions used for each user type', action = 'store_true')
 parser.add_argument('--max-speed-distribution-plot', dest = 'maxSpeedDistributionPlot', help = 'if plotting the user distributions, the maximum speed to display (km/h)', type = float, default = 50.)
@@ -66,6 +67,8 @@
 objects = storage.loadTrajectoriesFromSqlite(databaseFilename, 'object', args.nObjects, withFeatures = True)
 timeInterval = moving.TimeInterval.unionIntervals([obj.getTimeInterval() for obj in objects])
+if args.startFrame0:
+    timeInterval.first = 0

 capture = cv2.VideoCapture(videoFilename)
 width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
@@ -80,7 +83,8 @@
 if capture.isOpened():
     ret = True
     frameNum = timeInterval.first
-    capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum)
+    if not args.startFrame0:
+        capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum)
     lastFrameNum = timeInterval.last

     while ret and frameNum <= lastFrameNum:
@@ -91,13 +95,13 @@
         if undistort:
             img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
         for obj in objects:
-            if obj.getFirstInstant() == frameNum:
+            if obj.getFirstInstant() >= frameNum: # if images are skipped
                 obj.initClassifyUserTypeHoGSVM(speedAggregationFunc, pedBikeCarSVM, bikeCarSVM, classifierParams.maxPedestrianSpeed, classifierParams.maxCyclistSpeed, classifierParams.nFramesIgnoreAtEnds)
                 currentObjects.append(obj)
                 objects.remove(obj)
         for obj in currentObjects:
-            if obj.getLastInstant() == frameNum:
+            if obj.getLastInstant() <= frameNum: # if images are skipped
                 obj.classifyUserTypeHoGSVM(minSpeedEquiprobable = classifierParams.minSpeedEquiprobable, speedProbabilities = speedProbabilities, maxPercentUnknown = classifierParams.maxPercentUnknown)
                 pastObjects.append(obj)
                 currentObjects.remove(obj)
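The problem this changeset works around is that, for some videos, positioning the capture with the frame-position property does not actually reach the requested frame. With the new `--start-frame0` option the scripts therefore skip the seek, start decoding at frame 0 and process the whole interval from there. Below is a minimal, self-contained sketch of that fallback pattern; it is not the project's code, the function and argument names are illustrative, and it uses the OpenCV 3+ constant `cv2.CAP_PROP_POS_FRAMES` whereas the scripts above use the OpenCV 2 name `cv2.cv.CV_CAP_PROP_POS_FRAMES`.

```python
import cv2

def readFrames(videoFilename, firstFrameNum, lastFrameNum, startFrame0 = False):
    '''Yields (frameNum, image) for the frames in [firstFrameNum, lastFrameNum].

    With startFrame0 = True the video is decoded sequentially from frame 0 and
    the frames before firstFrameNum are read and discarded, instead of seeking.'''
    capture = cv2.VideoCapture(videoFilename)
    if not capture.isOpened():
        return
    if startFrame0:
        frameNum = 0  # do not trust the frame index: decode everything from the start
    else:
        frameNum = firstFrameNum
        capture.set(cv2.CAP_PROP_POS_FRAMES, frameNum)  # may misbehave on problem videos
    ret = True
    while ret and frameNum <= lastFrameNum:
        ret, img = capture.read()
        if ret and frameNum >= firstFrameNum:
            yield frameNum, img
        frameNum += 1
    capture.release()
```

Reading sequentially from frame 0 is slower, since every frame up to the interval of interest has to be decoded, but it does not depend on the container's frame index being correct.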
--- a/scripts/extract-appearance-images.py	Wed Jun 28 15:36:25 2017 -0400
+++ b/scripts/extract-appearance-images.py	Wed Jun 28 16:46:45 2017 -0400
@@ -15,6 +15,7 @@
 parser.add_argument('--delimiter', dest = 'classificationAnnotationFilenameDelimiter', help = 'delimiter for the fields in the correct classification file', default= ' ')
 parser.add_argument('-s', dest = 'nFramesStep', help = 'number of frames between each saved patch', default = 50, type = int)
 parser.add_argument('-n', dest = 'nObjects', help = 'number of objects to use to extract patches from', type = int, default = None)
+parser.add_argument('--start-frame0', dest = 'startFrame0', help = 'starts with first frame for videos with index problem where frames cannot be reached', action = 'store_true')
 parser.add_argument('-o', dest = 'overlap', help = 'maximum intersection over union of the features nFramesStep apart to save image', type = float, default = 0.2)
 parser.add_argument('--extract-all', dest = 'extractAllObjectImages', help = 'extracts the images for all objects, well classified or not (otherwise, extracts only for the misclassified)', action = 'store_true')
 parser.add_argument('--prefix', dest = 'imagePrefix', help = 'image prefix', default = 'img')
@@ -40,6 +41,8 @@
     if obj.getNum() in annotatedObjectNumbers:
         obj.setUserType(classificationAnnotations.loc[obj.getNum(), 'road_user_type'])
 timeInterval = moving.TimeInterval.unionIntervals([obj.getTimeInterval() for obj in objects])
+if args.startFrame0:
+    timeInterval.first = 0

 for userType in classificationAnnotations['road_user_type'].unique():
     if not os.path.exists(args.directoryName+os.sep+moving.userTypeNames[userType]):
@@ -56,7 +59,8 @@
 if capture.isOpened():
     ret = True
     frameNum = timeInterval.first
-    capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum)
+    if not args.startFrame0:
+        capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum)
     lastFrameNum = timeInterval.last
     while ret and frameNum <= timeInterval.last:
         ret, img = capture.read()
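One plausible way to decide whether a video needs `--start-frame0` is to test whether a seek by frame number behaves sanely. The check below is only an illustrative assumption, not part of the changeset, and it can miss subtler index problems where the reported position is correct but the decoded image is not.

```python
import cv2

def seekSeemsReliable(videoFilename, testFrameNum = 100):
    '''Returns True if seeking to testFrameNum leaves the capture at the
    expected position and a frame can actually be decoded there.'''
    capture = cv2.VideoCapture(videoFilename)
    if not capture.isOpened():
        return False
    capture.set(cv2.CAP_PROP_POS_FRAMES, testFrameNum)
    reportedPosition = capture.get(cv2.CAP_PROP_POS_FRAMES)
    ret, img = capture.read()
    capture.release()
    return ret and int(reportedPosition) == testFrameNum
```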