diff scripts/extract-appearance-images.py @ 911:3dd5acfa1899

corrected potential issues with videos where one cannot reach a given frame from its number
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Wed, 28 Jun 2017 16:46:45 -0400
parents cd038493f8c6
children e5970606066f
--- a/scripts/extract-appearance-images.py	Wed Jun 28 15:36:25 2017 -0400
+++ b/scripts/extract-appearance-images.py	Wed Jun 28 16:46:45 2017 -0400
@@ -15,6 +15,7 @@
 parser.add_argument('--delimiter', dest = 'classificationAnnotationFilenameDelimiter', help = 'delimiter for the fields in the correct classification file', default= ' ')
 parser.add_argument('-s', dest = 'nFramesStep', help = 'number of frames between each saved patch', default = 50, type = int)
 parser.add_argument('-n', dest = 'nObjects', help = 'number of objects to use to extract patches from', type = int, default = None)
+parser.add_argument('--start-frame0', dest = 'startFrame0', help = 'starts with first frame for videos with index problem where frames cannot be reached', action = 'store_true')
 parser.add_argument('-o', dest = 'overlap', help = 'maximum intersection over union of the features nFramesStep apart to save image', type = float, default = 0.2)
 parser.add_argument('--extract-all', dest = 'extractAllObjectImages', help = 'extracts the images for all objects, well classified or not (otherwise, extracts only for the misclassified)', action = 'store_true')
 parser.add_argument('--prefix', dest = 'imagePrefix', help = 'image prefix', default = 'img')
@@ -40,6 +41,8 @@
     if obj.getNum() in annotatedObjectNumbers:
         obj.setUserType(classificationAnnotations.loc[obj.getNum(), 'road_user_type'])
 timeInterval = moving.TimeInterval.unionIntervals([obj.getTimeInterval() for obj in objects])
+if args.startFrame0:
+    timeInterval.first = 0
 
 for userType in classificationAnnotations['road_user_type'].unique():
     if not os.path.exists(args.directoryName+os.sep+moving.userTypeNames[userType]):
@@ -56,7 +59,8 @@
 if capture.isOpened():
     ret = True
     frameNum = timeInterval.first
-    capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum)
+    if not args.startFrame0:
+        capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum)
     lastFrameNum = timeInterval.last
     while ret and frameNum <= timeInterval.last:
         ret, img = capture.read()
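
The change works around videos whose frame index is unreliable, so that seeking with `CAP_PROP_POS_FRAMES` does not land on the requested frame: with `--start-frame0`, the script skips the seek and simply decodes from the first frame onward. Below is a minimal, hypothetical sketch (not part of the changeset) of the same sequential-read idea as a standalone helper; the function name, filename, and target frame number are placeholders.

```python
import cv2

def read_frame_sequentially(filename, targetFrameNum):
    '''Return the image at targetFrameNum by decoding every frame from the start,
    avoiding CAP_PROP_POS_FRAMES seeking that fails on some encodings.'''
    capture = cv2.VideoCapture(filename)
    frameNum = 0
    img = None
    while capture.isOpened() and frameNum <= targetFrameNum:
        ret, img = capture.read()
        if not ret:  # could not decode the next frame
            capture.release()
            return None
        frameNum += 1
    capture.release()
    return img

# placeholder usage: grab frame 120 of a hypothetical video file
img = read_frame_sequentially('video.avi', 120)
```

Reading sequentially is slower than seeking, but it is the only reliable option when the container's frame index is broken, which is why the script gates it behind an explicit flag rather than making it the default.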