Mercurial Hosting > traffic-intelligence
comparison scripts/extract-appearance-images.py @ 911:3dd5acfa1899
corrected potential issues with videos where one cannot reach a given frame from its number
author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
---|---|
date | Wed, 28 Jun 2017 16:46:45 -0400 |
parents | cd038493f8c6 |
children | e5970606066f |
comparison
equal
deleted
inserted
replaced
910:b58a1061a717 | 911:3dd5acfa1899 |
---|---|
13 parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file (overrides the configuration file)') | 13 parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file (overrides the configuration file)') |
14 parser.add_argument('--gt', dest = 'classificationAnnotationFilename', help = 'name of the file containing the correct classes (user types)', required = True) | 14 parser.add_argument('--gt', dest = 'classificationAnnotationFilename', help = 'name of the file containing the correct classes (user types)', required = True) |
15 parser.add_argument('--delimiter', dest = 'classificationAnnotationFilenameDelimiter', help = 'delimiter for the fields in the correct classification file', default= ' ') | 15 parser.add_argument('--delimiter', dest = 'classificationAnnotationFilenameDelimiter', help = 'delimiter for the fields in the correct classification file', default= ' ') |
16 parser.add_argument('-s', dest = 'nFramesStep', help = 'number of frames between each saved patch', default = 50, type = int) | 16 parser.add_argument('-s', dest = 'nFramesStep', help = 'number of frames between each saved patch', default = 50, type = int) |
17 parser.add_argument('-n', dest = 'nObjects', help = 'number of objects to use to extract patches from', type = int, default = None) | 17 parser.add_argument('-n', dest = 'nObjects', help = 'number of objects to use to extract patches from', type = int, default = None) |
18 parser.add_argument('--start-frame0', dest = 'startFrame0', help = 'starts with first frame for videos with index problem where frames cannot be reached', action = 'store_true') | |
18 parser.add_argument('-o', dest = 'overlap', help = 'maximum intersection over union of the features nFramesStep apart to save image', type = float, default = 0.2) | 19 parser.add_argument('-o', dest = 'overlap', help = 'maximum intersection over union of the features nFramesStep apart to save image', type = float, default = 0.2) |
19 parser.add_argument('--extract-all', dest = 'extractAllObjectImages', help = 'extracts the images for all objects, well classified or not (otherwise, extracts only for the misclassified)', action = 'store_true') | 20 parser.add_argument('--extract-all', dest = 'extractAllObjectImages', help = 'extracts the images for all objects, well classified or not (otherwise, extracts only for the misclassified)', action = 'store_true') |
20 parser.add_argument('--prefix', dest = 'imagePrefix', help = 'image prefix', default = 'img') | 21 parser.add_argument('--prefix', dest = 'imagePrefix', help = 'image prefix', default = 'img') |
21 parser.add_argument('--ouput', dest = 'directoryName', help = 'parent directory name for the directories containing the samples for the different road users', default = '.') | 22 parser.add_argument('--ouput', dest = 'directoryName', help = 'parent directory name for the directories containing the samples for the different road users', default = '.') |
22 parser.add_argument('--compute-speed-distributions', dest = 'computeSpeedDistribution', help = 'computes the distribution of the road users of each type and fits parameters to each', action = 'store_true') | 23 parser.add_argument('--compute-speed-distributions', dest = 'computeSpeedDistribution', help = 'computes the distribution of the road users of each type and fits parameters to each', action = 'store_true') |
38 objects = storage.loadTrajectoriesFromSqlite(databaseFilename, 'object', annotatedObjectNumbers, withFeatures = True) | 39 objects = storage.loadTrajectoriesFromSqlite(databaseFilename, 'object', annotatedObjectNumbers, withFeatures = True) |
39 for obj in objects: | 40 for obj in objects: |
40 if obj.getNum() in annotatedObjectNumbers: | 41 if obj.getNum() in annotatedObjectNumbers: |
41 obj.setUserType(classificationAnnotations.loc[obj.getNum(), 'road_user_type']) | 42 obj.setUserType(classificationAnnotations.loc[obj.getNum(), 'road_user_type']) |
42 timeInterval = moving.TimeInterval.unionIntervals([obj.getTimeInterval() for obj in objects]) | 43 timeInterval = moving.TimeInterval.unionIntervals([obj.getTimeInterval() for obj in objects]) |
44 if args.startFrame0: | |
45 timeInterval.first = 0 | |
43 | 46 |
44 for userType in classificationAnnotations['road_user_type'].unique(): | 47 for userType in classificationAnnotations['road_user_type'].unique(): |
45 if not os.path.exists(args.directoryName+os.sep+moving.userTypeNames[userType]): | 48 if not os.path.exists(args.directoryName+os.sep+moving.userTypeNames[userType]): |
46 os.mkdir(args.directoryName+os.sep+moving.userTypeNames[userType]) | 49 os.mkdir(args.directoryName+os.sep+moving.userTypeNames[userType]) |
47 | 50 |
54 height, width = map1.shape | 57 height, width = map1.shape |
55 | 58 |
56 if capture.isOpened(): | 59 if capture.isOpened(): |
57 ret = True | 60 ret = True |
58 frameNum = timeInterval.first | 61 frameNum = timeInterval.first |
59 capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum) | 62 if not args.startFrame0: |
63 capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum) | |
60 lastFrameNum = timeInterval.last | 64 lastFrameNum = timeInterval.last |
61 while ret and frameNum <= timeInterval.last: | 65 while ret and frameNum <= timeInterval.last: |
62 ret, img = capture.read() | 66 ret, img = capture.read() |
63 distorted = True | 67 distorted = True |
64 if ret: | 68 if ret: |