comparison scripts/extract-appearance-images.py @ 909:cd038493f8c6

finished image extraction script for HoG-SVM training
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Mon, 26 Jun 2017 17:45:32 -0400
parents a57e6fbcd8e3
children 3dd5acfa1899
comparison
comparing 908:b297525b2cbf with 909:cd038493f8c6
1 #! /usr/bin/env python 1 #! /usr/bin/env python
2 2
3 import numpy as np, cv2 3 import numpy as np, cv2
4 import argparse, os 4 import argparse, os
5 from pandas import read_csv 5 from pandas import read_csv
6 from matplotlib.pyplot import imsave, imshow, figure 6 from matplotlib.pyplot import imshow, figure
7 7
8 import cvutils, moving, ml, storage 8 import cvutils, moving, ml, storage
9 9
10 parser = argparse.ArgumentParser(description='The program extracts labeled image patches to train the HoG-SVM classifier, and optionally speed information') 10 parser = argparse.ArgumentParser(description='The program extracts labeled image patches to train the HoG-SVM classifier, and optionally speed information')
11 parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the configuration file', required = True) 11 parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the configuration file', required = True)
13 parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file (overrides the configuration file)') 13 parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file (overrides the configuration file)')
14 parser.add_argument('--gt', dest = 'classificationAnnotationFilename', help = 'name of the file containing the correct classes (user types)', required = True) 14 parser.add_argument('--gt', dest = 'classificationAnnotationFilename', help = 'name of the file containing the correct classes (user types)', required = True)
15 parser.add_argument('--delimiter', dest = 'classificationAnnotationFilenameDelimiter', help = 'delimiter for the fields in the correct classification file', default= ' ') 15 parser.add_argument('--delimiter', dest = 'classificationAnnotationFilenameDelimiter', help = 'delimiter for the fields in the correct classification file', default= ' ')
16 parser.add_argument('-s', dest = 'nFramesStep', help = 'number of frames between each saved patch', default = 50, type = int) 16 parser.add_argument('-s', dest = 'nFramesStep', help = 'number of frames between each saved patch', default = 50, type = int)
17 parser.add_argument('-n', dest = 'nObjects', help = 'number of objects to use to extract patches from', type = int, default = None) 17 parser.add_argument('-n', dest = 'nObjects', help = 'number of objects to use to extract patches from', type = int, default = None)
18 parser.add_argument('-o', dest = 'overlap', help = 'maximum intersection over union of the features nFramesStep apart to save image', type = float, default = 0.2)
18 parser.add_argument('--extract-all', dest = 'extractAllObjectImages', help = 'extracts the images for all objects, well classified or not (otherwise, extracts only for the misclassified)', action = 'store_true') 19 parser.add_argument('--extract-all', dest = 'extractAllObjectImages', help = 'extracts the images for all objects, well classified or not (otherwise, extracts only for the misclassified)', action = 'store_true')
19 parser.add_argument('--prefix', dest = 'imagePrefix', help = 'image prefix', default = 'img') 20 parser.add_argument('--prefix', dest = 'imagePrefix', help = 'image prefix', default = 'img')
20 parser.add_argument('--ouput', dest = 'directoryName', help = 'parent directory name for the directories containing the samples for the different road users', default = '.') 21 parser.add_argument('--ouput', dest = 'directoryName', help = 'parent directory name for the directories containing the samples for the different road users', default = '.')
21 parser.add_argument('--compute-speed-distributions', dest = 'computeSpeedDistribution', help = 'computes the distribution of the road users of each type and fits parameters to each', action = 'store_true') 22 parser.add_argument('--compute-speed-distributions', dest = 'computeSpeedDistribution', help = 'computes the distribution of the road users of each type and fits parameters to each', action = 'store_true')
22
23
24 #parser.add_argument('-d', dest = 'directoryName', help = 'parent directory name for the directories containing the samples for the different road users', required = True)
25 23
26 args = parser.parse_args() 24 args = parser.parse_args()
27 params, videoFilename, databaseFilename, invHomography, intrinsicCameraMatrix, distortionCoefficients, undistortedImageMultiplication, undistort, firstFrameNum = storage.processVideoArguments(args) 25 params, videoFilename, databaseFilename, invHomography, intrinsicCameraMatrix, distortionCoefficients, undistortedImageMultiplication, undistort, firstFrameNum = storage.processVideoArguments(args)
28 classifierParams = storage.ClassifierParameters(params.classifierFilename) 26 classifierParams = storage.ClassifierParameters(params.classifierFilename)
29 27
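The new -o/--overlap option added above is a threshold on the intersection over union (Jaccard index) of the sets of feature numbers attached to an object at the current frame versus at the frame where its previous patch was saved; a new patch is only written when that overlap falls below the threshold, so successive patches of the same object show sufficiently different tracked features. A minimal, self-contained sketch of the check (the feature-number sets below are made-up values, not taken from the script):

def featureOverlap(current, previous):
    """Intersection over union (Jaccard index) of two sets of feature numbers."""
    union = current | previous
    if len(union) == 0:
        return 0.
    return len(current & previous) / len(union)

overlapThreshold = 0.2  # default of the new -o/--overlap option
previous = {12, 13, 14, 15}
current = {14, 15, 16, 17, 18}
print(featureOverlap(current, previous))                     # 2/7, about 0.29
print(featureOverlap(current, previous) < overlapThreshold)  # False: feature sets too similar, no new patch
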
60 frameNum = timeInterval.first 58 frameNum = timeInterval.first
61 capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum) 59 capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum)
62 lastFrameNum = timeInterval.last 60 lastFrameNum = timeInterval.last
63 while ret and frameNum <= timeInterval.last: 61 while ret and frameNum <= timeInterval.last:
64 ret, img = capture.read() 62 ret, img = capture.read()
63 distorted = True
65 if ret: 64 if ret:
66 if frameNum%50 == 0: 65 if frameNum%50 == 0:
67 print('frame number: {}'.format(frameNum)) 66 print('frame number: {}'.format(frameNum))
68 if undistort: # undistort only if necessary
69 img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
70 for obj in objects: 67 for obj in objects:
71 if obj.existsAtInstant(frameNum): 68 if obj.existsAtInstant(frameNum):
72 if (10+frameNum-obj.getFirstInstant())%args.nFramesStep == 0: 69 if (10+frameNum-obj.getFirstInstant())%args.nFramesStep == 0:
73 # todo find next non zero image if none 70 currentImageFeatures = set([f.num for f in obj.getFeatures() if f.existsAtInstant(frameNum)])
74 # todo get several images if different features (measure of similarity) 71 if not hasattr(obj, 'lastImageFeatures') or len(currentImageFeatures.intersection(obj.lastImageFeatures))/len(currentImageFeatures.union(obj.lastImageFeatures)) < args.overlap:
75 croppedImg = cvutils.imageBox(img, obj, frameNum, invHomography, width, height, classifierParams.percentIncreaseCrop, classifierParams.percentIncreaseCrop, classifierParams.minNPixels) 72 obj.lastImageFeatures = currentImageFeatures
76 if croppedImg is not None: 73 if undistort and distorted: # undistort only if necessary
77 imsave(args.directoryName+os.sep+moving.userTypeNames[obj.getUserType()]+os.sep+args.imagePrefix+'-{}-{}.png'.format(obj.getNum(), frameNum), croppedImg) 74 img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
75 distorted = False
76 croppedImg = cvutils.imageBox(img, obj, frameNum, invHomography, width, height, classifierParams.percentIncreaseCrop, classifierParams.percentIncreaseCrop, classifierParams.minNPixels)
77 if croppedImg is not None:
78 cv2.imwrite(args.directoryName+os.sep+moving.userTypeNames[obj.getUserType()]+os.sep+args.imagePrefix+'-{}-{}.png'.format(obj.getNum(), frameNum), croppedImg)
78 elif obj.getLastInstant() == frameNum: 79 elif obj.getLastInstant() == frameNum:
79 objects.remove(obj) 80 objects.remove(obj)
80 frameNum += 1 81 frameNum += 1
81 82
82 # todo speed info: distributions AND min speed equiprobable 83 # todo speed info: distributions AND min speed equiprobable
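
Two other changes in this comparison are worth noting: the frame is now undistorted lazily (the remap is deferred until the first object in the frame actually passes the sampling and overlap checks, and the distorted flag makes it run at most once per frame), and the patch is written with cv2.imwrite instead of matplotlib's imsave, so the BGR array returned by cv2.VideoCapture is saved without any channel conversion. Below is a minimal, self-contained sketch of the lazy-undistortion pattern; the frame, remap tables and patch boxes are toy stand-ins, not values from the script (in the script the maps are the undistortion tables derived from the camera calibration):

import numpy as np, cv2

img = np.zeros((120, 160, 3), dtype=np.uint8)  # stand-in for a frame from capture.read()
# identity remap tables, used here only so the example runs on its own
map1, map2 = np.meshgrid(np.arange(160, dtype=np.float32), np.arange(120, dtype=np.float32))
undistort = True
patchBoxes = [(10, 10, 40, 40), (60, 30, 100, 80)]  # hypothetical (x1, y1, x2, y2) crop boxes

distorted = True  # the freshly read frame has not been remapped yet
for i, (x1, y1, x2, y2) in enumerate(patchBoxes):
    if undistort and distorted:
        # remap at most once per frame, and only if at least one patch is extracted from it
        img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
        distorted = False
    croppedImg = img[y1:y2, x1:x2]
    # cv2.imwrite saves the BGR array as-is (matplotlib's imsave expects RGB)
    cv2.imwrite('img-{}.png'.format(i), croppedImg)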