Mercurial Hosting > traffic-intelligence
comparison scripts/extract-appearance-images.py @ 904:8f60ecfc2f06
work in progress, almost ready
author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
---|---|
date | Thu, 22 Jun 2017 18:08:46 -0400 |
parents | c69a8defe5c3 |
children | 0e017178f7ab |
comparison legend: equal | deleted | inserted | replaced
903:81ee5aaf213d | 904:8f60ecfc2f06 |
---|---|
1 #! /usr/bin/env python | 1 #! /usr/bin/env python |
2 | 2 |
3 import numpy as np | 3 import numpy as np, cv2 |
4 import argparse | 4 import argparse, os |
5 from cv2 import SVM_RBF, SVM_C_SVC | 5 from pandas import read_csv |
6 #from cv2.ml import SVM_RBF, SVM_C_SVC, ROW_SAMPLE # row_sample for layout in cv2.ml.SVM_load | 6 from matplotlib.pyplot import imsave |
7 | 7 |
8 import cvutils, moving, ml, storage | 8 import cvutils, moving, ml, storage |
9 | 9 |
10 parser = argparse.ArgumentParser(description='The program extracts labeled image patches to train the HoG-SVM classifier, and optionally speed information') | 10 parser = argparse.ArgumentParser(description='The program extracts labeled image patches to train the HoG-SVM classifier, and optionally speed information') |
11 parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the configuration file', required = True) | 11 parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the configuration file', required = True) |
12 parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file (overrides the configuration file)') | 12 parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file (overrides the configuration file)') |
13 parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file (overrides the configuration file)') | 13 parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file (overrides the configuration file)') |
14 parser.add_argument('--gt', dest = 'classificationAnnotationFilename', help = 'name of the file containing the correct classes (user types)', required = True) | 14 parser.add_argument('--gt', dest = 'classificationAnnotationFilename', help = 'name of the file containing the correct classes (user types)', required = True) |
15 parser.add_argument('--delimiter', dest = 'classificationAnnotationFilenameDelimiter', help = 'delimiter for the fields in the correct classification file', default= ' ') | |
15 parser.add_argument('-s', dest = 'nFramesStep', help = 'number of frames between each saved patch', default = 50, type = int) | 16 parser.add_argument('-s', dest = 'nFramesStep', help = 'number of frames between each saved patch', default = 50, type = int) |
16 parser.add_argument('-n', dest = 'nObjects', help = 'number of objects to use to extract patches from', type = int, default = None) | 17 parser.add_argument('-n', dest = 'nObjects', help = 'number of objects to use to extract patches from', type = int, default = None) |
18 parser.add_argument('--extract-all', dest = 'extractAllObjectImages', help = 'extracts the images for all objects, well classified or not (otherwise, extracts only for the misclassified)', action = 'store_true') | |
19 parser.add_argument('--prefix', dest = 'imagePrefix', help = 'image prefix', default = 'img') | |
20 parser.add_argument('--ouput', dest = 'directoryName', help = 'parent directory name for the directories containing the samples for the different road users', default = '.') | |
17 parser.add_argument('--compute-speed-distributions', dest = 'computeSpeedDistribution', help = 'computes the distribution of the road users of each type and fits parameters to each', action = 'store_true') | 21 parser.add_argument('--compute-speed-distributions', dest = 'computeSpeedDistribution', help = 'computes the distribution of the road users of each type and fits parameters to each', action = 'store_true') |
18 | 22 |
19 | 23 |
20 #parser.add_argument('-d', dest = 'directoryName', help = 'parent directory name for the directories containing the samples for the different road users', required = True) | 24 #parser.add_argument('-d', dest = 'directoryName', help = 'parent directory name for the directories containing the samples for the different road users', required = True) |
21 | 25 |
22 args = parser.parse_args() | 26 args = parser.parse_args() |
23 params, videoFilename, databaseFilename, invHomography, intrinsicCameraMatrix, distortionCoefficients, undistortedImageMultiplication, undistort, firstFrameNum = storage.processVideoArguments(args) | 27 params, videoFilename, databaseFilename, invHomography, intrinsicCameraMatrix, distortionCoefficients, undistortedImageMultiplication, undistort, firstFrameNum = storage.processVideoArguments(args) |
24 classifierParams = storage.ClassifierParameters(params.classifierFilename) | 28 classifierParams = storage.ClassifierParameters(params.classifierFilename) |
25 | 29 |
26 objects = storage.loadTrajectoriesFromSqlite(databaseFilename, 'object', args.nObjects, withFeatures = True) | 30 classificationAnnotations = read_csv(args.classificationAnnotationFilename, index_col=0, delimiter = args.classificationAnnotationFilenameDelimiter, names = ["object_num", "road_user_type"]) |
31 annotatedObjectNumbers = classificationAnnotations.index.tolist() | |
32 | |
33 # objects has the objects for which we want to extract labeled images | |
34 if args.extractAllObjectImages: | |
35 objects = storage.loadTrajectoriesFromSqlite(databaseFilename, 'object', args.nObjects, withFeatures = True) | |
36 else: | |
37 if len(annotatedObjectNumbers) > args.nObjects: | |
38 classificationAnnotations = classificationAnnotations[:args.nObjects] | |
39 annotatedObjectNumbers = classificationAnnotations.index.tolist() | |
40 objects = storage.loadTrajectoriesFromSqlite(databaseFilename, 'object', annotatedObjectNumbers, withFeatures = True) | |
41 for obj in objects: | |
42 if obj.getNum() in annotatedObjectNumbers: | |
43 obj.setUserType(classificationAnnotations.loc[obj.getNum(), 'road_user_type']) | |
27 timeInterval = moving.TimeInterval.unionIntervals([obj.getTimeInterval() for obj in objects]) | 44 timeInterval = moving.TimeInterval.unionIntervals([obj.getTimeInterval() for obj in objects]) |
45 | |
46 for userType in classificationAnnotations['road_user_type'].unique(): | |
47 if not os.path.exists(args.directoryName+os.sep+moving.userTypeNames[userType]): | |
48 os.mkdir(args.directoryName+os.sep+moving.userTypeNames[userType]) | |
28 | 49 |
29 capture = cv2.VideoCapture(videoFilename) | 50 capture = cv2.VideoCapture(videoFilename) |
30 width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)) | 51 width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)) |
31 height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)) | 52 height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)) |
32 | 53 |
33 if undistort: # setup undistortion | 54 if undistort: # setup undistortion |
34 [map1, map2] = cvutils.computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients) | 55 [map1, map2] = cvutils.computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients) |
56 | |
57 print(timeInterval) | |
35 if capture.isOpened(): | 58 if capture.isOpened(): |
36 ret = True | 59 ret = True |
37 frameNum = timeInterval.first | 60 frameNum = timeInterval.first |
38 capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum) | 61 capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum) |
39 lastFrameNum = timeInterval.last | 62 lastFrameNum = timeInterval.last |
40 while ret and frameNum <= lastFrameNum: | 63 while ret and frameNum <= timeInterval.last: |
41 ret, img = capture.read() | 64 ret, img = capture.read() |
42 if ret: | 65 if ret: |
43 if frameNum%50 == 0: | 66 if frameNum%50 == 0: |
44 print('frame number: {}'.format(frameNum)) | 67 print('frame number: {}'.format(frameNum)) |
45 if undistort: | 68 if undistort: # undistort only if necessary |
46 img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR) | 69 img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR) |
47 | 70 for obj in objects: |
48 | 71 if obj.existsAtInstant(frameNum): |
72 if (obj.getFirstInstant()-frameNum)%args.nFramesStep == 0: # todo find next non zero image if none | |
73 croppedImg = cvutils.imageBox(img, obj, frameNum, invHomography, width, height, classifierParams.percentIncreaseCrop, classifierParams.percentIncreaseCrop, classifierParams.minNPixels) | |
74 if croppedImg is not None: | |
75 imsave(args.directoryName+os.sep+moving.userTypeNames[obj.getUserType()]+args.imagePrefix+'-{}-{}.png'.format(obj.getNum(), frameNum), croppedImg) | |
76 elif obj.getLastInstant() == frameNum: | |
77 objects.remove(obj) | |
49 frameNum += 1 | 78 frameNum += 1 |
50 | |
51 | |
52 | 79 |
53 # todo speed info: distributions AND min speed equiprobable | 80 # todo speed info: distributions AND min speed equiprobable |
54 | 81 |
55 # provide csv delimiter for the classification file as arg | |
56 |