Mercurial Hosting > traffic-intelligence
changeset 909:cd038493f8c6
finished image extraction script for HoG-SVM training
author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
---|---|
date | Mon, 26 Jun 2017 17:45:32 -0400 |
parents | b297525b2cbf |
children | b58a1061a717 |
files | python/storage.py scripts/extract-appearance-images.py |
diffstat | 2 files changed, 13 insertions(+), 12 deletions(-) [+] |
line wrap: on
line diff
--- a/python/storage.py	Mon Jun 26 00:10:35 2017 -0400
+++ b/python/storage.py	Mon Jun 26 17:45:32 2017 -0400
@@ -416,7 +416,7 @@
                 dbfn = filename
             cursor.execute('INSERT INTO prototypes (id, dbfilename, trajectory_type, nMatchings) VALUES ({},\"{}\",\"{}\",{})'.format(protoId, dbfn, trajectoryType, n))
             cursor.execute('SELECT * from sqlite_master WHERE type = \"table\" and name = \"{}\"'.format(tableNames[trajectoryType]))
-            if len(cursor.fetchall) == 0:
+            if len(cursor.fetchall()) == 0:
                 pass # save prototype trajectory data
     except sqlite3.OperationalError as error:
         printDBError(error)
--- a/scripts/extract-appearance-images.py	Mon Jun 26 00:10:35 2017 -0400
+++ b/scripts/extract-appearance-images.py	Mon Jun 26 17:45:32 2017 -0400
@@ -3,7 +3,7 @@
 import numpy as np, cv2
 import argparse, os
 from pandas import read_csv
-from matplotlib.pyplot import imsave, imshow, figure
+from matplotlib.pyplot import imshow, figure
 
 import cvutils, moving, ml, storage
 
@@ -15,14 +15,12 @@
 parser.add_argument('--delimiter', dest = 'classificationAnnotationFilenameDelimiter', help = 'delimiter for the fields in the correct classification file', default= ' ')
 parser.add_argument('-s', dest = 'nFramesStep', help = 'number of frames between each saved patch', default = 50, type = int)
 parser.add_argument('-n', dest = 'nObjects', help = 'number of objects to use to extract patches from', type = int, default = None)
+parser.add_argument('-o', dest = 'overlap', help = 'maximum intersection over union of the features nFramesStep apart to save image', type = float, default = 0.2)
 parser.add_argument('--extract-all', dest = 'extractAllObjectImages', help = 'extracts the images for all objects, well classified or not (otherwise, extracts only for the misclassified)', action = 'store_true')
 parser.add_argument('--prefix', dest = 'imagePrefix', help = 'image prefix', default = 'img')
 parser.add_argument('--ouput', dest = 'directoryName', help = 'parent directory name for the directories containing the samples for the different road users', default = '.')
 parser.add_argument('--compute-speed-distributions', dest = 'computeSpeedDistribution', help = 'computes the distribution of the road users of each type and fits parameters to each', action = 'store_true')
-
-#parser.add_argument('-d', dest = 'directoryName', help = 'parent directory name for the directories containing the samples for the different road users', required = True)
-
 args = parser.parse_args()
 
 params, videoFilename, databaseFilename, invHomography, intrinsicCameraMatrix, distortionCoefficients, undistortedImageMultiplication, undistort, firstFrameNum = storage.processVideoArguments(args)
 classifierParams = storage.ClassifierParameters(params.classifierFilename)
@@ -62,19 +60,22 @@
 lastFrameNum = timeInterval.last
 while ret and frameNum <= timeInterval.last:
     ret, img = capture.read()
+    distorted = True
     if ret:
         if frameNum%50 == 0:
             print('frame number: {}'.format(frameNum))
-        if undistort: # undistort only if necessary
-            img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
         for obj in objects:
             if obj.existsAtInstant(frameNum):
                 if (10+frameNum-obj.getFirstInstant())%args.nFramesStep == 0:
-                    # todo find next non zero image if none
-                    # todo get several images if different features (measure of similarity)
-                    croppedImg = cvutils.imageBox(img, obj, frameNum, invHomography, width, height, classifierParams.percentIncreaseCrop, classifierParams.percentIncreaseCrop, classifierParams.minNPixels)
-                    if croppedImg is not None:
-                        imsave(args.directoryName+os.sep+moving.userTypeNames[obj.getUserType()]+os.sep+args.imagePrefix+'-{}-{}.png'.format(obj.getNum(), frameNum), croppedImg)
+                    currentImageFeatures = set([f.num for f in obj.getFeatures() if f.existsAtInstant(frameNum)])
+                    if not hasattr(obj, 'lastImageFeatures') or len(currentImageFeatures.intersection(obj.lastImageFeatures))/len(currentImageFeatures.union(obj.lastImageFeatures)) < args.overlap:
+                        obj.lastImageFeatures = currentImageFeatures
+                        if undistort and distorted: # undistort only if necessary
+                            img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
+                            distorted = False
+                        croppedImg = cvutils.imageBox(img, obj, frameNum, invHomography, width, height, classifierParams.percentIncreaseCrop, classifierParams.percentIncreaseCrop, classifierParams.minNPixels)
+                        if croppedImg is not None:
+                            cv2.imwrite(args.directoryName+os.sep+moving.userTypeNames[obj.getUserType()]+os.sep+args.imagePrefix+'-{}-{}.png'.format(obj.getNum(), frameNum), croppedImg)
             elif obj.getLastInstant() == frameNum:
                 objects.remove(obj)
     frameNum += 1