Mercurial Hosting > traffic-intelligence
changeset 482:f6415f012640
adding functionality (save images directly while displaying trajectories, to create movies)
author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
---|---|
date | Wed, 02 Apr 2014 16:12:24 -0400 |
parents | b6ad86ee7033 |
children | 30b3455978d9 |
files | python/cvutils.py python/storage.py scripts/display-trajectories.py scripts/safety-analysis.py |
diffstat | 4 files changed, 16 insertions(+), 12 deletions(-) [+] |
line wrap: on
line diff
--- a/python/cvutils.py Wed Apr 02 01:45:53 2014 -0400 +++ b/python/cvutils.py Wed Apr 02 16:12:24 2014 -0400 @@ -202,10 +202,10 @@ return imgcrop, yCropMin, yCropMax, xCropMin, xCropMax - def displayTrajectories(videoFilename, objects, boundingBoxes = {}, homography = None, firstFrameNum = 0, lastFrameNumArg = None, printFrames = True, rescale = 1., nFramesStep = 1): + def displayTrajectories(videoFilename, objects, boundingBoxes = {}, homography = None, firstFrameNum = 0, lastFrameNumArg = None, printFrames = True, rescale = 1., nFramesStep = 1, saveAllImages = False): '''Displays the objects overlaid frame by frame over the video ''' from moving import userTypeNames - + from math import ceil, log10 capture = cv2.VideoCapture(videoFilename) width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)) height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)) @@ -214,11 +214,12 @@ ret = True frameNum = firstFrameNum capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNum) - if not lastFrameNumArg: + if lastFrameNumArg == None: from sys import maxint lastFrameNum = maxint else: lastFrameNum = lastFrameNumArg + nZerosFilename = int(ceil(log10(lastFrameNum))) while ret and not quitKey(key) and frameNum < lastFrameNum: ret, img = capture.read() if ret: @@ -242,10 +243,11 @@ if userTypeNames[obj.userType] != 'unknown': objDescription += userTypeNames[obj.userType][0].upper() cv2.putText(img, objDescription, obj.projectedPositions[frameNum-obj.getFirstInstant()].asint().astuple(), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvRed) - cvImshow('frame', img, rescale) - key = cv2.waitKey() - if saveKey(key): - cv2.imwrite('image.png', img) + if not saveAllImages: + cvImshow('frame', img, rescale) + key = cv2.waitKey() + if saveAllImages or saveKey(key): + cv2.imwrite('image-{{:0{}}}.png'.format(nZerosFilename).format(frameNum), img) frameNum += nFramesStep if nFramesStep > 1: capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum)
--- a/python/storage.py Wed Apr 02 01:45:53 2014 -0400 +++ b/python/storage.py Wed Apr 02 16:12:24 2014 -0400 @@ -252,8 +252,8 @@ connection.commit() connection.close() -def loadIndicators(filename): - '''Loads interaction indicators +def loadInteractions(filename): + '''Loads interaction and their indicators TODO choose the interactions to load''' interactions = [] @@ -295,7 +295,7 @@ # CREATE TEMP TABLE IF NOT EXISTS object_instants AS SELECT OF.object_id, min(frame_number) as first_instant, max(frame_number) as last_instant from positions P, objects_features OF where P.trajectory_id = OF.trajectory_id group by OF.object_id order by OF.object_id def createBoundingBoxTable(filename, invHomography = None): - '''Create the table to store the object bounding boxes + '''Create the table to store the object bounding boxes in image space ''' connection = sqlite3.connect(filename) cursor = connection.cursor()
--- a/scripts/display-trajectories.py Wed Apr 02 01:45:53 2014 -0400 +++ b/scripts/display-trajectories.py Wed Apr 02 16:12:24 2014 -0400 @@ -16,6 +16,8 @@ parser.add_argument('-f', dest = 'firstFrameNum', help = 'number of first frame number to display', default = 0, type = int) parser.add_argument('-r', dest = 'rescale', help = 'rescaling factor for the displayed image', default = 1., type = float) parser.add_argument('-s', dest = 'nFramesStep', help = 'number of frames between each display', default = 1, type = int) +parser.add_argument('--save-images', dest = 'saveAllImages', help = 'save all images', action = 'store_true') +parser.add_argument('--last-frame', dest = 'lastFrameNum', help = 'number of last frame number to save (for image saving, no display is made)', default = None, type = int) args = parser.parse_args() @@ -39,4 +41,4 @@ objects = storage.loadTrajectoriesFromSqlite(databaseFilename, args.trajectoryType) boundingBoxes = storage.loadBoundingBoxTable(databaseFilename) -cvutils.displayTrajectories(videoFilename, objects, boundingBoxes, homography, firstFrameNum, rescale = args.rescale, nFramesStep = args.nFramesStep) +cvutils.displayTrajectories(videoFilename, objects, boundingBoxes, homography, firstFrameNum, args.lastFrameNum, rescale = args.rescale, nFramesStep = args.nFramesStep, saveAllImages = args.saveAllImages)
--- a/scripts/safety-analysis.py Wed Apr 02 01:45:53 2014 -0400 +++ b/scripts/safety-analysis.py Wed Apr 02 16:12:24 2014 -0400 @@ -13,7 +13,7 @@ parser = argparse.ArgumentParser(description='The program processes indicators for all pairs of road users in the scene') parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the configuration file') parser.add_argument('--prediction-method', dest = 'predictionMethod', help = 'prediction method (constant velocity (vector computation), constant velocity, normal adaptation, point set prediction)', choices = ['cvd', 'cv', 'na', 'ps']) -parser.add_argument('--display-cp', dest = 'displayCollisionPoints', help = 'display collision points') +parser.add_argument('--display-cp', dest = 'displayCollisionPoints', help = 'display collision points', action = 'store_true') args = parser.parse_args() params = utils.TrackingParameters()