Mercurial Hosting > traffic-intelligence
changeset 726:43ae3a1af290
added functionality to display matchings between ground truth and tracked objects
author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
---|---|
date | Fri, 07 Aug 2015 13:07:53 -0400 |
parents | 35bc5e30a53f |
children | c6d4ea05a2d0 |
files | python/cvutils.py python/moving.py scripts/compute-clearmot.py |
diffstat | 3 files changed, 97 insertions(+), 13 deletions(-) [+] |
line wrap: on
line diff
diff -r 35bc5e30a53f -r 43ae3a1af290 python/cvutils.py --- a/python/cvutils.py Fri Aug 07 01:05:01 2015 -0400 +++ b/python/cvutils.py Fri Aug 07 13:07:53 2015 -0400 @@ -25,9 +25,22 @@ cvRed = (0,0,255) cvGreen = (0,255,0) cvBlue = (255,0,0) +cvCyan = (255, 255, 0) +cvYellow = (0, 255, 255) +cvMagenta = (255, 0, 255) +cvWhite = (255, 255, 255) +cvBlack = (0,0,0) +cvColors3 = utils.PlottingPropertyValues([cvRed, + cvGreen, + cvBlue]) cvColors = utils.PlottingPropertyValues([cvRed, cvGreen, - cvBlue]) + cvBlue, + cvCyan, + cvYellow, + cvMagenta, + cvWhite, + cvBlack]) def quitKey(key): return chr(key&255)== 'q' or chr(key&255) == 'Q' @@ -58,6 +71,9 @@ #out = utils.openCheck(resultFilename) img.save(resultFilename) +def rgb2gray(rgb): + return dot(rgb[...,:3], [0.299, 0.587, 0.114]) + def matlab2PointCorrespondences(filename): '''Loads and converts the point correspondences saved by the matlab camera calibration tool''' @@ -98,10 +114,11 @@ return cvmat def cvPlot(img, positions, color, lastCoordinate = None, **kwargs): - last = lastCoordinate+1 - if lastCoordinate is not None and lastCoordinate >=0: + if lastCoordinate is None: + last = positions.length()-1 + elif lastCoordinate >=0: last = min(positions.length()-1, lastCoordinate) - for i in range(0, last-1): + for i in range(0, last): cv2.line(img, positions[i].asint().astuple(), positions[i+1].asint().astuple(), color, **kwargs) def cvImshow(windowName, img, rescale = 1.0): @@ -264,7 +281,7 @@ return croppedImg, yCropMin, yCropMax, xCropMin, xCropMax - def displayTrajectories(videoFilename, objects, boundingBoxes = {}, homography = None, firstFrameNum = 0, lastFrameNumArg = None, printFrames = True, rescale = 1., nFramesStep = 1, saveAllImages = False, undistort = False, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1.): + def displayTrajectories(videoFilename, objects, boundingBoxes = {}, homography = None, firstFrameNum = 0, lastFrameNumArg = None, printFrames = 
True, rescale = 1., nFramesStep = 1, saveAllImages = False, undistort = False, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1., annotations = [], gtMatches = {}, toMatches = {}): '''Displays the objects overlaid frame by frame over the video ''' from moving import userTypeNames from math import ceil, log10 @@ -301,6 +318,7 @@ if len(objectToDeleteIds) > 0: objects = [o for o in objects if o.getNum() not in objectToDeleteIds] objectToDeleteIds = [] + # plot objects for obj in objects: if obj.existsAtInstant(frameNum): if obj.getLastInstant() == frameNum: @@ -310,18 +328,32 @@ obj.projectedPositions = obj.positions.project(homography) else: obj.projectedPositions = obj.positions - cvPlot(img, obj.projectedPositions, cvRed, frameNum-obj.getFirstInstant()) + cvPlot(img, obj.projectedPositions, cvColors[obj.getNum()], frameNum-obj.getFirstInstant()) if frameNum not in boundingBoxes.keys() and obj.hasFeatures(): imgcrop, yCropMin, yCropMax, xCropMin, xCropMax = imageBox(img, obj, frameNum, homography, width, height) cv2.rectangle(img, (xCropMin, yCropMin), (xCropMax, yCropMax), cvBlue, 1) objDescription = '{} '.format(obj.num) if userTypeNames[obj.userType] != 'unknown': objDescription += userTypeNames[obj.userType][0].upper() - cv2.putText(img, objDescription, obj.projectedPositions[frameNum-obj.getFirstInstant()].asint().astuple(), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvRed) + if len(annotations) > 0: # if we loaded annotations, but there is no match + if frameNum not in toMatches[obj.getNum()]: + objDescription += " FA" + cv2.putText(img, objDescription, obj.projectedPositions[frameNum-obj.getFirstInstant()].asint().astuple(), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvColors[obj.getNum()]) # plot object bounding boxes if frameNum in boundingBoxes.keys(): for rect in boundingBoxes[frameNum]: - cv2.rectangle(img, rect[0].asint().astuple(), rect[1].asint().astuple(), cvRed) + cv2.rectangle(img, rect[0].asint().astuple(), 
rect[1].asint().astuple(), cvColors[obj.getNum()]) + # plot ground truth + if len(annotations) > 0: + for gt in annotations: + if gt.existsAtInstant(frameNum): + if frameNum in gtMatches[gt.getNum()]: + color = cvColors[gtMatches[gt.getNum()][frameNum]] # same color as object + else: + color = cvRed + cv2.putText(img, 'Miss', gt.topLeftPositions[frameNum-gt.getFirstInstant()].asint().astuple(), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvRed) + cv2.rectangle(img, gt.topLeftPositions[frameNum-gt.getFirstInstant()].asint().astuple(), gt.bottomRightPositions[frameNum-gt.getFirstInstant()].asint().astuple(), color) + # saving images and going to next if not saveAllImages: cvImshow(windowName, img, rescale) key = cv2.waitKey()
diff -r 35bc5e30a53f -r 43ae3a1af290 python/moving.py --- a/python/moving.py Fri Aug 07 01:05:01 2015 -0400 +++ b/python/moving.py Fri Aug 07 13:07:53 2015 -0400 @@ -1067,6 +1067,42 @@ print 'The object does not exist at '+str(inter) return None + def getObjectsInMask(self, mask, homography = None, minLength = 1): + '''Returns new objects made of the positions in the mask + mask is in the destination of the homography space''' + if homography is not None: + self.projectedPositions = self.positions.project(homography) + else: + self.projectedPositions = self.positions + def inMask(positions, i, mask): + p = positions[i] + return mask[p.y, p.x] != 0. + + #subTimeIntervals self.getFirstInstant()+i + filteredIndices = [inMask(self.projectedPositions, i, mask) for i in range(int(self.length()))] + # 'connected components' in subTimeIntervals + l = 0 + intervalLabels = [] + prev = True + for i in filteredIndices: + if i: + if not prev: # new interval + l += 1 + intervalLabels.append(l) + else: + intervalLabels.append(-1) + prev = i + intervalLabels = array(intervalLabels) + subObjects = [] + for l in set(intervalLabels): + if l >= 0: + if sum(intervalLabels == l) >= minLength: + times = [self.getFirstInstant()+i for i in range(len(intervalLabels)) if intervalLabels[i] == l] + subTimeInterval = TimeInterval(min(times), max(times)) + subObjects.append(self.getObjectInTimeInterval(subTimeInterval)) + + return subObjects + def getPositions(self): return self.positions
diff -r 35bc5e30a53f -r 43ae3a1af290 scripts/compute-clearmot.py --- a/scripts/compute-clearmot.py Fri Aug 07 01:05:01 2015 -0400 +++ b/scripts/compute-clearmot.py Fri Aug 07 13:07:53 2015 -0400 @@ -2,7 +2,8 @@ import sys, argparse from numpy import loadtxt -import moving, storage +from numpy.linalg import inv +import moving, storage, cvutils # TODO: need to trim objects to same mask ? @@ -16,9 +17,11 @@ parser.add_argument('-g', dest = 'groundTruthDatabaseFilename', help = 'name of the Sqlite database containing the ground truth', required = True) parser.add_argument('-o', dest = 'homographyFilename', help = 'name of the filename for the homography (if tracking was done using the homography)') parser.add_argument('-m', dest = 'matchingDistance', help = 'matching distance between tracker and ground truth trajectories', required = True, type = float) +parser.add_argument('--mask', dest = 'maskFilename', help = 'filename of the mask file used to define where objects were tracked') parser.add_argument('-f', dest = 'firstInstant', help = 'first instant for measurement', required = True, type = int) parser.add_argument('-l', dest = 'lastInstant', help = 'last instant for measurement', required = True, type = int) parser.add_argument('--display', dest = 'display', help = 'display the ground truth to object matches (graphically)', action = 'store_true') +parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file (for display)') args = parser.parse_args() if args.homographyFilename is not None: @@ -27,6 +30,17 @@ homography = None objects = storage.loadTrajectoriesFromSqlite(args.trackerDatabaseFilename, 'object') + +if args.maskFilename is not None: + maskObjects = [] + from matplotlib.pyplot import imread + mask = imread(args.maskFilename) + if len(mask) > 1: + mask = mask[:,:,0] + for obj in objects: + maskObjects += obj.getObjectsInMask(mask, inv(homography), 2) # TODO add option to keep object if at least one feature in mask + objects = 
maskObjects + annotations = storage.loadGroundTruthFromSqlite(args.groundTruthDatabaseFilename) for a in annotations: a.computeCentroidTrajectory(homography) @@ -43,7 +57,9 @@ print 'Number of mismatches: {}'.format(mme) print 'Number of false alarms.frames: {}'.format(fpt) if args.display: - print('Ground truth matches') - print(gtMatches) - print('Object matches') - print toMatches + cvutils.displayTrajectories(args.videoFilename, objects, {}, inv(homography), args.firstInstant, args.lastInstant, annotations = annotations, gtMatches = gtMatches, toMatches = toMatches)#, rescale = args.rescale, nFramesStep = args.nFramesStep, saveAllImages = args.saveAllImages, undistort = (undistort or args.undistort), intrinsicCameraMatrix = intrinsicCameraMatrix, distortionCoefficients = distortionCoefficients, undistortedImageMultiplication = undistortedImageMultiplication) + + #print('Ground truth matches') + #print(gtMatches) + #print('Object matches') + #print toMatches