Mercurial Hosting > traffic-intelligence
comparison python/cvutils.py @ 726:43ae3a1af290
added functionality to display matchings between ground truth and tracked objects
author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
---|---|
date | Fri, 07 Aug 2015 13:07:53 -0400 |
parents | 35bc5e30a53f |
children | 6022350f8173 |
comparison
equal
deleted
inserted
replaced
725:35bc5e30a53f | 726:43ae3a1af290 |
---|---|
23 | 23 |
24 | 24 |
# OpenCV drawing colors in BGR channel order (OpenCV convention)
cvRed = (0,0,255)
cvGreen = (0,255,0)
cvBlue = (255,0,0)
cvCyan = (255, 255, 0)
cvYellow = (0, 255, 255)
cvMagenta = (255, 0, 255)
cvWhite = (255, 255, 255)
cvBlack = (0,0,0)
# cycling color palettes: the original 3-color set, and the full 8-color set
# used to distinguish many tracked objects
cvColors3 = utils.PlottingPropertyValues([cvRed,
                                          cvGreen,
                                          cvBlue])
cvColors = utils.PlottingPropertyValues([cvRed,
                                         cvGreen,
                                         cvBlue,
                                         cvCyan,
                                         cvYellow,
                                         cvMagenta,
                                         cvWhite,
                                         cvBlack])
31 | 44 |
def quitKey(key):
    'Returns True if the pressed key (masked to its low byte) means quit (q/Q)'
    return chr(key & 255) in ('q', 'Q')
34 | 47 |
35 def saveKey(key): | 48 def saveKey(key): |
55 #draw.line([p1.x, p1.y, p2.x, p2.y], pen) | 68 #draw.line([p1.x, p1.y, p2.x, p2.y], pen) |
56 del draw | 69 del draw |
57 | 70 |
58 #out = utils.openCheck(resultFilename) | 71 #out = utils.openCheck(resultFilename) |
59 img.save(resultFilename) | 72 img.save(resultFilename) |
73 | |
def rgb2gray(rgb):
    '''Converts an RGB image (array of shape (..., 3) or more channels,
    only the first 3 are used) to grayscale luma.

    Uses the ITU-R BT.601 luma weights 0.299/0.587/0.114; the blue
    coefficient is fixed from the erroneous 0.144 (the weights must sum
    to 1.0, otherwise every gray value is scaled by ~1.03).'''
    return dot(rgb[...,:3], [0.299, 0.587, 0.114])
60 | 76 |
61 def matlab2PointCorrespondences(filename): | 77 def matlab2PointCorrespondences(filename): |
62 '''Loads and converts the point correspondences saved | 78 '''Loads and converts the point correspondences saved |
63 by the matlab camera calibration tool''' | 79 by the matlab camera calibration tool''' |
64 from numpy.lib.io import loadtxt, savetxt | 80 from numpy.lib.io import loadtxt, savetxt |
96 for j in range(cvmat.cols): | 112 for j in range(cvmat.cols): |
97 cvmat[i,j] = a[i,j] | 113 cvmat[i,j] = a[i,j] |
98 return cvmat | 114 return cvmat |
99 | 115 |
def cvPlot(img, positions, color, lastCoordinate = None, **kwargs):
    '''Draws the trajectory positions on img as line segments of the given color.

    img: OpenCV image (numpy array)
    positions: trajectory object with length() and integer indexing yielding
               points with asint().astuple() -- project type, TODO confirm
    lastCoordinate: None draws the whole trajectory; a value >= 0 stops at
                    that position index (clamped to the trajectory length);
                    a negative value draws nothing
    kwargs: forwarded to cv2.line (e.g. thickness)'''
    if lastCoordinate is None:
        last = positions.length()-1
    elif lastCoordinate >= 0:
        last = min(positions.length()-1, lastCoordinate)
    else:
        # bug fix: 'last' was previously left unbound when lastCoordinate < 0,
        # raising NameError in the range() below; draw no segment instead
        last = 0
    for i in range(0, last):
        cv2.line(img, positions[i].asint().astuple(), positions[i+1].asint().astuple(), color, **kwargs)
106 | 123 |
107 def cvImshow(windowName, img, rescale = 1.0): | 124 def cvImshow(windowName, img, rescale = 1.0): |
108 'Rescales the image (in particular if too large)' | 125 'Rescales the image (in particular if too large)' |
109 from cv2 import resize | 126 from cv2 import resize |
262 else: | 279 else: |
263 croppedImg = None | 280 croppedImg = None |
264 return croppedImg, yCropMin, yCropMax, xCropMin, xCropMax | 281 return croppedImg, yCropMin, yCropMax, xCropMin, xCropMax |
265 | 282 |
266 | 283 |
267 def displayTrajectories(videoFilename, objects, boundingBoxes = {}, homography = None, firstFrameNum = 0, lastFrameNumArg = None, printFrames = True, rescale = 1., nFramesStep = 1, saveAllImages = False, undistort = False, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1.): | 284 def displayTrajectories(videoFilename, objects, boundingBoxes = {}, homography = None, firstFrameNum = 0, lastFrameNumArg = None, printFrames = True, rescale = 1., nFramesStep = 1, saveAllImages = False, undistort = False, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1., annotations = [], gtMatches = {}, toMatches = {}): |
268 '''Displays the objects overlaid frame by frame over the video ''' | 285 '''Displays the objects overlaid frame by frame over the video ''' |
269 from moving import userTypeNames | 286 from moving import userTypeNames |
270 from math import ceil, log10 | 287 from math import ceil, log10 |
271 | 288 |
272 capture = cv2.VideoCapture(videoFilename) | 289 capture = cv2.VideoCapture(videoFilename) |
299 if printFrames: | 316 if printFrames: |
300 print('frame {0}'.format(frameNum)) | 317 print('frame {0}'.format(frameNum)) |
301 if len(objectToDeleteIds) > 0: | 318 if len(objectToDeleteIds) > 0: |
302 objects = [o for o in objects if o.getNum() not in objectToDeleteIds] | 319 objects = [o for o in objects if o.getNum() not in objectToDeleteIds] |
303 objectToDeleteIds = [] | 320 objectToDeleteIds = [] |
321 # plot objects | |
304 for obj in objects: | 322 for obj in objects: |
305 if obj.existsAtInstant(frameNum): | 323 if obj.existsAtInstant(frameNum): |
306 if obj.getLastInstant() == frameNum: | 324 if obj.getLastInstant() == frameNum: |
307 objectToDeleteIds.append(obj.getNum()) | 325 objectToDeleteIds.append(obj.getNum()) |
308 if not hasattr(obj, 'projectedPositions'): | 326 if not hasattr(obj, 'projectedPositions'): |
309 if homography is not None: | 327 if homography is not None: |
310 obj.projectedPositions = obj.positions.project(homography) | 328 obj.projectedPositions = obj.positions.project(homography) |
311 else: | 329 else: |
312 obj.projectedPositions = obj.positions | 330 obj.projectedPositions = obj.positions |
313 cvPlot(img, obj.projectedPositions, cvRed, frameNum-obj.getFirstInstant()) | 331 cvPlot(img, obj.projectedPositions, cvColors[obj.getNum()], frameNum-obj.getFirstInstant()) |
314 if frameNum not in boundingBoxes.keys() and obj.hasFeatures(): | 332 if frameNum not in boundingBoxes.keys() and obj.hasFeatures(): |
315 imgcrop, yCropMin, yCropMax, xCropMin, xCropMax = imageBox(img, obj, frameNum, homography, width, height) | 333 imgcrop, yCropMin, yCropMax, xCropMin, xCropMax = imageBox(img, obj, frameNum, homography, width, height) |
316 cv2.rectangle(img, (xCropMin, yCropMin), (xCropMax, yCropMax), cvBlue, 1) | 334 cv2.rectangle(img, (xCropMin, yCropMin), (xCropMax, yCropMax), cvBlue, 1) |
317 objDescription = '{} '.format(obj.num) | 335 objDescription = '{} '.format(obj.num) |
318 if userTypeNames[obj.userType] != 'unknown': | 336 if userTypeNames[obj.userType] != 'unknown': |
319 objDescription += userTypeNames[obj.userType][0].upper() | 337 objDescription += userTypeNames[obj.userType][0].upper() |
320 cv2.putText(img, objDescription, obj.projectedPositions[frameNum-obj.getFirstInstant()].asint().astuple(), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvRed) | 338 if len(annotations) > 0: # if we loaded annotations, but there is no match |
339 if frameNum not in toMatches[obj.getNum()]: | |
340 objDescription += " FA" | |
341 cv2.putText(img, objDescription, obj.projectedPositions[frameNum-obj.getFirstInstant()].asint().astuple(), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvColors[obj.getNum()]) | |
321 # plot object bounding boxes | 342 # plot object bounding boxes |
322 if frameNum in boundingBoxes.keys(): | 343 if frameNum in boundingBoxes.keys(): |
323 for rect in boundingBoxes[frameNum]: | 344 for rect in boundingBoxes[frameNum]: |
324 cv2.rectangle(img, rect[0].asint().astuple(), rect[1].asint().astuple(), cvRed) | 345 cv2.rectangle(img, rect[0].asint().astuple(), rect[1].asint().astuple(), cvColors[obj.getNum()]) |
346 # plot ground truth | |
347 if len(annotations) > 0: | |
348 for gt in annotations: | |
349 if gt.existsAtInstant(frameNum): | |
350 if frameNum in gtMatches[gt.getNum()]: | |
351 color = cvColors[gtMatches[gt.getNum()][frameNum]] # same color as object | |
352 else: | |
353 color = cvRed | |
354 cv2.putText(img, 'Miss', gt.topLeftPositions[frameNum-gt.getFirstInstant()].asint().astuple(), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvRed) | |
355 cv2.rectangle(img, gt.topLeftPositions[frameNum-gt.getFirstInstant()].asint().astuple(), gt.bottomRightPositions[frameNum-gt.getFirstInstant()].asint().astuple(), color) | |
356 # saving images and going to next | |
325 if not saveAllImages: | 357 if not saveAllImages: |
326 cvImshow(windowName, img, rescale) | 358 cvImshow(windowName, img, rescale) |
327 key = cv2.waitKey() | 359 key = cv2.waitKey() |
328 if saveAllImages or saveKey(key): | 360 if saveAllImages or saveKey(key): |
329 cv2.imwrite('image-{{:0{}}}.png'.format(nZerosFilename).format(frameNum), img) | 361 cv2.imwrite('image-{{:0{}}}.png'.format(nZerosFilename).format(frameNum), img) |