Mercurial Hosting > traffic-intelligence
comparison python/cvutils.py @ 904:8f60ecfc2f06
work in progress, almost ready
author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
---|---|
date | Thu, 22 Jun 2017 18:08:46 -0400 |
parents | 81ee5aaf213d |
children | a71455bd8367 |
comparison
equal
deleted
inserted
replaced
903:81ee5aaf213d | 904:8f60ecfc2f06 |
---|---|
256 capture.release() | 256 capture.release() |
257 return fps | 257 return fps |
258 else: | 258 else: |
259 print('Video capture for {} failed'.format(videoFilename)) | 259 print('Video capture for {} failed'.format(videoFilename)) |
260 return None | 260 return None |
261 | 261 |
262 def imageBox(img, obj, frameNum, homography, width, height, px = 0.2, py = 0.2, minNPixels = 800): | 262 def imageBoxSize(obj, frameNum, homography, width, height, px = 0.2, py = 0.2): |
263 'Computes the bounding box of object at frameNum' | 263 'Computes the bounding box size of object at frameNum' |
264 x = [] | 264 x = [] |
265 y = [] | 265 y = [] |
266 if obj.hasFeatures(): | 266 if obj.hasFeatures(): |
267 for f in obj.getFeatures(): | 267 for f in obj.getFeatures(): |
268 if f.existsAtInstant(frameNum): | 268 if f.existsAtInstant(frameNum): |
278 a = max(ymax - ymin + (2 * yMm), xmax - (xmin + 2 * xMm)) | 278 a = max(ymax - ymin + (2 * yMm), xmax - (xmin + 2 * xMm)) |
279 yCropMin = int(max(0, .5 * (ymin + ymax - a))) | 279 yCropMin = int(max(0, .5 * (ymin + ymax - a))) |
280 yCropMax = int(min(height - 1, .5 * (ymin + ymax + a))) | 280 yCropMax = int(min(height - 1, .5 * (ymin + ymax + a))) |
281 xCropMin = int(max(0, .5 * (xmin + xmax - a))) | 281 xCropMin = int(max(0, .5 * (xmin + xmax - a))) |
282 xCropMax = int(min(width - 1, .5 * (xmin + xmax + a))) | 282 xCropMax = int(min(width - 1, .5 * (xmin + xmax + a))) |
283 return yCropMin, yCropMax, xCropMin, xCropMax | |
284 | |
def imageBox(img, obj, frameNum, homography, width, height, px = 0.2, py = 0.2, minNPixels = 800):
    '''Returns the image cropped to the object's bounding box at frameNum.

    The box is computed by imageBoxSize; the crop is returned only when the
    box is non-degenerate and covers more than minNPixels pixels, otherwise None.'''
    yCropMin, yCropMax, xCropMin, xCropMax = imageBoxSize(obj, frameNum, homography, width, height, px, py)
    # guard: reject degenerate (zero-width/height) or too-small boxes
    if yCropMax == yCropMin or xCropMax == xCropMin:
        return None
    if (yCropMax - yCropMin) * (xCropMax - xCropMin) <= minNPixels:
        return None
    return img[yCropMin : yCropMax, xCropMin : xCropMax]
288 | |
289 | 292 |
290 def displayTrajectories(videoFilename, objects, boundingBoxes = {}, homography = None, firstFrameNum = 0, lastFrameNumArg = None, printFrames = True, rescale = 1., nFramesStep = 1, saveAllImages = False, nZerosFilenameArg = None, undistort = False, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1., annotations = [], gtMatches = {}, toMatches = {}, colorBlind = False): | 293 def displayTrajectories(videoFilename, objects, boundingBoxes = {}, homography = None, firstFrameNum = 0, lastFrameNumArg = None, printFrames = True, rescale = 1., nFramesStep = 1, saveAllImages = False, nZerosFilenameArg = None, undistort = False, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1., annotations = [], gtMatches = {}, toMatches = {}, colorBlind = False): |
291 '''Displays the objects overlaid frame by frame over the video ''' | 294 '''Displays the objects overlaid frame by frame over the video ''' |
292 if colorBlind: | 295 if colorBlind: |
293 colorType = 'colorblind' | 296 colorType = 'colorblind' |
332 obj.projectedPositions = obj.positions.project(homography) | 335 obj.projectedPositions = obj.positions.project(homography) |
333 else: | 336 else: |
334 obj.projectedPositions = obj.positions | 337 obj.projectedPositions = obj.positions |
335 cvPlot(img, obj.projectedPositions, cvColors[colorType][obj.getNum()], frameNum-obj.getFirstInstant()) | 338 cvPlot(img, obj.projectedPositions, cvColors[colorType][obj.getNum()], frameNum-obj.getFirstInstant()) |
336 if frameNum not in boundingBoxes.keys() and obj.hasFeatures(): | 339 if frameNum not in boundingBoxes.keys() and obj.hasFeatures(): |
337 imgcrop, yCropMin, yCropMax, xCropMin, xCropMax = imageBox(img, obj, frameNum, homography, width, height) | 340 yCropMin, yCropMax, xCropMin, xCropMax = imageBoxSize(obj, frameNum, homography, width, height) |
338 cv2.rectangle(img, (xCropMin, yCropMin), (xCropMax, yCropMax), cvBlue[colorType], 1) | 341 cv2.rectangle(img, (xCropMin, yCropMin), (xCropMax, yCropMax), cvBlue[colorType], 1) |
339 objDescription = '{} '.format(obj.num) | 342 objDescription = '{} '.format(obj.num) |
340 if moving.userTypeNames[obj.userType] != 'unknown': | 343 if moving.userTypeNames[obj.userType] != 'unknown': |
341 objDescription += moving.userTypeNames[obj.userType][0].upper() | 344 objDescription += moving.userTypeNames[obj.userType][0].upper() |
342 if len(annotations) > 0: # if we loaded annotations, but there is no match | 345 if len(annotations) > 0: # if we loaded annotations, but there is no match |