Mercurial Hosting > traffic-intelligence
comparison python/cvutils.py @ 936:56cc8a1f7082
removed all old versions of projection methods
author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
---|---|
date | Fri, 14 Jul 2017 16:48:42 -0400 |
parents | 0e63a918a1ca |
children | e5970606066f |
comparison
equal
deleted
inserted
replaced
935:0e63a918a1ca | 936:56cc8a1f7082 |
---|---|
331 if obj.existsAtInstant(frameNum): | 331 if obj.existsAtInstant(frameNum): |
332 if not hasattr(obj, 'projectedPositions'): | 332 if not hasattr(obj, 'projectedPositions'): |
333 obj.projectedPositions = obj.getPositions().homographyProject(homography) | 333 obj.projectedPositions = obj.getPositions().homographyProject(homography) |
334 if undistort: | 334 if undistort: |
335 obj.projectedPositions = obj.projectedPositions.newCameraProject(newCameraMatrix) | 335 obj.projectedPositions = obj.projectedPositions.newCameraProject(newCameraMatrix) |
336 #obj.projectedPositions = obj.positions | |
337 cvPlot(img, obj.projectedPositions, cvColors[colorType][obj.getNum()], frameNum-obj.getFirstInstant()) | 336 cvPlot(img, obj.projectedPositions, cvColors[colorType][obj.getNum()], frameNum-obj.getFirstInstant()) |
338 if frameNum not in boundingBoxes.keys() and obj.hasFeatures(): | 337 if frameNum not in boundingBoxes.keys() and obj.hasFeatures(): |
339 yCropMin, yCropMax, xCropMin, xCropMax = imageBoxSize(obj, frameNum, homography, width, height) | 338 yCropMin, yCropMax, xCropMin, xCropMax = imageBoxSize(obj, frameNum, homography, width, height) |
340 cv2.rectangle(img, (xCropMin, yCropMin), (xCropMax, yCropMax), cvBlue[colorType], 1) | 339 cv2.rectangle(img, (xCropMin, yCropMin), (xCropMax, yCropMax), cvBlue[colorType], 1) |
341 objDescription = '{} '.format(obj.num) | 340 objDescription = '{} '.format(obj.num) |
575 projected = dot(newCameraMatrix, augmentedPoints) | 574 projected = dot(newCameraMatrix, augmentedPoints) |
576 return projected[:2,:] | 575 return projected[:2,:] |
577 else: | 576 else: |
578 return points | 577 return points |
579 | 578 |
def projectArray(homography, points, intrinsicCameraMatrix = None, distortionCoefficients = None, newCameraMatrix = None):
    '''Returns the coordinates of the projected points through homography
    (format: 2xN array of points)

    If homography is None or empty, the homography step leaves the points unchanged.
    If intrinsicCameraMatrix and distortionCoefficients are provided, the points are
    additionally mapped through cv2.projectPoints (removing the effect of
    newCameraMatrix first, if provided).

    Raises ValueError if points is not 2xN.'''
    if points.shape[0] != 2:
        raise ValueError('points of dimension {}'.format(points.shape))

    augmentedPoints = append(points,[[1]*points.shape[1]], 0) # 3xN homogeneous coordinates
    if homography is not None and homography.size>0:
        prod = dot(homography, augmentedPoints)
        projected = prod/prod[2] # normalize by the homogeneous coordinate
    else:
        projected = augmentedPoints

    if intrinsicCameraMatrix is not None and distortionCoefficients is not None:
        if newCameraMatrix is not None:
            # remove the effect of the new camera matrix before distorting
            invNewCameraMatrix = inv(newCameraMatrix)
            reducedPoints = dot(invNewCameraMatrix, projected)
        else:
            reducedPoints = projected
        # the jacobian returned by projectPoints is not needed, hence discarded
        projected, _ = cv2.projectPoints(reducedPoints.T, (0.,0.,0.), (0.,0.,0.), intrinsicCameraMatrix, distortionCoefficients) # in: Nx3, out: Nx1x2
        projected = projected.reshape(-1,2).T
    return projected[:2,:]
602 | |
def project(homography, p, intrinsicCameraMatrix = None, distortionCoefficients = None, newCameraMatrix = None):
    '''Returns the projection of the single point p (indexable as p[0], p[1])
    through homography, as a 2x1 array (see projectArray)'''
    columnPoint = array([[p[0]],
                         [p[1]]])
    return projectArray(homography, columnPoint, intrinsicCameraMatrix, distortionCoefficients, newCameraMatrix)
607 | |
def projectTrajectory(homography, trajectory, intrinsicCameraMatrix = None, distortionCoefficients = None, newCameraMatrix = None):
    '''Projects a series of points in the format
    [[x1, x2, ...],
     [y1, y2, ...]] through the homography (see projectArray)'''
    points = array(trajectory)
    return projectArray(homography, points, intrinsicCameraMatrix, distortionCoefficients, newCameraMatrix)
613 | |
def invertHomography(homography):
    '''Returns the inverse of the homography, normalized so that the
    bottom-right element equals 1
    Unnecessary for reprojection over camera image'''
    inverse = inv(homography)
    return inverse/inverse[2,2]
620 | |
def undistortTrajectory(invMap1, invMap2, positions):
    '''Undistorts a 2xN series of positions [[x1, ...], [y1, ...]] using the
    inverse distortion maps invMap1 (x values) and invMap2 (y values),
    interpolating each coordinate between the floor cell and the next
    diagonal cell of the maps.

    Returns [[x1, ...], [y1, ...]] where a coordinate is None when the
    position falls outside the maps or a looked-up map value is negative
    (treated as invalid).'''
    # cast to int: npfloor returns floats, which cannot be used as array indices
    floorPositions = npfloor(positions).astype(int)
    undistortedTrajectory = [[],[]]
    for i in range(len(positions[0])):
        x,y = None, None
        # both the floor cell and the floor+1 cell must lie inside the maps
        if positions[0][i]+1 < invMap1.shape[1] and positions[1][i]+1 < invMap1.shape[0]:
            floorX = invMap1[floorPositions[1][i], floorPositions[0][i]]
            floorY = invMap2[floorPositions[1][i], floorPositions[0][i]]
            ceilX = invMap1[floorPositions[1][i]+1, floorPositions[0][i]+1]
            ceilY = invMap2[floorPositions[1][i]+1, floorPositions[0][i]+1]
            if floorX >=0 and floorY >=0 and ceilX >=0 and ceilY >=0:
                # linear interpolation by the fractional part of the position
                x = floorX+(positions[0][i]-floorPositions[0][i])*(ceilX-floorX)
                y = floorY+(positions[1][i]-floorPositions[1][i])*(ceilY-floorY)
        undistortedTrajectory[0].append(x)
        undistortedTrajectory[1].append(y)
    return undistortedTrajectory
640 | |
def projectGInputPoints(homography, points):
    '''Projects the polygon given as a list of [x, y] points through the
    homography, closing it by repeating the first point at the end'''
    closedPolygon = points + [points[0]]
    return projectTrajectory(homography, array(closedPolygon).T)
643 | |
644 if opencvAvailable: | 579 if opencvAvailable: |
645 def computeTranslation(img1, img2, img1Points, maxTranslation2, minNMatches, windowSize = (5,5), level = 5, criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)): | 580 def computeTranslation(img1, img2, img1Points, maxTranslation2, minNMatches, windowSize = (5,5), level = 5, criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)): |
646 '''Computes the translation of img2 with respect to img1 | 581 '''Computes the translation of img2 with respect to img1 |
647 (loaded using OpenCV as numpy arrays) | 582 (loaded using OpenCV as numpy arrays) |
648 img1Points are used to compute the translation | 583 img1Points are used to compute the translation |