Mercurial Hosting > traffic-intelligence
changeset 929:be28a3538dc9
work in progress on projection
author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
---|---|
date | Wed, 12 Jul 2017 18:00:53 -0400 |
parents | 063d1267585d |
children | 7db0f2853bfd |
files | python/cvutils.py python/moving.py python/tests/cvutils.txt |
diffstat | 3 files changed, 29 insertions(+), 18 deletions(-) [+] |
line wrap: on
line diff
--- a/python/cvutils.py	Wed Jul 12 01:24:31 2017 -0400
+++ b/python/cvutils.py	Wed Jul 12 18:00:53 2017 -0400
@@ -264,9 +264,9 @@
         if obj.hasFeatures():
             for f in obj.getFeatures():
                 if f.existsAtInstant(frameNum):
-                    projectedPosition = f.projectPositions[:, frameNum-f.getFirstInstant()]
-                    x.append(projectedPosition[0])
-                    y.append(projectedPosition[1])
+                    p = f.getPositionAtInstant(frameNum)
+                    x.append(p.x)
+                    y.append(p.y)
         xmin = min(x)
         xmax = max(x)
         ymin = min(y)
@@ -521,31 +521,32 @@
 def projectArray(homography, points, intrinsicCameraMatrix = None, distortionCoefficients = None):
     '''Returns the coordinates of the projected points through homography
     (format: array 2xN points)'''
-    if points.shape[0] not in [2, 3]:
+    if points.shape[0] != 2:
         raise Exception('points of dimension {0} {1}'.format(points.shape[0], points.shape[1]))
 
-    augmentedPoints = append(points,[[1]*points.shape[1]], 0)
+    augmentedPoints = append(points,[[1]*points.shape[1]], 0) # 3xN
     if homography is not None and homography.size>0:
         prod = dot(homography, augmentedPoints)
         projected = prod/prod[2]
-        projected[3,:] = 0
     else:
         projected = augmentedPoints
 
     if intrinsicCameraMatrix is not None and distortionCoefficients is not None:
-        projected = cv2.projectPoints(projected, None, None, intrinsicCameraMatrix, distortionCoefficients)
-    return projected
+        #projected[2,:] = 0
+        projected, jacobian = cv2.projectPoints(projected.T, (0,0,0), (0,0,0), intrinsicCameraMatrix, distortionCoefficients) # in: 3xN, out: 2x1xN
+        projected = projected.reshape(-1,2).T
+    return projected[:2,:]
 
-def project(homography, p):
+def project(homography, p, intrinsicCameraMatrix = None, distortionCoefficients = None):
     '''Returns the coordinates of the projection of the point p with coordinates p[0], p[1]
     through homography'''
-    return projectArray(homography, array([[p[0]],[p[1]]]))
+    return projectArray(homography, array([[p[0]],[p[1]]]), intrinsicCameraMatrix, distortionCoefficients)
 
-def projectTrajectory(homography, trajectory):
+def projectTrajectory(homography, trajectory, intrinsicCameraMatrix = None, distortionCoefficients = None):
     '''Projects a series of points in the format
     [[x1, x2, ...],
     [y1, y2, ...]]'''
-    return projectArray(homography, array(trajectory))
+    return projectArray(homography, array(trajectory), intrinsicCameraMatrix, distortionCoefficients)
 
 def invertHomography(homography):
     '''Returns an inverted homography
--- a/python/moving.py	Wed Jul 12 01:24:31 2017 -0400
+++ b/python/moving.py	Wed Jul 12 18:00:53 2017 -0400
@@ -1578,16 +1578,17 @@
         # project feature positions
         if self.hasFeatures():
             for f in self.getFeatures():
-                f.projectedPositions = cvutils.projectArray(homography, f.getPositions().asArray(), intrinsicCameraMatrix, distortionCoefficients)
+                pp = cvutils.projectArray(homography, f.getPositions().asArray(), intrinsicCameraMatrix, array(distortionCoefficients)).tolist()
+                f.positions = Trajectory(pp)
         self.userTypes = {}
 
-    def classifyUserTypeHoGSVMAtInstant(self, img, instant, homography, width, height, px, py, minNPixels, rescaleSize, orientations, pixelsPerCell, cellsPerBlock, blockNorm):
+    def classifyUserTypeHoGSVMAtInstant(self, img, instant, width, height, px, py, minNPixels, rescaleSize, orientations, pixelsPerCell, cellsPerBlock, blockNorm):
         '''Extracts the image box around the object
         (of square size max(width, height) of the box around the features,
         with an added px or py for width and height (around the box))
         computes HOG on this cropped image (with parameters rescaleSize, orientations, pixelsPerCell, cellsPerBlock)
         and applies the SVM model on it'''
-        croppedImg = cvutils.imageBox(img, self, instant, homography, width, height, px, py, minNPixels)
+        croppedImg = cvutils.imageBox(img, self, instant, width, height, px, py, minNPixels)
         if croppedImg is not None and len(croppedImg) > 0:
             hog = cvutils.HOG(croppedImg, rescaleSize, orientations, pixelsPerCell, cellsPerBlock, blockNorm, visualize=False, normalize=False)
             self.userTypes[instant] = int(self.appearanceClassifier.predict(hog))
--- a/python/tests/cvutils.txt	Wed Jul 12 01:24:31 2017 -0400
+++ b/python/tests/cvutils.txt	Wed Jul 12 18:00:53 2017 -0400
@@ -1,10 +1,19 @@
 >>> import cv2, cvutils
->>> from numpy import array, round
+>>> from numpy import array, round, ones
 >>> img = cv2.imread("../samples/val-dor-117-111.png")
 >>> width = img.shape[1]
 >>> height = img.shape[0]
 >>> intrinsicCameraMatrix = array([[ 377.42, 0. , 639.12], [ 0. , 378.43, 490.2 ], [ 0. , 0. , 1. ]])
->>> [map1, map2] = cvutils.computeUndistortMaps(width, height, 1.31, intrinsicCameraMatrix, [-0.11759321, 0.0148536, 0.00030756, -0.00020578, -0.00091816])
+>>> distortionCoefficients = array([-0.11759321, 0.0148536, 0.00030756, -0.00020578, -0.00091816])
+>>> distortionCoefficients = array([-0.11759321, 0., 0., 0., 0.])
+>>> multiplicationFactor = 1.31
+>>> [map1, map2] = cvutils.computeUndistortMaps(width, height, multiplicationFactor, intrinsicCameraMatrix, distortionCoefficients)
 >>> undistorted = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
->>> (undistorted.shape == array([int(round(height*1.31)), int(round(width*1.31)), 3])).all()
+>>> (undistorted.shape == array([int(round(height*multiplicationFactor)), int(round(width*multiplicationFactor)), 3])).all()
 True
+>>> imgPoints = array([[[150.,170.],[220.,340.],[340.,440.],[401.,521.]]])
+>>> newCameraMatrix = cv2.getDefaultNewCameraMatrix(intrinsicCameraMatrix, (int(round(width*multiplicationFactor)), int(round(height*multiplicationFactor))), True)
+>>> undistortedPoints = cv2.undistortPoints(imgPoints, intrinsicCameraMatrix, distortionCoefficients, P = newCameraMatrix).reshape(-1, 2)
+>>> tmp = ones((imgPoints[0].shape[0], 3))
+>>> tmp[:,:2] = undistortedPoints
+>>> origPoints = cv2.projectPoints(tmp, (0.,0.,0.), (0.,0.,0.), intrinsicCameraMatrix, distortionCoefficients)[0].reshape(-1,2)