changeset 924:a71455bd8367

work in progress on undistortion acceleration
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Fri, 07 Jul 2017 18:01:45 -0400
parents 238008f81c16
children 974077e23804
files c/Makefile c/feature-based-tracking.cpp python/cvutils.py scripts/undistort-video.py
diffstat 4 files changed, 60 insertions(+), 32 deletions(-) [+]
line wrap: on
line diff
--- a/c/Makefile	Wed Jul 05 23:01:24 2017 -0400
+++ b/c/Makefile	Fri Jul 07 18:01:45 2017 -0400
@@ -19,7 +19,7 @@
 
 ifneq ($(OPENCV), 0)
 	CFLAGS += -DUSE_OPENCV
-	LDFLAGS += -lopencv_highgui -lopencv_core -lopencv_video -lopencv_features2d -lopencv_imgproc
+	LDFLAGS += -lopencv_highgui -lopencv_core -lopencv_video -lopencv_features2d -lopencv_imgproc -lopencv_calib3d
 endif
 
 #LDFLAGS += -Wl,--as-needed -Wl,-Bdynamic,-lgcc_s,-Bstatic
--- a/c/feature-based-tracking.cpp	Wed Jul 05 23:01:24 2017 -0400
+++ b/c/feature-based-tracking.cpp	Fri Jul 07 18:01:45 2017 -0400
@@ -13,6 +13,7 @@
 #include "opencv2/features2d/features2d.hpp"
 #include "opencv2/highgui/highgui.hpp"
 #include "opencv2/objdetect/objdetect.hpp"
+#include "opencv2/calib3d/calib3d.hpp"
 
 #include <boost/foreach.hpp>
 
@@ -110,16 +111,19 @@
 	  ", nframes=" << nFrames << endl;
 
   Mat map1, map2;
+  Mat intrinsicCameraMatrix, newIntrinsicCameraMatrix;
   if (params.undistort) {
-    Mat intrinsicCameraMatrix = ::loadMat(params.intrinsicCameraFilename, " ");
-    Mat newIntrinsicCameraMatrix = intrinsicCameraMatrix.clone(); 
-    videoSize = Size(static_cast<int>(round(videoSize.width*params.undistortedImageMultiplication)), static_cast<int>(round(videoSize.height*params.undistortedImageMultiplication)));
-    newIntrinsicCameraMatrix.at<float>(0,2) = videoSize.width/2.;
-    newIntrinsicCameraMatrix.at<float>(1,2) = videoSize.height/2.;
-    initUndistortRectifyMap(intrinsicCameraMatrix, params.distortionCoefficients, Mat::eye(3,3, CV_32FC1), newIntrinsicCameraMatrix, videoSize, CV_32FC1, map1, map2);
+    intrinsicCameraMatrix = ::loadMat(params.intrinsicCameraFilename, " ");
+    //videoSize = Size(static_cast<int>(round(videoSize.width*params.undistortedImageMultiplication)), static_cast<int>(round(videoSize.height*params.undistortedImageMultiplication)));
+    // newIntrinsicCameraMatrix = intrinsicCameraMatrix.clone(); 
+    // newIntrinsicCameraMatrix.at<float>(0,2) = undistortedVideoSize.width/2.;
+    // newIntrinsicCameraMatrix.at<float>(1,2) = undistortedVideoSize.height/2.;
+    Size undistortedVideoSize = Size(static_cast<int>(round(videoSize.width*params.undistortedImageMultiplication)), static_cast<int>(round(videoSize.height*params.undistortedImageMultiplication)));
+    newIntrinsicCameraMatrix = getDefaultNewCameraMatrix(intrinsicCameraMatrix, undistortedVideoSize, true);//getOptimalNewCameraMatrix(intrinsicCameraMatrix, params.distortionCoefficients, videoSize, 1, undistortedVideoSize);//, 0, true);
+    initUndistortRectifyMap(intrinsicCameraMatrix, params.distortionCoefficients, Mat::eye(3,3, CV_32FC1) /* 0 ?*/, newIntrinsicCameraMatrix, undistortedVideoSize, CV_32FC1, map1, map2);
     
-    cout << "Undistorted width=" << videoSize.width <<
-      ", height=" << videoSize.height << endl;
+    cout << "Undistorted width=" << undistortedVideoSize.width <<
+      ", height=" << undistortedVideoSize.height << endl;
   }
   
   Mat mask = imread(params.maskFilename, 0);
@@ -136,7 +140,7 @@
   trajectoryDB->beginTransaction();
 
   std::vector<KeyPoint> prevKpts, currKpts;
-  std::vector<Point2f> prevPts, currPts, newPts;
+  std::vector<Point2f> prevPts, currPts, newPts, undistortedPts;
   std::vector<uchar> status;
   std::vector<float> errors;
   Mat prevDesc, currDesc;
@@ -146,7 +150,7 @@
 
   int key = '?';
   unsigned int savedFeatureId=0;
-  Mat frame = Mat::zeros(1, 1, CV_8UC1), currentFrameBW, previousFrameBW, undistortedFrame;
+  Mat frame, currentFrameBW, previousFrameBW, displayFrame; // = Mat::zeros(1, 1, CV_8UC1)
 
   unsigned int lastFrameNum = nFrames;
   if (params.nFrames > 0)
@@ -161,15 +165,15 @@
     } else if (frameNum%50 ==0)
       cout << "frame " << frameNum << endl;
 
-    if (params.undistort) {
-      remap(frame, undistortedFrame, map1, map2, interpolationMethod, BORDER_CONSTANT, 0.);
-      frame = undistortedFrame;
+    // if (params.undistort) {
+    //   remap(frame, undistortedFrame, map1, map2, interpolationMethod, BORDER_CONSTANT, 0.);
+    //   frame = undistortedFrame;
 
-      if (frame.size() != videoSize) {
-	cout << "Different frame size " << frameNum << ", breaking ([" << frame.size().width << "x" << frame.size().height << "])" << endl;
-	break;
-      }
-    }
+    //   if (frame.size() != videoSize) {
+    // 	cout << "Different frame size " << frameNum << ", breaking ([" << frame.size().width << "x" << frame.size().height << "])" << endl;
+    // 	break;
+    //   }
+    // }
     
     cvtColor(frame, currentFrameBW, CV_RGB2GRAY);
     
@@ -178,6 +182,11 @@
       calcOpticalFlowPyrLK(previousFrameBW, currentFrameBW, prevPts, currPts, status, errors, window, params.pyramidLevel, TermCriteria(static_cast<int>(TermCriteria::COUNT)+static_cast<int>(TermCriteria::EPS) /* = 3 */, params.maxNumberTrackingIterations, params.minTrackingError), /* int flags = */ 0, params.minFeatureEigThreshold);
       /// \todo try calcOpticalFlowFarneback
 
+      if (params.undistort) {
+	undistortPoints(currPts, undistortedPts, intrinsicCameraMatrix, params.distortionCoefficients, noArray(), newIntrinsicCameraMatrix);
+	//currPts = undistortedPts;
+      }
+      
       std::vector<Point2f> trackedPts;
       std::vector<FeaturePointMatch>::iterator iter = featurePointMatches.begin();
       while (iter != featurePointMatches.end()) {
@@ -185,11 +194,14 @@
 	
 	int currPtX = static_cast<int>(floor(currPts[iter->pointNum].x));
 	int currPtY = static_cast<int>(floor(currPts[iter->pointNum].y));
-	if ((status[iter->pointNum] =!0) && 
+	if ((status[iter->pointNum] != 0) && 
 	    (currPtX >= 0) && (currPtX < videoSize.width) && 
 	    (currPtY >= 0) && (currPtY < videoSize.height) && 
-	    (mask.at<uchar>(currPtY, currPtX) != 0)) {
-	  iter->feature->addPoint(frameNum, currPts[iter->pointNum], homography);
+	    (mask.at<uchar>(currPtY, currPtX) != 0)) { // todo check point in mask in image space
+	  if (params.undistort)
+	    iter->feature->addPoint(frameNum, undistortedPts[iter->pointNum], homography);
+	  else
+	    iter->feature->addPoint(frameNum, currPts[iter->pointNum], homography);
 	  
 	  deleteFeature = iter->feature->isDisplacementSmall(params.nDisplacements, minTotalFeatureDisplacement)
 	    || !iter->feature->isMotionSmooth(params.accelerationBound, params.deviationBound);
@@ -217,8 +229,13 @@
       saveFeatures(lostFeatures, *trajectoryDB, "positions", "velocities");
 	
       if (params.display) {
+	if (params.undistort)
+	  remap(frame, displayFrame, map1, map2, interpolationMethod, BORDER_CONSTANT, 0.);
+	else
+	  displayFrame = frame.clone();
+	
 	BOOST_FOREACH(FeaturePointMatch fp, featurePointMatches)
-	  fp.feature->draw(frame, invHomography, Colors::red());
+	  fp.feature->draw(displayFrame, invHomography, Colors::red());
       }
     }
     
@@ -229,15 +246,24 @@
 	for (int i=MAX(0, currPts[n].y-params.minFeatureDistanceKLT); i<MIN(videoSize.height, currPts[n].y+params.minFeatureDistanceKLT+1); i++)
 	  featureMask.at<uchar>(i,j)=0;
     goodFeaturesToTrack(currentFrameBW, newPts, params.maxNFeatures, params.featureQuality, params.minFeatureDistanceKLT, featureMask, params.blockSize, params.useHarrisDetector, params.k);
-    BOOST_FOREACH(Point2f p, newPts) {
-      FeatureTrajectoryPtr f = FeatureTrajectoryPtr(new FeatureTrajectory(frameNum, p, homography));
+    if (params.undistort) {
+      undistortPoints(newPts, undistortedPts, intrinsicCameraMatrix, params.distortionCoefficients, noArray(), newIntrinsicCameraMatrix);
+      //newPts = undistortedPts;
+    }
+    //BOOST_FOREACH(Point2f p, newPts) {
+    for (unsigned int i=0; i<newPts.size(); i++) {
+      FeatureTrajectoryPtr f;
+      if (params.undistort) // write function
+	f = FeatureTrajectoryPtr(new FeatureTrajectory(frameNum, undistortedPts[i], homography));
+      else
+	f = FeatureTrajectoryPtr(new FeatureTrajectory(frameNum, newPts[i], homography));
       featurePointMatches.push_back(FeaturePointMatch(f, currPts.size()));
-      currPts.push_back(p);
+      currPts.push_back(newPts[i]);
     }
       
-    if (params.display) {
+    if (params.display && !displayFrame.empty()) {
       imshow("mask", featureMask*256);
-      imshow("frame", frame);
+      imshow("frame", displayFrame);
       key = waitKey(2);
     }
     previousFrameBW = currentFrameBW.clone();
--- a/python/cvutils.py	Wed Jul 05 23:01:24 2017 -0400
+++ b/python/cvutils.py	Fri Jul 07 18:01:45 2017 -0400
@@ -122,10 +122,11 @@
 
     def computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients):
         newImgSize = (int(round(width*undistortedImageMultiplication)), int(round(height*undistortedImageMultiplication)))
-        newCameraMatrix = deepcopy(intrinsicCameraMatrix)
-        newCameraMatrix[0,2] = newImgSize[0]/2.
-        newCameraMatrix[1,2] = newImgSize[1]/2.
-        return cv2.initUndistortRectifyMap(intrinsicCameraMatrix, array(distortionCoefficients), identity(3), newCameraMatrix, newImgSize, cv2.CV_32FC1)
+        #newCameraMatrix = deepcopy(intrinsicCameraMatrix)
+        #newCameraMatrix[0,2] = newImgSize[0]/2.
+        #newCameraMatrix[1,2] = newImgSize[1]/2.
+        newCameraMatrix = cv2.getDefaultNewCameraMatrix(intrinsicCameraMatrix, newImgSize, True)
+        return cv2.initUndistortRectifyMap(intrinsicCameraMatrix, array(distortionCoefficients), None, newCameraMatrix, newImgSize, cv2.CV_32FC1)
 
     def playVideo(filenames, windowNames = None, firstFrameNums = None, frameRate = -1, interactive = False, printFrames = True, text = None, rescale = 1., step = 1, colorBlind = False):
         '''Plays the video(s)'''
--- a/scripts/undistort-video.py	Wed Jul 05 23:01:24 2017 -0400
+++ b/scripts/undistort-video.py	Fri Jul 07 18:01:45 2017 -0400
@@ -43,6 +43,7 @@
 width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
 height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
 [map1, map2] = cvutils.computeUndistortMaps(width, height, args.undistortedImageMultiplication, intrinsicCameraMatrix, args.distortionCoefficients)
+
 if capture.isOpened():
     ret = True
     frameNum = args.firstFrameNum