diff c/feature-based-tracking.cpp @ 614:5e09583275a4

Merged Nicolas/trafficintelligence into default
author Mohamed Gomaa <eng.m.gom3a@gmail.com>
date Fri, 05 Dec 2014 12:13:53 -0500
parents a3add9f751ef
children 045d05cef9d0
--- a/c/feature-based-tracking.cpp	Thu Apr 18 15:29:33 2013 -0400
+++ b/c/feature-based-tracking.cpp	Fri Dec 05 12:13:53 2014 -0500
@@ -2,6 +2,8 @@
 #include "Parameters.hpp"
 #include "cvutils.hpp"
 #include "utils.hpp"
+#include "InputVideoFileModule.h"
+#include "InputFrameListModule.h"
 
 #include "src/Trajectory.h"
 #include "src/TrajectoryDBAccessList.h"
@@ -16,13 +18,16 @@
 
 #include <boost/shared_ptr.hpp>
 #include <boost/foreach.hpp>
+#include <boost/filesystem.hpp>
 
 #include <iostream>
 #include <vector>
 #include <ctime>
+#include <cmath>
 
 using namespace std;
 using namespace cv;
+namespace fs = boost::filesystem;
 
 void drawMatchesRelative(const vector<KeyPoint>& train, const vector<KeyPoint>& query, std::vector<cv::DMatch>& matches, Mat& img) {
   for (int i = 0; i < (int)matches.size(); i++)
@@ -54,12 +59,9 @@
     feature(_feature), pointNum(_pointNum) {}
 };
 
-inline void saveFeatures(vector<FeatureTrajectoryPtr>& features, TrajectoryDBAccess<Point2f>& db, const string& positionsTableName, const string& velocitiesTableName, const unsigned int& minNFeatures = 0) {
-  /// \todo smoothing
-  if (features.size() >= minNFeatures) {
-    BOOST_FOREACH(FeatureTrajectoryPtr f, features) f->write(db, positionsTableName, velocitiesTableName);
-    features.clear();
-  }
+inline void saveFeatures(vector<FeatureTrajectoryPtr>& features, TrajectoryDBAccess<Point2f>& db, const string& positionsTableName, const string& velocitiesTableName) {
+  BOOST_FOREACH(FeatureTrajectoryPtr f, features) f->write(db, positionsTableName, velocitiesTableName);
+  features.clear();
 }
 
 void trackFeatures(const KLTFeatureTrackingParameters& params) {
@@ -75,34 +77,59 @@
   float minTotalFeatureDisplacement = params.nDisplacements*params.minFeatureDisplacement;
   Size window = Size(params.windowSize, params.windowSize);
 
+  int interpolationMethod = -1;
+  if (params.interpolationMethod == 0)
+    interpolationMethod = INTER_NEAREST;
+  else if (params.interpolationMethod == 1)
+    interpolationMethod = INTER_LINEAR;
+  else if (params.interpolationMethod == 2)
+    interpolationMethod = INTER_CUBIC;
+  else if (params.interpolationMethod == 3)
+    interpolationMethod = INTER_LANCZOS4;
+  else
+    cout << "Unsupported option " << params.interpolationMethod << " for interpolation method" << endl;
+
   // BruteForceMatcher<Hamming> descMatcher;
   // vector<DMatch> matches;
 
-  VideoCapture capture;
-  Size videoSize;
-  unsigned int nFrames = 0;
-  capture.open(params.videoFilename);
-  if(capture.isOpened()) {
-    videoSize = Size(capture.get(CV_CAP_PROP_FRAME_WIDTH), capture.get(CV_CAP_PROP_FRAME_HEIGHT));
-    nFrames = capture.get(CV_CAP_PROP_FRAME_COUNT);
-    cout << "Video " << params.videoFilename <<
-      ": width=" << videoSize.width <<
-      ", height=" << videoSize.height <<
-      ", nframes=" << nFrames << endl;
-  } else {
+  boost::shared_ptr<InputFrameProviderIface> capture;
+  if (fs::is_directory(fs::path(params.videoFilename)))
+    capture = boost::shared_ptr<InputFrameListModule>(new InputFrameListModule(params.videoFilename));
+  else if(!params.videoFilename.empty())
+    capture = boost::shared_ptr<InputVideoFileModule>(new InputVideoFileModule(params.videoFilename));
+  else {
+    cout << "No valid input parameters. Exiting." << endl;
+    exit(0);
+  }
+  
+  if(!capture->isOpen()) {
     cout << "Video filename " << params.videoFilename << " could not be opened. Exiting." << endl;
     exit(0);
   }
-  // if (!capture.isOpened())
-  //   {
-  //     //help(argv);
-  //     cout << "capture device " << argv[1] << " failed to open!" << endl;
-  //     return 1;
-  //   }
+  
+  Size videoSize = capture->getSize();
+  unsigned int nFrames = capture->getNbFrames();
+  cout << "Video " << params.videoFilename <<
+	  ": width=" << videoSize.width <<
+	  ", height=" << videoSize.height <<
+	  ", nframes=" << nFrames << endl;
 
+  Mat map1, map2;
+  if (params.undistort) {
+    Mat intrinsicCameraMatrix = ::loadMat(params.intrinsicCameraFilename, " ");
+    Mat newIntrinsicCameraMatrix = intrinsicCameraMatrix.clone(); 
+    videoSize = Size(static_cast<int>(round(videoSize.width*params.undistortedImageMultiplication)), static_cast<int>(round(videoSize.height*params.undistortedImageMultiplication)));
+    newIntrinsicCameraMatrix.at<float>(0,2) = videoSize.width/2.;
+    newIntrinsicCameraMatrix.at<float>(1,2) = videoSize.height/2.;
+    initUndistortRectifyMap(intrinsicCameraMatrix, params.distortionCoefficients, Mat::eye(3,3, CV_32FC1), newIntrinsicCameraMatrix, videoSize, CV_32FC1, map1, map2);
+    
+    cout << "Undistorted width=" << videoSize.width <<
+      ", height=" << videoSize.height << endl;
+  }
+  
   Mat mask = imread(params.maskFilename, 0);
-  if (mask.empty())
+  if (mask.empty()) {
+    cout << "Mask filename " << params.maskFilename << " could not be opened." << endl;
     mask = Mat::ones(videoSize, CV_8UC1);
+  }
 
   boost::shared_ptr<TrajectoryDBAccess<Point2f> > trajectoryDB = boost::shared_ptr<TrajectoryDBAccess<Point2f> >(new TrajectoryDBAccessList<Point2f>());
   //TrajectoryDBAccess<Point2f>* trajectoryDB = new TrajectoryDBAccessBlob<Point2f>();
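
A note on the undistortion set up above: the two remap maps are computed once from the loaded intrinsic matrix and then reused for every frame, with the output canvas enlarged and the principal point re-centred. A minimal, self-contained sketch of that pattern follows; the camera parameters, image size and multiplication factor are placeholder values, not project data.

// Sketch: precompute undistortion maps once, apply them per frame with remap().
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;

int main() {
  // Placeholder intrinsics and distortion coefficients (not calibration data).
  Mat intrinsic = (Mat_<float>(3,3) << 700.f, 0.f, 640.f,
                                       0.f, 700.f, 360.f,
                                       0.f, 0.f,   1.f);
  Mat distCoeffs = (Mat_<float>(1,5) << -0.3f, 0.1f, 0.f, 0.f, 0.f);

  // Enlarge the output image and re-centre the principal point, as done with
  // params.undistortedImageMultiplication in the patch.
  float mult = 1.3f;
  Size outSize(static_cast<int>(1280*mult), static_cast<int>(720*mult));
  Mat newIntrinsic = intrinsic.clone();
  newIntrinsic.at<float>(0,2) = outSize.width/2.f;
  newIntrinsic.at<float>(1,2) = outSize.height/2.f;

  // Maps are computed once...
  Mat map1, map2;
  initUndistortRectifyMap(intrinsic, distCoeffs, Mat::eye(3,3, CV_32FC1),
                          newIntrinsic, outSize, CV_32FC1, map1, map2);

  // ...and reused for every frame.
  Mat frame = Mat::zeros(720, 1280, CV_8UC3), undistorted;
  remap(frame, undistorted, map1, map2, INTER_LINEAR, BORDER_CONSTANT, Scalar(0));
  return 0;
}
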
@@ -120,46 +147,39 @@
   std::vector<FeatureTrajectoryPtr> lostFeatures;
   std::vector<FeaturePointMatch> featurePointMatches;
 
-  HOGDescriptor hog;
-  hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
-
   int key = '?';
   unsigned int savedFeatureId=0;
-  Mat frame = Mat::zeros(1, 1, CV_8UC1), currentFrameBW, previousFrameBW;
+  Mat frame = Mat::zeros(1, 1, CV_8UC1), currentFrameBW, previousFrameBW, undistortedFrame;
 
   unsigned int lastFrameNum = nFrames;
   if (params.nFrames > 0)
     lastFrameNum = MIN(params.frame1+static_cast<unsigned int>(params.nFrames), nFrames);
-  
-  //capture.set(CV_CAP_PROP_POS_FRAMES, params.frame1);
+
+  capture->setFrameNumber(params.frame1);
   for (unsigned int frameNum = params.frame1; (frameNum < lastFrameNum) && !::interruptionKey(key); frameNum++) {
-      capture >> frame;
-
-      if (frame.empty() || frame.size() != videoSize)
+      bool success = capture->getNextFrame(frame);
+      if (!success || frame.empty()) {
+	cout << "Empty frame " << frameNum << ", breaking (" << success << " " << frame.empty() << " [" << frame.size().width << "x" << frame.size().height << "])" << endl;
 	break;
-
-      if (frameNum%50 ==0)
+      } else if (frameNum%50 ==0)
 	cout << "frame " << frameNum << endl;
 
-      //capture.get(CV_CAP_PROP_POS_FRAMES) << " " << prevPts.size() << endl;
+      if (params.undistort) {
+	remap(frame, undistortedFrame, map1, map2, interpolationMethod, BORDER_CONSTANT, 0.);
+	frame = undistortedFrame;
 
-      // int emptyFrameNum = 0;
-      // while (frame.empty()) {
-      // 	cerr << "empty frame " << emptyFrameNum  << " " << capture.get(CV_CAP_PROP_POS_FRAMES)<< endl;
-      // 	capture >> frame;//break;
-      // 	emptyFrameNum++;
-      // 	if (emptyFrameNum>=3000)
-      // 	  exit(0);
-      // }
+	if (frame.size() != videoSize) {
+	  cout << "Different frame size " << frameNum << ", breaking ([" << frame.size().width << "x" << frame.size().height << "])" << endl;
+	  break;
+	}
+      }
+
       
       cvtColor(frame, currentFrameBW, CV_RGB2GRAY);
-      // "normal" feature detectors: detect features here
-      // detector.detect(currentFrameBW, currKpts); // see video_homography c++ sample
       
       if (!prevPts.empty()) {
-	//::keyPoints2Points(prevKpts, prevPts);
 	currPts.clear();
-	calcOpticalFlowPyrLK(previousFrameBW, currentFrameBW, prevPts, currPts, status, errors, window, params.pyramidLevel, TermCriteria(3 /*static_cast<int>(TermCriteria::COUNT)+static_cast<int>(TermCriteria::EPS)*/, params.maxNumberTrackingIterations, params.minTrackingError), 0.5 /* unused */, 0); // OPTFLOW_USE_INITIAL_FLOW
+	calcOpticalFlowPyrLK(previousFrameBW, currentFrameBW, prevPts, currPts, status, errors, window, params.pyramidLevel, TermCriteria(static_cast<int>(TermCriteria::COUNT)+static_cast<int>(TermCriteria::EPS) /* = 3 */, params.maxNumberTrackingIterations, params.minTrackingError), /* int flags = */ 0, params.minFeatureEigThreshold);
 	/// \todo try calcOpticalFlowFarneback
 
 	std::vector<Point2f> trackedPts;
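
The tracking call above now follows the OpenCV 2.4-style calcOpticalFlowPyrLK signature, where the flags argument comes before the minimum eigenvalue threshold and the termination criteria are spelled out explicitly. A minimal sketch of that call pattern, with placeholder values standing in for the KLTFeatureTrackingParameters fields:

// Sketch: one pyramidal Lucas-Kanade tracking step between two grey frames.
#include <opencv2/core/core.hpp>
#include <opencv2/video/tracking.hpp>
#include <vector>
using namespace cv;

void trackStep(const Mat& previousFrameBW, const Mat& currentFrameBW,
               const std::vector<Point2f>& prevPts, std::vector<Point2f>& currPts) {
  std::vector<uchar> status;
  std::vector<float> errors;
  TermCriteria criteria(TermCriteria::COUNT + TermCriteria::EPS, // == 3
                        20,    // maxNumberTrackingIterations (placeholder)
                        0.3);  // minTrackingError (placeholder)
  calcOpticalFlowPyrLK(previousFrameBW, currentFrameBW, prevPts, currPts,
                       status, errors,
                       Size(7, 7),  // windowSize (placeholder)
                       5,           // pyramidLevel (placeholder)
                       criteria,
                       0,           // flags
                       1e-4);       // minFeatureEigThreshold (placeholder)
  // status[i] != 0 means prevPts[i] was successfully tracked to currPts[i].
}
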
@@ -170,7 +190,7 @@
 	  if (status[iter->pointNum]) {
 	    iter->feature->addPoint(frameNum, currPts[iter->pointNum], homography);
 
-	    deleteFeature |= iter->feature->isDisplacementSmall(params.nDisplacements, minTotalFeatureDisplacement)
+	    deleteFeature = iter->feature->isDisplacementSmall(params.nDisplacements, minTotalFeatureDisplacement)
 	      || !iter->feature->isMotionSmooth(params.accelerationBound, params.deviationBound);
 	    if (deleteFeature)
 	      iter->feature->shorten();
@@ -181,6 +201,7 @@
 	    if (iter->feature->length() >= params.minFeatureTime) {
 	      iter->feature->setId(savedFeatureId);
 	      savedFeatureId++;
+	      iter->feature->movingAverage(params.nFramesSmoothing);
 	      lostFeatures.push_back(iter->feature);
 	    }
 	    iter = featurePointMatches.erase(iter);
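
Lost features are now smoothed with FeatureTrajectory::movingAverage(params.nFramesSmoothing) before being queued for saving. That implementation is not part of this diff; the sketch below only illustrates what a symmetric moving average over a feature's positions could look like, under that assumption.

// Sketch: smooth positions with a window of +/- nFramesSmoothing frames.
#include <opencv2/core/core.hpp>
#include <algorithm>
#include <vector>

std::vector<cv::Point2f> movingAverage(const std::vector<cv::Point2f>& positions,
                                       int nFramesSmoothing) {
  int n = static_cast<int>(positions.size());
  std::vector<cv::Point2f> smoothed(positions.size());
  for (int i = 0; i < n; ++i) {
    // Window clipped at the trajectory ends.
    int first = std::max(0, i - nFramesSmoothing);
    int last = std::min(n - 1, i + nFramesSmoothing);
    cv::Point2f sum(0.f, 0.f);
    for (int j = first; j <= last; ++j)
      sum += positions[j];
    float count = static_cast<float>(last - first + 1);
    smoothed[i] = cv::Point2f(sum.x / count, sum.y / count);
  }
  return smoothed;
}
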
@@ -203,12 +224,6 @@
 	  // BOOST_FOREACH(Rect r, locations)
 	  //   rectangle(frame, r.tl(), r.br(), cv::Scalar(0,255,0), 3);
 	}
-	//drawOpticalFlow(prevPts, currPts, status, frame);
-	
-	// cout << matches.size() << " matches" << endl;
-	// descMatcher.match(currDesc, prevDesc, matches);
-	// cout << matches.size() << " matches" << endl;
-	//drawMatchesRelative(prevKpts, currKpts, matches, frame);
       }
       
       // adding new features, using mask around existing feature positions
@@ -217,27 +232,33 @@
 	for (int j=MAX(0, currPts[n].x-params.minFeatureDistanceKLT); j<MIN(videoSize.width, currPts[n].x+params.minFeatureDistanceKLT+1); j++)
 	  for (int i=MAX(0, currPts[n].y-params.minFeatureDistanceKLT); i<MIN(videoSize.height, currPts[n].y+params.minFeatureDistanceKLT+1); i++)
 	    featureMask.at<uchar>(i,j)=0;
-      goodFeaturesToTrack(currentFrameBW, newPts, params.maxNFeatures, params.featureQuality, params.minFeatureDistanceKLT, featureMask, params.windowSize, params.useHarrisDetector, params.k);
+      goodFeaturesToTrack(currentFrameBW, newPts, params.maxNFeatures, params.featureQuality, params.minFeatureDistanceKLT, featureMask, params.blockSize, params.useHarrisDetector, params.k);
       BOOST_FOREACH(Point2f p, newPts) { //for (unsigned int i=0; i<newPts.size(); i++) {
 	FeatureTrajectoryPtr f = FeatureTrajectoryPtr(new FeatureTrajectory(frameNum, p, homography));
 	featurePointMatches.push_back(FeaturePointMatch(f, currPts.size()));
 	currPts.push_back(p);
       }
-      // currPts.insert(currPts.end(), newPts.begin(), newPts.end());
-      //::keyPoints2Points(currKpts, currPts, false);
-
-      //brief.compute(currentFrameBW, currKpts, currDesc); //Compute brief descriptors at each keypoint location
       
       if (params.display) {
+	imshow("mask", featureMask*256);
 	imshow("frame", frame);
-	imshow("mask", featureMask*256);
 	key = waitKey(2);
       }
       previousFrameBW = currentFrameBW.clone();
       prevPts = currPts;
-      //prevKpts = currKpts;
-      //currDesc.copyTo(prevDesc);
-    }  
+  }
+
+  // save the remaining currently tracked features
+  std::vector<FeaturePointMatch>::iterator iter = featurePointMatches.begin();
+  while (iter != featurePointMatches.end()) {
+    if (iter->feature->length() >= params.minFeatureTime) {
+      iter->feature->setId(savedFeatureId);
+      savedFeatureId++;
+      iter->feature->movingAverage(params.nFramesSmoothing);
+      iter->feature->write(*trajectoryDB, "positions", "velocities");
+    }
+    iter++;
+  }
   
   trajectoryDB->endTransaction();
   trajectoryDB->disconnect();
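
On the feature seeding earlier in the loop: goodFeaturesToTrack is now given params.blockSize (rather than the LK window size) and a mask in which the neighbourhoods of points already being tracked are zeroed out. A rough sketch of that step, using rectangle() instead of the per-pixel loop and placeholder parameter values:

// Sketch: detect new corners only away from currently tracked points.
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <vector>
using namespace cv;

void detectNewFeatures(const Mat& currentFrameBW, const Mat& mask,
                       const std::vector<Point2f>& currPts,
                       std::vector<Point2f>& newPts) {
  int minFeatureDistanceKLT = 5;   // placeholder
  Mat featureMask = mask.clone();  // start from the user-supplied mask
  // Blank a square around every tracked point so it is not re-detected.
  for (size_t n = 0; n < currPts.size(); ++n) {
    Point center(cvRound(currPts[n].x), cvRound(currPts[n].y));
    rectangle(featureMask,
              center - Point(minFeatureDistanceKLT, minFeatureDistanceKLT),
              center + Point(minFeatureDistanceKLT, minFeatureDistanceKLT),
              Scalar(0), -1 /* filled */);
  }
  goodFeaturesToTrack(currentFrameBW, newPts,
                      1000,   // maxNFeatures (placeholder)
                      0.01,   // featureQuality (placeholder)
                      minFeatureDistanceKLT, featureMask,
                      7,      // blockSize (placeholder), distinct from the LK windowSize
                      false,  // useHarrisDetector
                      0.04);  // k
}
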
@@ -312,9 +333,13 @@
     int lastInstant = frameNum+params.minFeatureTime-maxTrajectoryLength;
     if (lastInstant > 0 && frameNum%10==0) {
       featureGraph.connectedComponents(lastInstant);
-      vector<vector<unsigned int> > featureGroups = featureGraph.getFeatureGroups();
+      vector<vector<FeatureTrajectoryPtr> > featureGroups;
+      featureGraph.getFeatureGroups(featureGroups);
       for (unsigned int i=0; i<featureGroups.size(); ++i) {
-	trajectoryDB->writeObject(savedObjectId, featureGroups[i], -1, 1, string("objects"), string("objects_features"));
+	vector<unsigned int> featureNumbers;
+	for (unsigned int j=0; j<featureGroups[i].size(); ++j)
+	  featureNumbers.push_back(featureGroups[i][j]->getId());
+	trajectoryDB->writeObject(savedObjectId, featureNumbers, 0 /* unknown */, 1, string("objects"), string("objects_features"));
 	savedObjectId++;
       }
     }
@@ -325,9 +350,13 @@
 
   // save remaining objects
   featureGraph.connectedComponents(frameNum+maxTrajectoryLength+1);
-  vector<vector<unsigned int> > featureGroups = featureGraph.getFeatureGroups();
+  vector<vector<FeatureTrajectoryPtr> > featureGroups;
+  featureGraph.getFeatureGroups(featureGroups);
   for (unsigned int i=0; i<featureGroups.size(); ++i) {
-    trajectoryDB->writeObject(savedObjectId, featureGroups[i], -1, 1, string("objects"), string("objects_features"));
+    vector<unsigned int> featureNumbers;
+    for (unsigned int j=0; j<featureGroups[i].size(); ++j)
+      featureNumbers.push_back(featureGroups[i][j]->getId());
+    trajectoryDB->writeObject(savedObjectId, featureNumbers, 0 /* unknown */, 1, string("objects"), string("objects_features"));
     savedObjectId++;
   }
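
Because getFeatureGroups now fills groups of FeatureTrajectoryPtr instead of returning lists of ids, both call sites flatten each group into feature numbers before calling writeObject, as in the loops above. The helper below only isolates that conversion; the stand-in FeatureTrajectory struct is illustrative, not the project's class.

// Sketch: flatten grouped feature trajectories into their saved feature ids.
#include <boost/shared_ptr.hpp>
#include <vector>

struct FeatureTrajectory { // minimal stand-in for illustration
  unsigned int id;
  unsigned int getId() const { return id; }
};
typedef boost::shared_ptr<FeatureTrajectory> FeatureTrajectoryPtr;

std::vector<std::vector<unsigned int> >
groupsToFeatureNumbers(const std::vector<std::vector<FeatureTrajectoryPtr> >& featureGroups) {
  std::vector<std::vector<unsigned int> > featureNumbers(featureGroups.size());
  for (unsigned int i = 0; i < featureGroups.size(); ++i)
    for (unsigned int j = 0; j < featureGroups[i].size(); ++j)
      featureNumbers[i].push_back(featureGroups[i][j]->getId());
  return featureNumbers;
}
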
 
@@ -345,6 +374,8 @@
   } else if (params.groupFeatures) {
     cout << "The program groups features" << endl;
     groupFeatures(params);
+  } else {
+    cout << "Main option missing or misspelt" << endl;
   }
 
   return 0;