diff c/feature-based-tracking.cpp @ 655:39fa1c998b29

removed the abstract class representing folders of images or video files, since the capability is now built into OpenCV
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Thu, 07 May 2015 16:30:58 +0200
parents 045d05cef9d0
children 576d9ea4b41a
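For reference, OpenCV's cv::VideoCapture opens a video file and a numbered image sequence through the same interface: a plain filename opens a video, while a printf-style pattern (e.g. "img_%04d.png") opens a sequence of still images; note that it expects such a filename pattern rather than a bare directory. A minimal sketch of the capability this changeset relies on, with hypothetical filenames:

    #include "opencv2/highgui/highgui.hpp"
    #include <iostream>

    int main() {
      // Both constructors go through the same VideoCapture API, so frame
      // grabbing and property queries are identical afterwards.
      cv::VideoCapture video("traffic.avi");           // a single video file
      cv::VideoCapture images("frames/img_%04d.png");  // a numbered image sequence

      cv::Mat frame;
      while (video.read(frame))  // read() returns false at the end of the stream
        std::cout << "frame " << frame.cols << "x" << frame.rows << std::endl;
      return 0;
    }
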
--- a/c/feature-based-tracking.cpp	Thu May 07 16:09:47 2015 +0200
+++ b/c/feature-based-tracking.cpp	Thu May 07 16:30:58 2015 +0200
@@ -2,8 +2,6 @@
 #include "Parameters.hpp"
 #include "cvutils.hpp"
 #include "utils.hpp"
-#include "InputVideoFileModule.h"
-#include "InputFrameListModule.h"
 
 #include "src/Trajectory.h"
 #include "src/TrajectoryDBAccessList.h"
@@ -17,7 +15,6 @@
 #include "opencv2/objdetect/objdetect.hpp"
 
 #include <boost/foreach.hpp>
-#include <boost/filesystem.hpp>
 
 #include <iostream>
 #include <vector>
@@ -27,7 +24,6 @@
 
 using namespace std;
 using namespace cv;
-namespace fs = boost::filesystem;
 
 void drawMatchesRelative(const vector<KeyPoint>& train, const vector<KeyPoint>& query, std::vector<cv::DMatch>& matches, Mat& img) {
   for (int i = 0; i < (int)matches.size(); i++)
@@ -92,21 +88,19 @@
   // BruteForceMatcher<Hamming> descMatcher;
   // vector<DMatch> matches;
 
-  std::shared_ptr<InputFrameProviderIface> capture;
-  if (fs::is_directory(fs::path(params.videoFilename)))
-    capture = std::shared_ptr<InputFrameListModule>(new InputFrameListModule(params.videoFilename));
-  else if(!params.videoFilename.empty())
-    capture = std::shared_ptr<InputVideoFileModule>(new InputVideoFileModule(params.videoFilename));
-  else
-    cout << "No valid input parameters" << endl;
-  
-  if(!capture->isOpen()) {
+  if(params.videoFilename.empty()) {
+    cout << "Empty video filename. Exiting." << endl;
+    exit(0);
+  }
+    
+  VideoCapture capture(params.videoFilename);
+  if(!capture.isOpened()) {
     cout << "Video filename " << params.videoFilename << " could not be opened. Exiting." << endl;
     exit(0);
   }
   
-  Size videoSize = capture->getSize();
-  unsigned int nFrames = capture->getNbFrames();
+  Size videoSize = Size(capture.get(CV_CAP_PROP_FRAME_WIDTH), capture.get(CV_CAP_PROP_FRAME_HEIGHT));
+  unsigned int nFrames = capture.get(CV_CAP_PROP_FRAME_COUNT);
   cout << "Video " << params.videoFilename <<
 	  ": width=" << videoSize.width <<
 	  ", height=" << videoSize.height <<
@@ -155,97 +149,97 @@
   if (params.nFrames > 0)
     lastFrameNum = MIN(params.frame1+static_cast<unsigned int>(params.nFrames), nFrames);
 
-  capture->setFrameNumber(params.frame1);
+  capture.set(CV_CAP_PROP_POS_FRAMES, params.frame1);
   for (unsigned int frameNum = params.frame1; (frameNum < lastFrameNum) && !::interruptionKey(key); frameNum++) {
-      bool success = capture->getNextFrame(frame);
-      if (!success || frame.empty()) {
-	cout << "Empty frame " << frameNum << ", breaking (" << success << " " << frame.empty() << " [" << frame.size().width << "x" << frame.size().height << "])" << endl;
-	break;
-      } else if (frameNum%50 ==0)
-	cout << "frame " << frameNum << endl;
+    capture >> frame;
+    if (frame.empty()) {
+      cout << "Empty frame " << frameNum << ", breaking (" << frame.empty() << " [" << frame.size().width << "x" << frame.size().height << "])" << endl;
+      break;
+    } else if (frameNum%50 ==0)
+      cout << "frame " << frameNum << endl;
 
-      if (params.undistort) {
-	remap(frame, undistortedFrame, map1, map2, interpolationMethod, BORDER_CONSTANT, 0.);
-	frame = undistortedFrame;
+    if (params.undistort) {
+      remap(frame, undistortedFrame, map1, map2, interpolationMethod, BORDER_CONSTANT, 0.);
+      frame = undistortedFrame;
 
-	if (frame.size() != videoSize) {
-	  cout << "Different frame size " << frameNum << ", breaking ([" << frame.size().width << "x" << frame.size().height << "])" << endl;
+      if (frame.size() != videoSize) {
+	cout << "Different frame size " << frameNum << ", breaking ([" << frame.size().width << "x" << frame.size().height << "])" << endl;
 	break;
-	}
       }
+    }
 
       
-      cvtColor(frame, currentFrameBW, CV_RGB2GRAY);
-      
-      if (!prevPts.empty()) {
-	currPts.clear();
-	calcOpticalFlowPyrLK(previousFrameBW, currentFrameBW, prevPts, currPts, status, errors, window, params.pyramidLevel, TermCriteria(static_cast<int>(TermCriteria::COUNT)+static_cast<int>(TermCriteria::EPS) /* = 3 */, params.maxNumberTrackingIterations, params.minTrackingError), /* int flags = */ 0, params.minFeatureEigThreshold);
-	/// \todo try calcOpticalFlowFarneback
+    cvtColor(frame, currentFrameBW, CV_RGB2GRAY);
+    
+    if (!prevPts.empty()) {
+      currPts.clear();
+      calcOpticalFlowPyrLK(previousFrameBW, currentFrameBW, prevPts, currPts, status, errors, window, params.pyramidLevel, TermCriteria(static_cast<int>(TermCriteria::COUNT)+static_cast<int>(TermCriteria::EPS) /* = 3 */, params.maxNumberTrackingIterations, params.minTrackingError), /* int flags = */ 0, params.minFeatureEigThreshold);
+      /// \todo try calcOpticalFlowFarneback
 
-	std::vector<Point2f> trackedPts;
-	std::vector<FeaturePointMatch>::iterator iter = featurePointMatches.begin();
-	while (iter != featurePointMatches.end()) {
-	  bool deleteFeature = false;
+      std::vector<Point2f> trackedPts;
+      std::vector<FeaturePointMatch>::iterator iter = featurePointMatches.begin();
+      while (iter != featurePointMatches.end()) {
+	bool deleteFeature = false;
 	  
-	  if (status[iter->pointNum]) {
-	    iter->feature->addPoint(frameNum, currPts[iter->pointNum], homography);
-
-	    deleteFeature = iter->feature->isDisplacementSmall(params.nDisplacements, minTotalFeatureDisplacement)
-	      || !iter->feature->isMotionSmooth(params.accelerationBound, params.deviationBound);
-	    if (deleteFeature)
-	      iter->feature->shorten();
-	  } else
-	    deleteFeature = true;
+	if (status[iter->pointNum]) {
+	  iter->feature->addPoint(frameNum, currPts[iter->pointNum], homography);
 
-	  if (deleteFeature) {
-	    if (iter->feature->length() >= params.minFeatureTime) {
-	      iter->feature->setId(savedFeatureId);
-	      savedFeatureId++;
-	      iter->feature->movingAverage(params.nFramesSmoothing);
-	      lostFeatures.push_back(iter->feature);
-	    }
-	    iter = featurePointMatches.erase(iter);
-	  } else {
-	    trackedPts.push_back(currPts[iter->pointNum]);
-	    iter->pointNum = trackedPts.size()-1;
-	    iter++;
+	  deleteFeature = iter->feature->isDisplacementSmall(params.nDisplacements, minTotalFeatureDisplacement)
+	    || !iter->feature->isMotionSmooth(params.accelerationBound, params.deviationBound);
+	  if (deleteFeature)
+	    iter->feature->shorten();
+	} else
+	  deleteFeature = true;
+
+	if (deleteFeature) {
+	  if (iter->feature->length() >= params.minFeatureTime) {
+	    iter->feature->setId(savedFeatureId);
+	    savedFeatureId++;
+	    iter->feature->movingAverage(params.nFramesSmoothing);
+	    lostFeatures.push_back(iter->feature);
 	  }
-	}
-	currPts = trackedPts;
-	assert(currPts.size() == featurePointMatches.size());
-	saveFeatures(lostFeatures, *trajectoryDB, "positions", "velocities");
-	
-	if (params.display) {
-	  BOOST_FOREACH(FeaturePointMatch fp, featurePointMatches)
-	    fp.feature->draw(frame, invHomography, Colors::red());
-	  // object detection
-	  // vector<Rect> locations;
-	  // hog.detectMultiScale(frame, locations, 0, Size(8,8), Size(32,32), 1.05, 2);
-	  // BOOST_FOREACH(Rect r, locations)
-	  //   rectangle(frame, r.tl(), r.br(), cv::Scalar(0,255,0), 3);
+	  iter = featurePointMatches.erase(iter);
+	} else {
+	  trackedPts.push_back(currPts[iter->pointNum]);
+	  iter->pointNum = trackedPts.size()-1;
+	  iter++;
 	}
       }
+      currPts = trackedPts;
+      assert(currPts.size() == featurePointMatches.size());
+      saveFeatures(lostFeatures, *trajectoryDB, "positions", "velocities");
+	
+      if (params.display) {
+	BOOST_FOREACH(FeaturePointMatch fp, featurePointMatches)
+	  fp.feature->draw(frame, invHomography, Colors::red());
+	// object detection
+	// vector<Rect> locations;
+	// hog.detectMultiScale(frame, locations, 0, Size(8,8), Size(32,32), 1.05, 2);
+	// BOOST_FOREACH(Rect r, locations)
+	//   rectangle(frame, r.tl(), r.br(), cv::Scalar(0,255,0), 3);
+      }
+    }
       
-      // adding new features, using mask around existing feature positions
-      Mat featureMask = mask.clone();
-      for (unsigned int n=0;n<currPts.size(); n++)
-	for (int j=MAX(0, currPts[n].x-params.minFeatureDistanceKLT); j<MIN(videoSize.width, currPts[n].x+params.minFeatureDistanceKLT+1); j++)
-	  for (int i=MAX(0, currPts[n].y-params.minFeatureDistanceKLT); i<MIN(videoSize.height, currPts[n].y+params.minFeatureDistanceKLT+1); i++)
-	    featureMask.at<uchar>(i,j)=0;
-      goodFeaturesToTrack(currentFrameBW, newPts, params.maxNFeatures, params.featureQuality, params.minFeatureDistanceKLT, featureMask, params.blockSize, params.useHarrisDetector, params.k);
-      BOOST_FOREACH(Point2f p, newPts) { //for (unsigned int i=0; i<newPts.size(); i++) {
-	FeatureTrajectoryPtr f = FeatureTrajectoryPtr(new FeatureTrajectory(frameNum, p, homography));
-	featurePointMatches.push_back(FeaturePointMatch(f, currPts.size()));
-	currPts.push_back(p);
-      }
+    // adding new features, using mask around existing feature positions
+    Mat featureMask = mask.clone();
+    for (unsigned int n=0;n<currPts.size(); n++)
+      for (int j=MAX(0, currPts[n].x-params.minFeatureDistanceKLT); j<MIN(videoSize.width, currPts[n].x+params.minFeatureDistanceKLT+1); j++)
+	for (int i=MAX(0, currPts[n].y-params.minFeatureDistanceKLT); i<MIN(videoSize.height, currPts[n].y+params.minFeatureDistanceKLT+1); i++)
+	  featureMask.at<uchar>(i,j)=0;
+    goodFeaturesToTrack(currentFrameBW, newPts, params.maxNFeatures, params.featureQuality, params.minFeatureDistanceKLT, featureMask, params.blockSize, params.useHarrisDetector, params.k);
+    BOOST_FOREACH(Point2f p, newPts) { //for (unsigned int i=0; i<newPts.size(); i++) {
+      FeatureTrajectoryPtr f = FeatureTrajectoryPtr(new FeatureTrajectory(frameNum, p, homography));
+      featurePointMatches.push_back(FeaturePointMatch(f, currPts.size()));
+      currPts.push_back(p);
+    }
       
-      if (params.display) {
-	imshow("mask", featureMask*256);
-	imshow("frame", frame);
-	key = waitKey(2);
-      }
-      previousFrameBW = currentFrameBW.clone();
-      prevPts = currPts;
+    if (params.display) {
+      imshow("mask", featureMask*256);
+      imshow("frame", frame);
+      key = waitKey(2);
+    }
+    previousFrameBW = currentFrameBW.clone();
+    prevPts = currPts;
   }
 
   // save the remaining currently tracked features