
#include "Motion.hpp"
#include "Parameters.hpp"
#include "cvutils.hpp"
#include "utils.hpp"

#include "src/Trajectory.h"
#include "src/TrajectoryDBAccessList.h"
#include "src/TrajectoryDBAccessBlob.h"

#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/video/tracking.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/objdetect/objdetect.hpp"

#include <boost/shared_ptr.hpp>
#include <boost/foreach.hpp>

#include <iostream>
#include <vector>
#include <ctime>

using namespace std;
using namespace cv;

void drawMatchesRelative(const vector<KeyPoint>& train, const vector<KeyPoint>& query, const vector<DMatch>& matches, Mat& img) {
  for (unsigned int i = 0; i < matches.size(); i++) {
    Point2f pt_new = query[matches[i].queryIdx].pt;
    Point2f pt_old = train[matches[i].trainIdx].pt;
    Point2f dist = pt_new - pt_old;
    if (norm(dist) < 20) {
      line(img, pt_new, pt_old, Scalar(125, 255, 125), 1);
      circle(img, pt_old, 2, Scalar(255, 0, 125), 1);
    }
  }
}

void drawOpticalFlow(const vector<Point2f>& prevPts, const vector<Point2f>& currPts, const vector<uchar>& status, Mat& img) {
  for (unsigned int i=0; i<status.size(); i++) {
    if (status[i]) {
      line(img, prevPts[i], currPts[i], Scalar(125, 255, 125), 1);
      circle(img, prevPts[i], 2, Scalar(255, 0, 125), 1);
    }
  }
}

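// Associates a feature trajectory with the index of its current point in the
// frame's point vector; pointNum is re-indexed every frame as lost features
// are compacted out of the vector.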
struct FeaturePointMatch {
  FeatureTrajectoryPtr feature;
  int pointNum;

  FeaturePointMatch(FeatureTrajectoryPtr _feature, const int& _pointNum):
    feature(_feature), pointNum(_pointNum) {}
};

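// Writes the accumulated feature trajectories to the database once at least
// minNFeatures have been collected, then clears the buffer.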
inline void saveFeatures(vector<FeatureTrajectoryPtr>& features, TrajectoryDBAccess<Point2f>& db, const string& positionsTableName, const string& velocitiesTableName, const unsigned int& minNFeatures = 0) {
  /// \todo smoothing
  if (features.size() >= minNFeatures) {
    BOOST_FOREACH(FeatureTrajectoryPtr f, features) f->write(db, positionsTableName, velocitiesTableName);
    features.clear();
  }
}

void trackFeatures(const KLTFeatureTrackingParameters& params) {
  // BriefDescriptorExtractor brief(32);
  // const int DESIRED_FTRS = 500;
  // GridAdaptedFeatureDetector detector(new FastFeatureDetector(10, true), DESIRED_FTRS, 4, 4);

  Mat homography = ::loadMat(params.homographyFilename, " ");
  Mat invHomography;
  if (params.display && !homography.empty())
    invHomography = homography.inv();

  float minTotalFeatureDisplacement = params.nDisplacements*params.minFeatureDisplacement;
  Size window = Size(params.windowSize, params.windowSize);

  // BruteForceMatcher<Hamming> descMatcher;
  // vector<DMatch> matches;
  Size videoSize;

  // if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0]))) // if no parameter or number parameter
  //   capture.open(argc == 2 ? argv[1][0] - '0' : 0);
  // else if( argc >= 2 )
  //   {
  //     capture.open(argv[1]);
  //     if( capture.isOpened() )
  // 	videoSize = Size(capture.get(CV_CAP_PROP_FRAME_WIDTH), capture.get(CV_CAP_PROP_FRAME_HEIGHT));
  // 	cout << "Video " << argv[1] <<
  // 	  ": width=" << videoSize.width <<
  // 	  ", height=" << videoSize.height <<
  // 	  ", nframes=" << capture.get(CV_CAP_PROP_FRAME_COUNT) << endl;
  //     if( argc > 2 && isdigit(argv[2][0]) ) // could be used to reach first frame, dumping library messages to log file (2> /tmp/log.txt)
  //       {
  // 	  sscanf(argv[2], "%d", &params.frame1);
  //     	  cout << "seeking to frame #" << params.frame1 << endl;
  //     	  //cap.set(CV_CAP_PROP_POS_FRAMES, pos);
  // 	  for (int i=0; i<params.frame1; i++)
  // 	    capture >> frame;
  //       }
  //   }

  VideoCapture capture;
  capture.open(params.videoFilename);
  if(capture.isOpened()) {
    videoSize = Size(capture.get(CV_CAP_PROP_FRAME_WIDTH), capture.get(CV_CAP_PROP_FRAME_HEIGHT));
    cout << "Video " << params.videoFilename <<
      ": width=" << videoSize.width <<
      ", height=" << videoSize.height <<
      ", nframes=" << capture.get(CV_CAP_PROP_FRAME_COUNT) << endl;
  } else {
    cout << "Video filename " << params.videoFilename << " could not be opened. Exiting." << endl;
    exit(1); // non-zero status: the video could not be opened
  }
  // if (!capture.isOpened())
  //   {
  //     //help(argv);
  //     cout << "capture device " << argv[1] << " failed to open!" << endl;
  //     return 1;
  //   }

  Mat mask = imread(params.maskFilename, 0);
  if (mask.empty())
    mask = Mat::ones(videoSize, CV_8UC1);

  boost::shared_ptr<TrajectoryDBAccess<Point2f> > trajectoryDB = boost::shared_ptr<TrajectoryDBAccess<Point2f> >(new TrajectoryDBAccessList<Point2f>());
  //TrajectoryDBAccess<Point2f>* trajectoryDB = new TrajectoryDBAccessBlob<Point2f>();
  trajectoryDB->connect(params.databaseFilename.c_str());
  trajectoryDB->createTable("positions");
  trajectoryDB->createTable("velocities");
  trajectoryDB->beginTransaction();

  vector<KeyPoint> prevKpts, currKpts;
  vector<Point2f> prevPts, currPts, newPts;
  vector<uchar> status;
  vector<float> errors;
  Mat prevDesc, currDesc;

  vector<FeatureTrajectoryPtr> lostFeatures;
  vector<FeaturePointMatch> featurePointMatches;

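  // HOG pedestrian detector, currently used only by the commented-out
  // object detection code in the display section below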
  HOGDescriptor hog;
  hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());

  int key = '?';
  unsigned int savedFeatureId=0;
  Mat frame, currentFrameBW, previousFrameBW;
  for (int frameNum = params.frame1; ((frameNum-params.frame1 < params.nFrames) || (params.nFrames < 0)) && !::interruptionKey(key); frameNum++) {
      capture >> frame;
      cout << frameNum << " " << capture.get(CV_CAP_PROP_POS_FRAMES) << " " << prevPts.size() << endl;
      int emptyFrameNum = 0;
      while (frame.empty()) {
	cerr << "empty frame " << emptyFrameNum << " " << capture.get(CV_CAP_PROP_POS_FRAMES) << endl;
	capture >> frame;
	emptyFrameNum++;
	if (emptyFrameNum >= 3000) // give up after too many consecutive empty frames
	  exit(0);
      }
      
      cvtColor(frame, currentFrameBW, CV_BGR2GRAY); // capture frames are BGR in OpenCV

      // "normal" feature detectors: detect features here
      // detector.detect(currentFrameBW, currKpts); // see video_homography c++ sample
      
      if (!prevPts.empty()) {
	//::keyPoints2Points(prevKpts, prevPts);
	currPts.clear();
	calcOpticalFlowPyrLK(previousFrameBW, currentFrameBW, prevPts, currPts, status, errors, window, params.pyramidLevel, TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, params.maxNumberTrackingIterations, params.minTrackingError), 0.5 /* unused */, 0); // OPTFLOW_USE_INITIAL_FLOW
	/// \todo try calcOpticalFlowFarneback

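	// Prune and compact: keep only points successfully tracked (status != 0)
	// whose trajectory passes the displacement and smoothness filters;
	// survivors are re-indexed into trackedPts, and finished features that
	// lasted at least minFeatureTime are queued for saving.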
	vector<Point2f> trackedPts;
	vector<FeaturePointMatch>::iterator iter = featurePointMatches.begin();
	while (iter != featurePointMatches.end()) {
	  bool deleteFeature = false;
	  
	  if (status[iter->pointNum]) {
	    iter->feature->addPoint(frameNum, currPts[iter->pointNum], homography);

	    deleteFeature |= iter->feature->isDisplacementSmall(params.nDisplacements, minTotalFeatureDisplacement)
	      || !iter->feature->isMotionSmooth(params.accelerationBound, params.deviationBound);
	    if (deleteFeature)
	      iter->feature->shorten();
	  } else
	    deleteFeature = true;

	  if (deleteFeature) {
	    if (iter->feature->length() >= params.minFeatureTime) {
	      iter->feature->setId(savedFeatureId);
	      savedFeatureId++;
	      lostFeatures.push_back(iter->feature);
	    }
	    iter = featurePointMatches.erase(iter);
	  } else {
	    trackedPts.push_back(currPts[iter->pointNum]);
	    iter->pointNum = trackedPts.size()-1;
	    iter++;
	  }
	}
	currPts = trackedPts;
	assert(currPts.size() == featurePointMatches.size());
	saveFeatures(lostFeatures, *trajectoryDB, "positions", "velocities");
	
	if (params.display) {
	  BOOST_FOREACH(FeaturePointMatch fp, featurePointMatches)
	    fp.feature->draw(frame, invHomography, Colors::red());
	  // object detection
	  // vector<Rect> locations;
	  // hog.detectMultiScale(frame, locations, 0, Size(8,8), Size(32,32), 1.05, 2);
	  // BOOST_FOREACH(Rect r, locations)
	  //   rectangle(frame, r.tl(), r.br(), cv::Scalar(0,255,0), 3);
	}
	//drawOpticalFlow(prevPts, currPts, status, frame);
	
	// cout << matches.size() << " matches" << endl;
	// descMatcher.match(currDesc, prevDesc, matches);
	// cout << matches.size() << " matches" << endl;
	//drawMatchesRelative(prevKpts, currKpts, matches, frame);
      }
      
      // adding new features, using mask around existing feature positions
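      // (a square of side 2*minFeatureDistanceKLT+1 pixels is zeroed around
      // each currently tracked point)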
      Mat featureMask = mask.clone();
      for (unsigned int n=0; n<currPts.size(); n++)
	for (int j=MAX(0, int(currPts[n].x-params.minFeatureDistanceKLT)); j<MIN(videoSize.width, int(currPts[n].x+params.minFeatureDistanceKLT)+1); j++)
	  for (int i=MAX(0, int(currPts[n].y-params.minFeatureDistanceKLT)); i<MIN(videoSize.height, int(currPts[n].y+params.minFeatureDistanceKLT)+1); i++)
	    featureMask.at<uchar>(i,j)=0;
      goodFeaturesToTrack(currentFrameBW, newPts, params.maxNFeatures, params.featureQuality, params.minFeatureDistanceKLT, featureMask, params.windowSize, params.useHarrisDetector, params.k);
      BOOST_FOREACH(Point2f p, newPts) {
	FeatureTrajectoryPtr f = FeatureTrajectoryPtr(new FeatureTrajectory(frameNum, p, homography));
	featurePointMatches.push_back(FeaturePointMatch(f, currPts.size()));
	currPts.push_back(p);
      }
      // currPts.insert(currPts.end(), newPts.begin(), newPts.end());
      //::keyPoints2Points(currKpts, currPts, false);

      //brief.compute(currentFrameBW, currKpts, currDesc); //Compute brief descriptors at each keypoint location
      
      if (params.display) {
	imshow("frame", frame);
	imshow("mask", featureMask*256);
	key = waitKey(2);
      }
      previousFrameBW = currentFrameBW.clone();
      prevPts = currPts;
      //prevKpts = currKpts;
      //currDesc.copyTo(prevDesc);
    }  
  
  trajectoryDB->endTransaction();
  trajectoryDB->disconnect();
}

void groupFeatures(const KLTFeatureTrackingParameters& params) {
  cout << "group" << endl;
    
  boost::shared_ptr<TrajectoryDBAccessList<Point2f> > trajectoryDB = boost::shared_ptr<TrajectoryDBAccessList<Point2f> >(new TrajectoryDBAccessList<Point2f>());
  // TODO write generic methods for the blob and list versions
  //TrajectoryDBAccess<Point2f>* trajectoryDB = new TrajectoryDBAccessBlob<Point2f>();
  bool success = trajectoryDB->connect(params.databaseFilename.c_str());
  trajectoryDB->createObjectTable("objects", "objects_features");
  unsigned int savedObjectId=0;

  // vector<boost::shared_ptr<Trajectory<Point2f> > > trajectories;
  // cout << trajectories.size() << endl;
  // std::clock_t c_start = std::clock();
  // success = trajectoryDB->read(trajectories, "positions"); // TODO load velocities as well in a FeatureTrajectory object // attention, velocities lack the first instant
  // std::clock_t c_end = std::clock();
  // cout << "Loaded " << trajectories.size() << " trajectories in " << 1000.0 * (c_end-c_start) / CLOCKS_PER_SEC << " CPU seconds" << endl;

  // boost::shared_ptr<Trajectory<cv::Point2f> > trajectory;
  // c_start = std::clock();
  // for (unsigned int i = 0; i<trajectories.size(); ++i) {
  //   success = trajectoryDB->read(trajectory, i, "positions");
  // }
  // c_end = std::clock();
  // cout << "Loaded " << trajectories.size() << " trajectories one by one in " << 1000.0 * (c_end-c_start) / CLOCKS_PER_SEC << " CPU seconds" << endl;

  trajectoryDB->createViewInstants();
  int maxTrajectoryLength;
  trajectoryDB->maxTrajectoryLength(maxTrajectoryLength);
  cout << "max trajectory length " << maxTrajectoryLength << endl;
  maxTrajectoryLength = 20; // for tests

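  // Graph used to group features into objects: features that remain within
  // mmConnectionDistance of each other are connected, and connections are
  // broken (segmented) when the relative motion exceeds mmSegmentationDistance;
  // connected components of features become the tracked objects.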
  FeatureGraph featureGraph(params.mmConnectionDistance, params.mmSegmentationDistance, params.minFeatureTime, params.minNFeaturesPerGroup);

  // main loop
  // TODO version that can be interrupted?
  for (int frameNum = params.frame1; ((frameNum-params.frame1 < params.nFrames) || (params.nFrames < 0)); frameNum++) {
    vector<int> trajectoryIds;
    success = trajectoryDB->trajectoryIdEndingAt(trajectoryIds, frameNum); // trajectories ending at this frame
    cout << "frame " << frameNum << " " << success << endl;
    cout << trajectoryIds.size() << " trajectories " << endl;
    BOOST_FOREACH(int trajectoryId, trajectoryIds) {
      //cout << trajectoryId << " " << endl;
      // boost::shared_ptr<Trajectory<cv::Point2f> > trajectory;
      // success = trajectoryDB->read(trajectory, trajectoryId, "positions"); // velocities
      FeatureTrajectoryPtr ft = FeatureTrajectoryPtr(new FeatureTrajectory(trajectoryId, *trajectoryDB, "positions", "velocities"));
      // stringstream ss;ss << *ft; cout << ss.str() << endl;
      // cout << ft->getFirstInstant() << " " << ft->getLastInstant() << endl;
      featureGraph.addFeature(ft);
    }

    // check for connected components that are old enough (no chance to match with trajectories added later)
    // we could check only when some features are getting old enough?
    if (frameNum%10 == 0) {
      featureGraph.connectedComponents(frameNum-maxTrajectoryLength+params.minFeatureTime);
      vector<vector<unsigned int> > featureGroups = featureGraph.getFeatureGroups();
      for (unsigned int i=0; i<featureGroups.size(); ++i) {
	trajectoryDB->writeObject(savedObjectId, featureGroups[i], -1, 1, string("objects"), string("objects_features"));
	savedObjectId++;
      }
    }

    cout << featureGraph.informationString() << endl;
  }

  trajectoryDB->endTransaction();
  trajectoryDB->disconnect();
}

int main(int argc, char *argv[]) {
  KLTFeatureTrackingParameters params(argc, argv);
  cout << params.parameterDescription << endl;

  if (params.trackFeatures)
    trackFeatures(params);
  else if (params.groupFeatures)
    groupFeatures(params);

  return 0;
}

/* ------------------ DOCUMENTATION ------------------ */


/*! \mainpage 

This project is a collection of software tools for transportation called Traffic Intelligence. Other documents are:

- \ref feature_based_tracking

The code is partially documented using doxygen-formatted comments. The documentation can be extracted with doxygen by typing \c doxygen in the main directory (or <tt>make doc</tt> on a system with the Make tool installed). 

*/

/*! \page feature_based_tracking Feature-based Tracking: User Manual

This document describes a software tool for object tracking in video data, developed for road traffic monitoring and safety diagnosis. It is part of a larger collection of software tools for transportation called Traffic Intelligence. 

The tool relies on feature-based tracking, a robust object tracking method particularly suited to the extraction of traffic data such as trajectories and speeds. The best description of this method is given in <a href="http://nicolas.saunier.confins.net/data/saunier06crv.html">this paper</a>. The program has a command line interface and this document briefly explains how to use it. Keep in mind that this is a work in progress and that major changes are continuously being made. 
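
At its core, the method alternates between two OpenCV building blocks: detecting good features to track and following them from frame to frame with the pyramidal Lucas-Kanade algorithm. The following is a minimal sketch of that loop, simplified from the trackFeatures() function in this file; the filename and numeric parameter values are illustrative only, not the program's defaults, and all the trajectory filtering, world-coordinate projection and database code is omitted.

\code
cv::VideoCapture capture("video.avi"); // illustrative filename
cv::Mat frame, currentFrameBW, previousFrameBW;
std::vector<cv::Point2f> prevPts, currPts, newPts;
std::vector<uchar> status;
std::vector<float> errors;
while (capture.read(frame)) {
  cv::cvtColor(frame, currentFrameBW, CV_BGR2GRAY);
  if (!prevPts.empty()) // follow existing features into the current frame
    cv::calcOpticalFlowPyrLK(previousFrameBW, currentFrameBW, prevPts, currPts, status, errors);
  // replenish the feature pool (the real program masks out existing features
  // and prunes points whose status is 0 or whose motion fails the filters)
  cv::goodFeaturesToTrack(currentFrameBW, newPts, 1000, 0.01, 5.);
  currPts.insert(currPts.end(), newPts.begin(), newPts.end());
  previousFrameBW = currentFrameBW.clone();
  prevPts = currPts;
}
\endcode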

\section license License

The code is licensed under the MIT open source license (http://www.opensource.org/licenses/mit-license).

If you make use of this piece of software, please cite one of my papers, e.g. N. Saunier, T. Sayed and K. Ismail. Large Scale Automated Analysis of Vehicle Interactions and Collisions. Transportation Research Record: Journal of the Transportation Research Board, 2147:42-50, 2010. In any case, I would be very happy to know about any use of the code, and to discuss any opportunity for collaboration. 

Contact me at nicolas.saunier@polymtl.ca and learn more about my work at http://nicolas.saunier.confins.net.

*/