Mercurial Hosting > traffic-intelligence
changeset 655:39fa1c998b29
removed the abstract class to represent folders of images or video files since the capability is now built into OpenCV
author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
---|---|
date | Thu, 07 May 2015 16:30:58 +0200 |
parents | 045d05cef9d0 |
children | 2813d74b3635 |
files | c/InputFrameListModule.cpp c/InputVideoFileModule.cpp c/Makefile c/feature-based-tracking.cpp include/InputFrameListModule.h include/InputFrameProviderIface.h include/InputVideoFileModule.h |
diffstat | 7 files changed, 87 insertions(+), 266 deletions(-) [+] |
line wrap: on
line diff
--- a/c/InputFrameListModule.cpp Thu May 07 16:09:47 2015 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,61 +0,0 @@ -#include "InputFrameListModule.h" -#include "utils.hpp" - -#include <fstream> -#include <ostream> -#include <iostream> -#include <algorithm> - -//#include <boost/algorithm/string.hpp> -#include <boost/filesystem.hpp> - -#include "opencv2/core/core.hpp" -#include "opencv2/highgui/highgui.hpp" - -namespace fs = boost::filesystem; - -InputFrameListModule::InputFrameListModule(const std::string& _dirname) - : mCurrentIdx(0), mInit(false), dirname(_dirname){ - loadImageList(); -} - -InputFrameListModule::~InputFrameListModule(void) { } - - -void InputFrameListModule::setFrameNumber(const unsigned int& frameNumber) { - if (frameNumber < filenames.size()) - mCurrentIdx = frameNumber; - else - mCurrentIdx = filenames.size()-1; -} - -bool InputFrameListModule::getNextFrame(cv::Mat& mat) -{ - bool success = false; - if(mCurrentIdx < filenames.size()) { - mat = cv::imread(dirname+filenames[mCurrentIdx++]); - - if(!mat.empty()) - success = true; - } - - return success; -} - -unsigned int InputFrameListModule::getNbFrames(void) { - return filenames.size(); -} - -void InputFrameListModule::loadImageList(void) { - for (fs::directory_iterator iter(dirname); iter!=fs::directory_iterator(); iter++) - filenames.push_back(iter->path().filename().string()); - - sort(filenames.begin(), filenames.end()); - - if(!filenames.empty()) { - std::cout << dirname+filenames[0] << std::endl; - cv::Mat tmpImg = cv::imread(dirname+filenames[0]); - mSize = cv::Size(tmpImg.cols, tmpImg.rows); - mInit = true; - } -}
--- a/c/InputVideoFileModule.cpp Thu May 07 16:09:47 2015 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,31 +0,0 @@ -#include "InputVideoFileModule.h" - -InputVideoFileModule::InputVideoFileModule(const std::string& videoPath) - : mInit(false) - , mNumberOfFrame(0) -{ - mInit = mVideoCapture.open(videoPath.c_str()); - double frameCount; - frameCount = mVideoCapture.get(CV_CAP_PROP_FRAME_COUNT); - mSize = cv::Size(mVideoCapture.get(CV_CAP_PROP_FRAME_WIDTH), mVideoCapture.get(CV_CAP_PROP_FRAME_HEIGHT)); - mNumberOfFrame = (unsigned int)frameCount; -} - -InputVideoFileModule::~InputVideoFileModule(void) { } - - -void InputVideoFileModule::setFrameNumber(const unsigned int& frameNumber) { - mVideoCapture.set(CV_CAP_PROP_POS_FRAMES, frameNumber); -} - -bool InputVideoFileModule::getNextFrame(cv::Mat& outputPicture) -{ - bool success = false; - if(mInit) - { - mVideoCapture >> outputPicture; - success = !outputPicture.empty(); - } - return success; -} -
--- a/c/Makefile Thu May 07 16:09:47 2015 +0200 +++ b/c/Makefile Thu May 07 16:30:58 2015 +0200 @@ -10,7 +10,7 @@ LDFLAGS = -lm LDFLAGS += -lTrajectoryManagementAndAnalysis -lsqlite3 -LDFLAGS += -lboost_program_options -lboost_filesystem -lboost_system +LDFLAGS += -lboost_program_options #LDFLAGS += -lfltk CFLAGS = -Wall -W -Wextra -std=c++11 @@ -48,7 +48,7 @@ CXXFLAGS = $(INCLUDE) $(CFLAGS) #GUI_OBJS = -CV_OBJS = cvutils.o InputFrameListModule.o InputVideoFileModule.o +CV_OBJS = cvutils.o COMMON_OBJS = utils.o Motion.o Parameters.o utils.o OBJS = $(COMMON_OBJS) $(CV_OBJS) TESTS_OBJS = test_feature.o test_graph.o
--- a/c/feature-based-tracking.cpp Thu May 07 16:09:47 2015 +0200 +++ b/c/feature-based-tracking.cpp Thu May 07 16:30:58 2015 +0200 @@ -2,8 +2,6 @@ #include "Parameters.hpp" #include "cvutils.hpp" #include "utils.hpp" -#include "InputVideoFileModule.h" -#include "InputFrameListModule.h" #include "src/Trajectory.h" #include "src/TrajectoryDBAccessList.h" @@ -17,7 +15,6 @@ #include "opencv2/objdetect/objdetect.hpp" #include <boost/foreach.hpp> -#include <boost/filesystem.hpp> #include <iostream> #include <vector> @@ -27,7 +24,6 @@ using namespace std; using namespace cv; -namespace fs = boost::filesystem; void drawMatchesRelative(const vector<KeyPoint>& train, const vector<KeyPoint>& query, std::vector<cv::DMatch>& matches, Mat& img) { for (int i = 0; i < (int)matches.size(); i++) @@ -92,21 +88,19 @@ // BruteForceMatcher<Hamming> descMatcher; // vector<DMatch> matches; - std::shared_ptr<InputFrameProviderIface> capture; - if (fs::is_directory(fs::path(params.videoFilename))) - capture = std::shared_ptr<InputFrameListModule>(new InputFrameListModule(params.videoFilename)); - else if(!params.videoFilename.empty()) - capture = std::shared_ptr<InputVideoFileModule>(new InputVideoFileModule(params.videoFilename)); - else - cout << "No valid input parameters" << endl; - - if(!capture->isOpen()) { + if(params.videoFilename.empty()) { + cout << "Empty video filename. Exiting." << endl; + exit(0); + } + + VideoCapture capture(params.videoFilename); + if(!capture.isOpened()) { cout << "Video filename " << params.videoFilename << " could not be opened. Exiting." 
<< endl; exit(0); } - Size videoSize = capture->getSize(); - unsigned int nFrames = capture->getNbFrames(); + Size videoSize = Size(capture.get(CV_CAP_PROP_FRAME_WIDTH), capture.get(CV_CAP_PROP_FRAME_HEIGHT)); + unsigned int nFrames = capture.get(CV_CAP_PROP_FRAME_COUNT); cout << "Video " << params.videoFilename << ": width=" << videoSize.width << ", height=" << videoSize.height << @@ -155,97 +149,97 @@ if (params.nFrames > 0) lastFrameNum = MIN(params.frame1+static_cast<unsigned int>(params.nFrames), nFrames); - capture->setFrameNumber(params.frame1); + capture.set(CV_CAP_PROP_POS_FRAMES, params.frame1); for (unsigned int frameNum = params.frame1; (frameNum < lastFrameNum) && !::interruptionKey(key); frameNum++) { - bool success = capture->getNextFrame(frame); - if (!success || frame.empty()) { - cout << "Empty frame " << frameNum << ", breaking (" << success << " " << frame.empty() << " [" << frame.size().width << "x" << frame.size().height << "])" << endl; - break; - } else if (frameNum%50 ==0) - cout << "frame " << frameNum << endl; + capture >> frame; + if (frame.empty()) { + cout << "Empty frame " << frameNum << ", breaking (" << frame.empty() << " [" << frame.size().width << "x" << frame.size().height << "])" << endl; + break; + } else if (frameNum%50 ==0) + cout << "frame " << frameNum << endl; - if (params.undistort) { - remap(frame, undistortedFrame, map1, map2, interpolationMethod, BORDER_CONSTANT, 0.); - frame = undistortedFrame; + if (params.undistort) { + remap(frame, undistortedFrame, map1, map2, interpolationMethod, BORDER_CONSTANT, 0.); + frame = undistortedFrame; - if (frame.size() != videoSize) { - cout << "Different frame size " << frameNum << ", breaking ([" << frame.size().width << "x" << frame.size().height << "])" << endl; + if (frame.size() != videoSize) { + cout << "Different frame size " << frameNum << ", breaking ([" << frame.size().width << "x" << frame.size().height << "])" << endl; break; - } } + } - cvtColor(frame, currentFrameBW, 
CV_RGB2GRAY); - - if (!prevPts.empty()) { - currPts.clear(); - calcOpticalFlowPyrLK(previousFrameBW, currentFrameBW, prevPts, currPts, status, errors, window, params.pyramidLevel, TermCriteria(static_cast<int>(TermCriteria::COUNT)+static_cast<int>(TermCriteria::EPS) /* = 3 */, params.maxNumberTrackingIterations, params.minTrackingError), /* int flags = */ 0, params.minFeatureEigThreshold); - /// \todo try calcOpticalFlowFarneback + cvtColor(frame, currentFrameBW, CV_RGB2GRAY); + + if (!prevPts.empty()) { + currPts.clear(); + calcOpticalFlowPyrLK(previousFrameBW, currentFrameBW, prevPts, currPts, status, errors, window, params.pyramidLevel, TermCriteria(static_cast<int>(TermCriteria::COUNT)+static_cast<int>(TermCriteria::EPS) /* = 3 */, params.maxNumberTrackingIterations, params.minTrackingError), /* int flags = */ 0, params.minFeatureEigThreshold); + /// \todo try calcOpticalFlowFarneback - std::vector<Point2f> trackedPts; - std::vector<FeaturePointMatch>::iterator iter = featurePointMatches.begin(); - while (iter != featurePointMatches.end()) { - bool deleteFeature = false; + std::vector<Point2f> trackedPts; + std::vector<FeaturePointMatch>::iterator iter = featurePointMatches.begin(); + while (iter != featurePointMatches.end()) { + bool deleteFeature = false; - if (status[iter->pointNum]) { - iter->feature->addPoint(frameNum, currPts[iter->pointNum], homography); - - deleteFeature = iter->feature->isDisplacementSmall(params.nDisplacements, minTotalFeatureDisplacement) - || !iter->feature->isMotionSmooth(params.accelerationBound, params.deviationBound); - if (deleteFeature) - iter->feature->shorten(); - } else - deleteFeature = true; + if (status[iter->pointNum]) { + iter->feature->addPoint(frameNum, currPts[iter->pointNum], homography); - if (deleteFeature) { - if (iter->feature->length() >= params.minFeatureTime) { - iter->feature->setId(savedFeatureId); - savedFeatureId++; - iter->feature->movingAverage(params.nFramesSmoothing); - 
lostFeatures.push_back(iter->feature); - } - iter = featurePointMatches.erase(iter); - } else { - trackedPts.push_back(currPts[iter->pointNum]); - iter->pointNum = trackedPts.size()-1; - iter++; + deleteFeature = iter->feature->isDisplacementSmall(params.nDisplacements, minTotalFeatureDisplacement) + || !iter->feature->isMotionSmooth(params.accelerationBound, params.deviationBound); + if (deleteFeature) + iter->feature->shorten(); + } else + deleteFeature = true; + + if (deleteFeature) { + if (iter->feature->length() >= params.minFeatureTime) { + iter->feature->setId(savedFeatureId); + savedFeatureId++; + iter->feature->movingAverage(params.nFramesSmoothing); + lostFeatures.push_back(iter->feature); } - } - currPts = trackedPts; - assert(currPts.size() == featurePointMatches.size()); - saveFeatures(lostFeatures, *trajectoryDB, "positions", "velocities"); - - if (params.display) { - BOOST_FOREACH(FeaturePointMatch fp, featurePointMatches) - fp.feature->draw(frame, invHomography, Colors::red()); - // object detection - // vector<Rect> locations; - // hog.detectMultiScale(frame, locations, 0, Size(8,8), Size(32,32), 1.05, 2); - // BOOST_FOREACH(Rect r, locations) - // rectangle(frame, r.tl(), r.br(), cv::Scalar(0,255,0), 3); + iter = featurePointMatches.erase(iter); + } else { + trackedPts.push_back(currPts[iter->pointNum]); + iter->pointNum = trackedPts.size()-1; + iter++; } } + currPts = trackedPts; + assert(currPts.size() == featurePointMatches.size()); + saveFeatures(lostFeatures, *trajectoryDB, "positions", "velocities"); + + if (params.display) { + BOOST_FOREACH(FeaturePointMatch fp, featurePointMatches) + fp.feature->draw(frame, invHomography, Colors::red()); + // object detection + // vector<Rect> locations; + // hog.detectMultiScale(frame, locations, 0, Size(8,8), Size(32,32), 1.05, 2); + // BOOST_FOREACH(Rect r, locations) + // rectangle(frame, r.tl(), r.br(), cv::Scalar(0,255,0), 3); + } + } - // adding new features, using mask around existing feature 
positions - Mat featureMask = mask.clone(); - for (unsigned int n=0;n<currPts.size(); n++) - for (int j=MAX(0, currPts[n].x-params.minFeatureDistanceKLT); j<MIN(videoSize.width, currPts[n].x+params.minFeatureDistanceKLT+1); j++) - for (int i=MAX(0, currPts[n].y-params.minFeatureDistanceKLT); i<MIN(videoSize.height, currPts[n].y+params.minFeatureDistanceKLT+1); i++) - featureMask.at<uchar>(i,j)=0; - goodFeaturesToTrack(currentFrameBW, newPts, params.maxNFeatures, params.featureQuality, params.minFeatureDistanceKLT, featureMask, params.blockSize, params.useHarrisDetector, params.k); - BOOST_FOREACH(Point2f p, newPts) { //for (unsigned int i=0; i<newPts.size(); i++) { - FeatureTrajectoryPtr f = FeatureTrajectoryPtr(new FeatureTrajectory(frameNum, p, homography)); - featurePointMatches.push_back(FeaturePointMatch(f, currPts.size())); - currPts.push_back(p); - } + // adding new features, using mask around existing feature positions + Mat featureMask = mask.clone(); + for (unsigned int n=0;n<currPts.size(); n++) + for (int j=MAX(0, currPts[n].x-params.minFeatureDistanceKLT); j<MIN(videoSize.width, currPts[n].x+params.minFeatureDistanceKLT+1); j++) + for (int i=MAX(0, currPts[n].y-params.minFeatureDistanceKLT); i<MIN(videoSize.height, currPts[n].y+params.minFeatureDistanceKLT+1); i++) + featureMask.at<uchar>(i,j)=0; + goodFeaturesToTrack(currentFrameBW, newPts, params.maxNFeatures, params.featureQuality, params.minFeatureDistanceKLT, featureMask, params.blockSize, params.useHarrisDetector, params.k); + BOOST_FOREACH(Point2f p, newPts) { //for (unsigned int i=0; i<newPts.size(); i++) { + FeatureTrajectoryPtr f = FeatureTrajectoryPtr(new FeatureTrajectory(frameNum, p, homography)); + featurePointMatches.push_back(FeaturePointMatch(f, currPts.size())); + currPts.push_back(p); + } - if (params.display) { - imshow("mask", featureMask*256); - imshow("frame", frame); - key = waitKey(2); - } - previousFrameBW = currentFrameBW.clone(); - prevPts = currPts; + if (params.display) { 
+ imshow("mask", featureMask*256); + imshow("frame", frame); + key = waitKey(2); + } + previousFrameBW = currentFrameBW.clone(); + prevPts = currPts; } // save the remaining currently tracked features
--- a/include/InputFrameListModule.h Thu May 07 16:09:47 2015 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,30 +0,0 @@ -#ifndef INPUT_FRAME_LIST_MODULE_H -#define INPUT_FRAME_LIST_MODULE_H - -#include "InputFrameProviderIface.h" - -#include <string> -#include <vector> - -class InputFrameListModule : public InputFrameProviderIface -{ - public: - InputFrameListModule(const std::string& _dirname); - ~InputFrameListModule(); - - bool getNextFrame(cv::Mat&); - unsigned int getNbFrames(); - bool isOpen() const { return mInit;} - void setFrameNumber(const unsigned int& frameNumber); - - virtual const cv::Size& getSize() const { return mSize;} - private: - void loadImageList(void); - std::vector<std::string> filenames; - unsigned int mCurrentIdx; - bool mInit; - std::string dirname; - cv::Size mSize; -}; - -#endif
--- a/include/InputFrameProviderIface.h Thu May 07 16:09:47 2015 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,19 +0,0 @@ -#ifndef INPUT_FRAME_PROVIDER_IFACE_H -#define INPUT_FRAME_PROVIDER_IFACE_H - -#include "opencv2/core/core.hpp" -#include <string> - - -class InputFrameProviderIface -{ -public: - virtual ~InputFrameProviderIface(){} - virtual bool getNextFrame(cv::Mat&)=0; - virtual unsigned int getNbFrames() = 0; - virtual bool isOpen() const = 0; - virtual void setFrameNumber(const unsigned int& frameNumber) = 0; - virtual const cv::Size& getSize() const = 0; -}; - -#endif
--- a/include/InputVideoFileModule.h Thu May 07 16:09:47 2015 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,32 +0,0 @@ -#ifndef INPUT_VIDEO_FILE_MODULE_H -#define INPUT_VIDEO_FILE_MODULE_H - -#include "InputFrameProviderIface.h" -#include <string> -#include "opencv2/core/core.hpp" -#include "opencv2/highgui/highgui.hpp" - -class InputVideoFileModule : public InputFrameProviderIface -{ -public: - InputVideoFileModule(const std::string& videoPath); - ~InputVideoFileModule(); - - bool getNextFrame(cv::Mat&); - - unsigned int getNbFrames(){ return mNumberOfFrame;} - - bool isOpen() const { return mInit;} - - void setFrameNumber(const unsigned int& frameNumber); - - const cv::Size& getSize() const { return mSize;} - -private: - cv::Size mSize; - cv::VideoCapture mVideoCapture; - bool mInit; - int mNumberOfFrame; -}; - -#endif