Mercurial Hosting > traffic-intelligence
changeset 801:c5f98916297e
merged with dev branch
author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
---|---|
date | Tue, 31 May 2016 17:07:23 -0400 |
parents | a34ec862371f (current diff) 2cade72d75ad (diff) |
children | 180b6b0231c0 |
files | |
diffstat | 3 files changed, 20 insertions(+), 24 deletions(-) [+] |
line wrap: on
line diff
--- a/c/feature-based-tracking.cpp	Mon May 09 15:33:11 2016 -0400
+++ b/c/feature-based-tracking.cpp	Tue May 31 17:07:23 2016 -0400
@@ -62,10 +62,6 @@
 }
 
 void trackFeatures(const KLTFeatureTrackingParameters& params) {
-  // BriefDescriptorExtractor brief(32);
-  // const int DESIRED_FTRS = 500;
-  // GridAdaptedFeatureDetector detector(new FastFeatureDetector(10, true), DESIRED_FTRS, 4, 4);
-
   Mat homography = ::loadMat(params.homographyFilename, " ");
   Mat invHomography;
   if (params.display && !homography.empty())
@@ -174,8 +170,7 @@
 	  break;
       }
     }
-    
-    
+
     cvtColor(frame, currentFrameBW, CV_RGB2GRAY);
 
     if (!prevPts.empty()) {
@@ -188,9 +183,9 @@
       while (iter != featurePointMatches.end()) {
 	bool deleteFeature = false;
 
-	if (status[iter->pointNum]) {
+	if (status[iter->pointNum] && (mask.at<uchar>(static_cast<int>(round(currPts[iter->pointNum].y)), static_cast<int>(round(currPts[iter->pointNum].x))) != 0)) {
 	  iter->feature->addPoint(frameNum, currPts[iter->pointNum], homography);
-	  
+
 	  deleteFeature = iter->feature->isDisplacementSmall(params.nDisplacements, minTotalFeatureDisplacement)
 	    || !iter->feature->isMotionSmooth(params.accelerationBound, params.deviationBound);
 	  if (deleteFeature)
@@ -219,14 +214,9 @@
 	if (params.display) {
 	  BOOST_FOREACH(FeaturePointMatch fp, featurePointMatches)
 	    fp.feature->draw(frame, invHomography, Colors::red());
-	  // object detection
-	  // vector<Rect> locations;
-	  // hog.detectMultiScale(frame, locations, 0, Size(8,8), Size(32,32), 1.05, 2);
-	  // BOOST_FOREACH(Rect r, locations)
-	  //   rectangle(frame, r.tl(), r.br(), cv::Scalar(0,255,0), 3);
 	}
       }
-      
+
       // adding new features, using mask around existing feature positions
       Mat featureMask = mask.clone();
       for (unsigned int n=0;n<currPts.size(); n++)
@@ -234,7 +224,7 @@
 	for (int i=MAX(0, currPts[n].y-params.minFeatureDistanceKLT); i<MIN(videoSize.height, currPts[n].y+params.minFeatureDistanceKLT+1); i++)
 	  featureMask.at<uchar>(i,j)=0;
       goodFeaturesToTrack(currentFrameBW, newPts, params.maxNFeatures, 
			  params.featureQuality, params.minFeatureDistanceKLT, featureMask, params.blockSize, params.useHarrisDetector, params.k);
-      BOOST_FOREACH(Point2f p, newPts) { //for (unsigned int i=0; i<newPts.size(); i++) {
+      BOOST_FOREACH(Point2f p, newPts) {
 	FeatureTrajectoryPtr f = FeatureTrajectoryPtr(new FeatureTrajectory(frameNum, p, homography));
 	featurePointMatches.push_back(FeaturePointMatch(f, currPts.size()));
 	currPts.push_back(p);
--- a/python/cvutils.py	Mon May 09 15:33:11 2016 -0400
+++ b/python/cvutils.py	Tue May 31 17:07:23 2016 -0400
@@ -218,31 +218,33 @@
         else:
             print('Video capture for {} failed'.format(filename))
 
-    def getImagesFromVideo(videoFilename, firstFrameNum = 0, nFrames = 1, saveImage = False, outputPrefix = 'image'):
+    def getImagesFromVideo(videoFilename, firstFrameNum = 0, lastFrameNum = 1, step = 1, saveImage = False, outputPrefix = 'image'):
         '''Returns nFrames images from the video sequence'''
         images = []
         capture = cv2.VideoCapture(videoFilename)
         if capture.isOpened():
            rawCount = capture.get(cv2.CAP_PROP_FRAME_COUNT)
            if rawCount < 0:
-               rawCount = firstFrameNum+nFrames+1
+               rawCount = lastFrameNum+1
            nDigits = int(floor(log10(rawCount)))+1
            ret = False
            capture.set(cv2.CAP_PROP_POS_FRAMES, firstFrameNum)
-           imgNum = 0
-           while imgNum<nFrames:
+           frameNum = firstFrameNum
+           while frameNum<=lastFrameNum and frameNum<rawCount:
                ret, img = capture.read()
                i = 0
                while not ret and i<10:
                    ret, img = capture.read()
                    i += 1
-               if img.size>0:
+               if img is not None and img.size>0:
                    if saveImage:
-                       imgNumStr = format(firstFrameNum+imgNum, '0{}d'.format(nDigits))
-                       cv2.imwrite(outputPrefix+imgNumStr+'.png', img)
+                       frameNumStr = format(frameNum, '0{}d'.format(nDigits))
+                       cv2.imwrite(outputPrefix+frameNumStr+'.png', img)
                    else:
                        images.append(img)
-                   imgNum +=1
+                   frameNum +=step
+                   if step > 1:
+                       capture.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
            capture.release()
         else:
            print('Video capture for {} failed'.format(videoFilename))
--- a/python/traffic_engineering.py	Mon May 09 15:33:11 2016 -0400
+++ b/python/traffic_engineering.py	Tue May 31 17:07:23 2016 -0400
@@ -309,7 +309,11 @@
 
 def uniformDelay(cycleLength, effectiveGreen, saturationDegree):
     '''Computes the uniform delay'''
-    return 0.5*cycleLength*(1-float(effectiveGreen)/cycleLength)/(1-float(effectiveGreen*saturationDegree)/cycleLength)
+    return 0.5*cycleLength*(1-float(effectiveGreen)/cycleLength)**2/(1-float(effectiveGreen*saturationDegree)/cycleLength)
+
+def randomDelay(volume, saturationDegree):
+    '''Computes the random delay = queueing time for M/D/1'''
+    return saturationDegree**2/(2*volume*(1-saturationDegree))
 
 def incrementalDelay(T, X, c, k=0.5, I=1):
     '''Computes the incremental delay (HCM)