Mercurial Hosting > traffic-intelligence
diff python/cvutils.py @ 231:249d65ff6c35
merged modifications for windows
| author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
|---|---|
| date | Mon, 02 Jul 2012 23:49:39 -0400 |
| parents | b7612c6d5702 |
| children | ab1a11176d7b |
line wrap: on
line diff
def playVideo(filename, firstFrameNum = 0):
    '''Plays the video in a window named 'frame', starting at firstFrameNum.

    Prints each frame number as it is shown; stops at end of video or when
    the user presses 'q' (keycode 113). Does nothing if the file cannot be
    opened.'''
    capture = cv2.VideoCapture(filename)
    if capture.isOpened():
        key = -1
        ret = True
        frameNum = firstFrameNum
        # seek to the requested start frame (legacy OpenCV 2.x constant,
        # consistent with the rest of this module)
        capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNum)
        while ret and key != 113: # 'q'
            ret, img = capture.read()
            if ret:
                print('frame {0}'.format(frameNum))
                frameNum += 1
                cv2.imshow('frame', img)
                key = cv2.waitKey(5)
        capture.release() # fix: release the capture handle when done

def getImagesFromVideo(filename, nImages = 1, saveImage = False):
    '''Returns (or saves) up to nImages images from the video sequence.

    If saveImage is True, frames are written to image0001.png, image0002.png,
    ... and the returned list is empty; otherwise the images are collected
    and returned. Each read is retried up to 10 times to skip over transient
    decode failures. Returns fewer than nImages if the video ends early.'''
    images = []
    capture = cv2.VideoCapture(filename)
    if capture.isOpened():
        numImg = 0
        while numImg < nImages:
            ret, img = capture.read()
            i = 0
            while not ret and i < 10:
                ret, img = capture.read()
                i += 1
            if not ret:
                # fix: end of video (or persistent failure) — the original
                # dereferenced img (None) here, crashing, or spun forever
                break
            if img.size > 0:
                numImg += 1
                if saveImage:
                    cv2.imwrite('image{0:04d}.png'.format(numImg), img)
                else:
                    images.append(img)
        capture.release() # fix: release the capture handle when done
    return images

def displayTrajectories(videoFilename, objects, homography = None, firstFrameNum = 0):
    '''Displays the objects overlaid frame by frame over the video.

    For each frame (starting at firstFrameNum), draws the trajectory and
    numeric id of every object existing at that instant. If homography is
    given, object positions are projected through it on the object's first
    frame; otherwise the raw positions are used. Stops at end of video or
    when the user presses 'q' (keycode 113).'''
    capture = cv2.VideoCapture(videoFilename)
    if capture.isOpened():
        key = -1
        ret = True
        frameNum = firstFrameNum
        capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNum)
        while ret and key != 113: # 'q'
            # fix: removed leftover debug print('capture') executed every frame
            ret, img = capture.read()
            if ret:
                print('frame {0}'.format(frameNum))
                for obj in objects:
                    if obj.existsAtInstant(frameNum):
                        # compute (and cache) display positions once, on the
                        # object's first frame
                        if obj.getFirstInstant() == frameNum:
                            if homography is not None: # fix: identity test for None
                                obj.projectedPositions = obj.positions.project(homography)
                            else:
                                obj.projectedPositions = obj.positions
                        draw(img, obj.projectedPositions, cvRed, frameNum - obj.getFirstInstant())
                        cv2.putText(img, '{0}'.format(obj.num), obj.projectedPositions[frameNum - obj.getFirstInstant()].asint().astuple(), cv2.FONT_HERSHEY_PLAIN, 1, cvRed)
                cv2.imshow('frame', img)
                key = cv2.waitKey(50)
                frameNum += 1
        capture.release() # fix: release the capture handle when done