Mercurial Hosting > traffic-intelligence
comparison python/cvutils.py @ 231:249d65ff6c35
merged modifications for windows
author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
---|---|
date | Mon, 02 Jul 2012 23:49:39 -0400 |
parents | b7612c6d5702 |
children | ab1a11176d7b |
comparison
equal
deleted
inserted
replaced
230:bc4ea09b1743 | 231:249d65ff6c35 |
---|---|
def draw(img, positions, color, lastCoordinate = None):
    '''Draws the trajectory 'positions' on image 'img' as a polyline of 'color'.

    positions: a Trajectory-like object (supports length() and indexing into
    points with asint()/astuple()) — assumed from usage, confirm against moving.py.
    lastCoordinate: if None, the whole trajectory is drawn; if >= 0, segments are
    drawn only up to that position index (capped at the trajectory length);
    if negative, nothing is drawn.'''
    if lastCoordinate is None:
        # draw all consecutive segments (0..length-2)
        last = positions.length()
    elif lastCoordinate >= 0:
        last = min(positions.length()-1, lastCoordinate)
    else:
        # negative index: range below is empty, nothing drawn (original behavior)
        last = lastCoordinate+1
    for i in range(0, last-1):
        # asint() rounds coordinates to integers as required by cv2.line
        cv2.line(img, positions[i].asint().astuple(), positions[i+1].asint().astuple(), color)
94 | 94 |
def playVideo(filename, firstFrameNum = 0):
    '''Plays the video in 'filename', starting at frame 'firstFrameNum',
    displaying each frame in a window and printing its frame number.

    Stops when the video ends or the user presses 'q' (keycode 113).'''
    capture = cv2.VideoCapture(filename)
    if capture.isOpened():
        key = -1
        ret = True
        frameNum = firstFrameNum
        # seek to the requested start frame before reading
        capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNum)
        while ret and key != 113: # 113 is the keycode for 'q'
            ret, img = capture.read()
            if ret:
                print('frame {0}'.format(frameNum))
                frameNum += 1
                cv2.imshow('frame', img)
                # small delay so frames render; also polls the keyboard
                key = cv2.waitKey(5)
105 | 110 |
def getImagesFromVideo(filename, nImages = 1, saveImage = False):
    '''Returns (up to) nImages images read from the video sequence in 'filename'.

    If saveImage is True, each grabbed frame is written to imageNNNN.png
    instead of being returned, and the returned list is empty.
    Fewer than nImages frames are returned if the video ends early.'''
    images = []
    capture = cv2.VideoCapture(filename)
    if capture.isOpened():
        numImg = 0
        while numImg < nImages:
            ret, img = capture.read()
            # retry a few times on transient read failures
            i = 0
            while not ret and i < 10:
                ret, img = capture.read()
                i += 1
            if not ret:
                # video exhausted or unreadable: stop instead of looping forever
                # (also avoids dereferencing img when no frame was grabbed)
                break
            if img.size > 0:
                numImg += 1
                if saveImage:
                    cv2.imwrite('image{0:04d}.png'.format(numImg), img)
                else:
                    images.append(img)
    return images
119 | 131 |
def displayTrajectories(videoFilename, objects, homography = None, firstFrameNum = 0):
    '''Displays the objects overlaid frame by frame over the video.

    objects: moving objects with existsAtInstant/getFirstInstant/positions
    (project types from this package — assumed from usage).
    homography: if given, object positions are projected to image space with it
    the first time each object appears; otherwise positions are used as-is.
    firstFrameNum: frame at which playback starts.
    Stops when the video ends or the user presses 'q' (keycode 113).'''
    capture = cv2.VideoCapture(videoFilename)
    if capture.isOpened():
        key = -1
        ret = True
        frameNum = firstFrameNum
        # seek to the requested start frame before reading
        capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNum)
        while ret and key != 113: # 113 is the keycode for 'q'
            ret, img = capture.read()
            if ret:
                print('frame {0}'.format(frameNum))
                for obj in objects:
                    if obj.existsAtInstant(frameNum):
                        # compute (and cache) display positions once, when the
                        # object first appears
                        if obj.getFirstInstant() == frameNum:
                            if homography is not None:
                                obj.projectedPositions = obj.positions.project(homography)
                            else:
                                obj.projectedPositions = obj.positions
                        draw(img, obj.projectedPositions, cvRed, frameNum-obj.getFirstInstant())
                        cv2.putText(img, '{0}'.format(obj.num), obj.projectedPositions[frameNum-obj.getFirstInstant()].asint().astuple(), cv2.FONT_HERSHEY_PLAIN, 1, cvRed)
                cv2.imshow('frame', img)
                key = cv2.waitKey(50)
                frameNum += 1
140 | 157 |
141 def printCvMat(cvmat, out = stdout): | 158 def printCvMat(cvmat, out = stdout): |