Mercurial Hosting > traffic-intelligence
comparison python/cvutils.py @ 766:6022350f8173 dev
updated to OpenCV 3.1
author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
---|---|
date | Mon, 21 Dec 2015 15:15:45 -0500 |
parents | 43ae3a1af290 |
children | dfdb2a3722cc |
comparison
equal
deleted
inserted
replaced
765:9aac5f016842 | 766:6022350f8173 |
---|---|
153 capture = cv2.VideoCapture(filename) | 153 capture = cv2.VideoCapture(filename) |
154 if capture.isOpened(): | 154 if capture.isOpened(): |
155 key = -1 | 155 key = -1 |
156 ret = True | 156 ret = True |
157 frameNum = firstFrameNum | 157 frameNum = firstFrameNum |
158 capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNum) | 158 capture.set(cv2.CAP_PROP_POS_FRAMES, firstFrameNum) |
159 while ret and not quitKey(key): | 159 while ret and not quitKey(key): |
160 #ret, img = capture.read() | 160 #ret, img = capture.read() |
161 for i in xrange(step): | 161 for i in xrange(step): |
162 ret, img = capture.read() | 162 ret, img = capture.read() |
163 if ret: | 163 if ret: |
164 if printFrames: | 164 if printFrames: |
165 print('frame {0}'.format(frameNum)) | 165 print('frame {0}'.format(frameNum)) |
166 frameNum+=step | 166 frameNum+=step |
167 if text is not None: | 167 if text is not None: |
168 cv2.putText(img, text, (10,50), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvRed) | 168 cv2.putText(img, text, (10,50), cv2.FONT_HERSHEY_PLAIN, 1, cvRed) |
169 cvImshow(windowName, img, rescale) | 169 cvImshow(windowName, img, rescale) |
170 key = cv2.waitKey(wait) | 170 key = cv2.waitKey(wait) |
171 if saveKey(key): | 171 if saveKey(key): |
172 cv2.imwrite('image-{}.png'.format(frameNum), img) | 172 cv2.imwrite('image-{}.png'.format(frameNum), img) |
173 cv2.destroyAllWindows() | 173 cv2.destroyAllWindows() |
174 else: | 174 else: |
175 print('Video capture for {} failed'.format(filename)) | 175 print('Video capture for {} failed'.format(filename)) |
176 | 176 |
177 def infoVideo(filename): | 177 def infoVideo(filename): |
178 '''Provides all available info on video ''' | 178 '''Provides all available info on video ''' |
179 cvPropertyNames = {cv2.cv.CV_CAP_PROP_FORMAT: "format", | 179 cvPropertyNames = {cv2.CAP_PROP_FORMAT: "format", |
180 cv2.cv.CV_CAP_PROP_FOURCC: "codec (fourcc)", | 180 cv2.CAP_PROP_FOURCC: "codec (fourcc)", |
181 cv2.cv.CV_CAP_PROP_FPS: "fps", | 181 cv2.CAP_PROP_FPS: "fps", |
182 cv2.cv.CV_CAP_PROP_FRAME_COUNT: "number of frames", | 182 cv2.CAP_PROP_FRAME_COUNT: "number of frames", |
183 cv2.cv.CV_CAP_PROP_FRAME_HEIGHT: "height", | 183 cv2.CAP_PROP_FRAME_HEIGHT: "height", |
184 cv2.cv.CV_CAP_PROP_FRAME_WIDTH: "width", | 184 cv2.CAP_PROP_FRAME_WIDTH: "width", |
185 cv2.cv.CV_CAP_PROP_RECTIFICATION: "rectification", | 185 cv2.CAP_PROP_RECTIFICATION: "rectification", |
186 cv2.cv.CV_CAP_PROP_SATURATION: "saturation"} | 186 cv2.CAP_PROP_SATURATION: "saturation"} |
187 capture = cv2.VideoCapture(filename) | 187 capture = cv2.VideoCapture(filename) |
188 if capture.isOpened(): | 188 if capture.isOpened(): |
189 for cvprop in [#cv2.cv.CV_CAP_PROP_BRIGHTNESS | 189 for cvprop in [#cv2.CAP_PROP_BRIGHTNESS |
190 #cv2.cv.CV_CAP_PROP_CONTRAST | 190 #cv2.CAP_PROP_CONTRAST |
191 #cv2.cv.CV_CAP_PROP_CONVERT_RGB | 191 #cv2.CAP_PROP_CONVERT_RGB |
192 #cv2.cv.CV_CAP_PROP_EXPOSURE | 192 #cv2.CAP_PROP_EXPOSURE |
193 cv2.cv.CV_CAP_PROP_FORMAT, | 193 cv2.CAP_PROP_FORMAT, |
194 cv2.cv.CV_CAP_PROP_FOURCC, | 194 cv2.CAP_PROP_FOURCC, |
195 cv2.cv.CV_CAP_PROP_FPS, | 195 cv2.CAP_PROP_FPS, |
196 cv2.cv.CV_CAP_PROP_FRAME_COUNT, | 196 cv2.CAP_PROP_FRAME_COUNT, |
197 cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, | 197 cv2.CAP_PROP_FRAME_HEIGHT, |
198 cv2.cv.CV_CAP_PROP_FRAME_WIDTH, | 198 cv2.CAP_PROP_FRAME_WIDTH, |
199 #cv2.cv.CV_CAP_PROP_GAIN, | 199 #cv2.CAP_PROP_GAIN, |
200 #cv2.cv.CV_CAP_PROP_HUE | 200 #cv2.CAP_PROP_HUE |
201 #cv2.cv.CV_CAP_PROP_MODE | 201 #cv2.CAP_PROP_MODE |
202 #cv2.cv.CV_CAP_PROP_POS_AVI_RATIO | 202 #cv2.CAP_PROP_POS_AVI_RATIO |
203 #cv2.cv.CV_CAP_PROP_POS_FRAMES | 203 #cv2.CAP_PROP_POS_FRAMES |
204 #cv2.cv.CV_CAP_PROP_POS_MSEC | 204 #cv2.CAP_PROP_POS_MSEC |
205 #cv2.cv.CV_CAP_PROP_RECTIFICATION, | 205 #cv2.CAP_PROP_RECTIFICATION, |
206 #cv2.cv.CV_CAP_PROP_SATURATION | 206 #cv2.CAP_PROP_SATURATION |
207 ]: | 207 ]: |
208 prop = capture.get(cvprop) | 208 prop = capture.get(cvprop) |
209 if cvprop == cv2.cv.CV_CAP_PROP_FOURCC and prop > 0: | 209 if cvprop == cv2.CAP_PROP_FOURCC and prop > 0: |
210 prop = int2FOURCC(int(prop)) | 210 prop = int2FOURCC(int(prop)) |
211 print('Video {}: {}'.format(cvPropertyNames[cvprop], prop)) | 211 print('Video {}: {}'.format(cvPropertyNames[cvprop], prop)) |
212 else: | 212 else: |
213 print('Video capture for {} failed'.format(filename)) | 213 print('Video capture for {} failed'.format(filename)) |
214 | 214 |
216 '''Returns nFrames images from the video sequence''' | 216 '''Returns nFrames images from the video sequence''' |
217 from math import floor, log10 | 217 from math import floor, log10 |
218 images = [] | 218 images = [] |
219 capture = cv2.VideoCapture(videoFilename) | 219 capture = cv2.VideoCapture(videoFilename) |
220 if capture.isOpened(): | 220 if capture.isOpened(): |
221 rawCount = capture.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT) | 221 rawCount = capture.get(cv2.CAP_PROP_FRAME_COUNT) |
222 if rawCount < 0: | 222 if rawCount < 0: |
223 rawCount = firstFrameNum+nFrames+1 | 223 rawCount = firstFrameNum+nFrames+1 |
224 nDigits = int(floor(log10(rawCount)))+1 | 224 nDigits = int(floor(log10(rawCount)))+1 |
225 ret = False | 225 ret = False |
226 capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNum) | 226 capture.set(cv2.CAP_PROP_POS_FRAMES, firstFrameNum) |
227 imgNum = 0 | 227 imgNum = 0 |
228 while imgNum<nFrames: | 228 while imgNum<nFrames: |
229 ret, img = capture.read() | 229 ret, img = capture.read() |
230 i = 0 | 230 i = 0 |
231 while not ret and i<10: | 231 while not ret and i<10: |
244 return images | 244 return images |
245 | 245 |
246 def getFPS(videoFilename): | 246 def getFPS(videoFilename): |
247 capture = cv2.VideoCapture(videoFilename) | 247 capture = cv2.VideoCapture(videoFilename) |
248 if capture.isOpened(): | 248 if capture.isOpened(): |
249 fps = capture.get(cv2.cv.CV_CAP_PROP_FPS) | 249 fps = capture.get(cv2.CAP_PROP_FPS) |
250 capture.release() | 250 capture.release() |
251 return fps | 251 return fps |
252 else: | 252 else: |
253 print('Video capture for {} failed'.format(videoFilename)) | 253 print('Video capture for {} failed'.format(videoFilename)) |
254 return None | 254 return None |
285 '''Displays the objects overlaid frame by frame over the video ''' | 285 '''Displays the objects overlaid frame by frame over the video ''' |
286 from moving import userTypeNames | 286 from moving import userTypeNames |
287 from math import ceil, log10 | 287 from math import ceil, log10 |
288 | 288 |
289 capture = cv2.VideoCapture(videoFilename) | 289 capture = cv2.VideoCapture(videoFilename) |
290 width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)) | 290 width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)) |
291 height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)) | 291 height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)) |
292 | 292 |
293 windowName = 'frame' | 293 windowName = 'frame' |
294 if rescale == 1.: | 294 if rescale == 1.: |
295 cv2.namedWindow(windowName, cv2.WINDOW_NORMAL) | 295 cv2.namedWindow(windowName, cv2.WINDOW_NORMAL) |
296 | 296 |
298 [map1, map2] = computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients) | 298 [map1, map2] = computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients) |
299 if capture.isOpened(): | 299 if capture.isOpened(): |
300 key = -1 | 300 key = -1 |
301 ret = True | 301 ret = True |
302 frameNum = firstFrameNum | 302 frameNum = firstFrameNum |
303 capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNum) | 303 capture.set(cv2.CAP_PROP_POS_FRAMES, firstFrameNum) |
304 if lastFrameNumArg is None: | 304 if lastFrameNumArg is None: |
305 from sys import maxint | 305 from sys import maxint |
306 lastFrameNum = maxint | 306 lastFrameNum = maxint |
307 else: | 307 else: |
308 lastFrameNum = lastFrameNumArg | 308 lastFrameNum = lastFrameNumArg |
336 if userTypeNames[obj.userType] != 'unknown': | 336 if userTypeNames[obj.userType] != 'unknown': |
337 objDescription += userTypeNames[obj.userType][0].upper() | 337 objDescription += userTypeNames[obj.userType][0].upper() |
338 if len(annotations) > 0: # if we loaded annotations, but there is no match | 338 if len(annotations) > 0: # if we loaded annotations, but there is no match |
339 if frameNum not in toMatches[obj.getNum()]: | 339 if frameNum not in toMatches[obj.getNum()]: |
340 objDescription += " FA" | 340 objDescription += " FA" |
341 cv2.putText(img, objDescription, obj.projectedPositions[frameNum-obj.getFirstInstant()].asint().astuple(), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvColors[obj.getNum()]) | 341 cv2.putText(img, objDescription, obj.projectedPositions[frameNum-obj.getFirstInstant()].asint().astuple(), cv2.FONT_HERSHEY_PLAIN, 1, cvColors[obj.getNum()]) |
342 # plot object bounding boxes | 342 # plot object bounding boxes |
343 if frameNum in boundingBoxes.keys(): | 343 if frameNum in boundingBoxes.keys(): |
344 for rect in boundingBoxes[frameNum]: | 344 for rect in boundingBoxes[frameNum]: |
345 cv2.rectangle(img, rect[0].asint().astuple(), rect[1].asint().astuple(), cvColors[obj.getNum()]) | 345 cv2.rectangle(img, rect[0].asint().astuple(), rect[1].asint().astuple(), cvColors[obj.getNum()]) |
346 # plot ground truth | 346 # plot ground truth |
349 if gt.existsAtInstant(frameNum): | 349 if gt.existsAtInstant(frameNum): |
350 if frameNum in gtMatches[gt.getNum()]: | 350 if frameNum in gtMatches[gt.getNum()]: |
351 color = cvColors[gtMatches[gt.getNum()][frameNum]] # same color as object | 351 color = cvColors[gtMatches[gt.getNum()][frameNum]] # same color as object |
352 else: | 352 else: |
353 color = cvRed | 353 color = cvRed |
354 cv2.putText(img, 'Miss', gt.topLeftPositions[frameNum-gt.getFirstInstant()].asint().astuple(), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvRed) | 354 cv2.putText(img, 'Miss', gt.topLeftPositions[frameNum-gt.getFirstInstant()].asint().astuple(), cv2.FONT_HERSHEY_PLAIN, 1, cvRed) |
355 cv2.rectangle(img, gt.topLeftPositions[frameNum-gt.getFirstInstant()].asint().astuple(), gt.bottomRightPositions[frameNum-gt.getFirstInstant()].asint().astuple(), color) | 355 cv2.rectangle(img, gt.topLeftPositions[frameNum-gt.getFirstInstant()].asint().astuple(), gt.bottomRightPositions[frameNum-gt.getFirstInstant()].asint().astuple(), color) |
356 # saving images and going to next | 356 # saving images and going to next |
357 if not saveAllImages: | 357 if not saveAllImages: |
358 cvImshow(windowName, img, rescale) | 358 cvImshow(windowName, img, rescale) |
359 key = cv2.waitKey() | 359 key = cv2.waitKey() |
360 if saveAllImages or saveKey(key): | 360 if saveAllImages or saveKey(key): |
361 cv2.imwrite('image-{{:0{}}}.png'.format(nZerosFilename).format(frameNum), img) | 361 cv2.imwrite('image-{{:0{}}}.png'.format(nZerosFilename).format(frameNum), img) |
362 frameNum += nFramesStep | 362 frameNum += nFramesStep |
363 if nFramesStep > 1: | 363 if nFramesStep > 1: |
364 capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum) | 364 capture.set(cv2.CAP_PROP_POS_FRAMES, frameNum) |
365 cv2.destroyAllWindows() | 365 cv2.destroyAllWindows() |
366 else: | 366 else: |
367 print 'Cannot load file ' + videoFilename | 367 print 'Cannot load file ' + videoFilename |
368 | 368 |
369 def computeHomographyFromPDTV(camera): | 369 def computeHomographyFromPDTV(camera): |