Mercurial Hosting > traffic-intelligence
comparison python/cvutils.py @ 544:749672171789
added function to calibrate a camera's intrinsic parameters
author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
---|---|
date | Tue, 08 Jul 2014 15:32:47 -0400 |
parents | bd1ad468e928 |
children | 9816fab353f3 |
comparison
equal
deleted
inserted
replaced
543:cb213269d330 | 544:749672171789 |
---|---|
119 newCameraMatrix[1,2] = newImgSize[1]/2. | 119 newCameraMatrix[1,2] = newImgSize[1]/2. |
120 return cv2.initUndistortRectifyMap(intrinsicCameraMatrix, array(distortionCoefficients), identity(3), newCameraMatrix, newImgSize, cv2.CV_32FC1) | 120 return cv2.initUndistortRectifyMap(intrinsicCameraMatrix, array(distortionCoefficients), identity(3), newCameraMatrix, newImgSize, cv2.CV_32FC1) |
121 | 121 |
122 def playVideo(filename, firstFrameNum = 0, frameRate = -1, interactive = False, printFrames = True, text = None, rescale = 1.): | 122 def playVideo(filename, firstFrameNum = 0, frameRate = -1, interactive = False, printFrames = True, text = None, rescale = 1.): |
123 '''Plays the video''' | 123 '''Plays the video''' |
124 windowName = 'frame' | |
125 cv2.namedWindow(windowName, cv2.WINDOW_NORMAL) | |
124 wait = 5 | 126 wait = 5 |
125 if frameRate > 0: | 127 if frameRate > 0: |
126 wait = int(round(1000./frameRate)) | 128 wait = int(round(1000./frameRate)) |
127 if interactive: | 129 if interactive: |
128 wait = 0 | 130 wait = 0 |
138 if printFrames: | 140 if printFrames: |
139 print('frame {0}'.format(frameNum)) | 141 print('frame {0}'.format(frameNum)) |
140 frameNum+=1 | 142 frameNum+=1 |
141 if text != None: | 143 if text != None: |
142 cv2.putText(img, text, (10,50), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvRed) | 144 cv2.putText(img, text, (10,50), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvRed) |
143 cvImshow('frame', img, rescale) | 145 cvImshow(windowName, img, rescale) |
144 key = cv2.waitKey(wait) | 146 key = cv2.waitKey(wait) |
145 cv2.destroyAllWindows() | 147 cv2.destroyAllWindows() |
146 else: | 148 else: |
147 print('Video capture for {} failed'.format(filename)) | 149 print('Video capture for {} failed'.format(filename)) |
148 | 150 |
217 from math import ceil, log10 | 219 from math import ceil, log10 |
218 | 220 |
219 capture = cv2.VideoCapture(videoFilename) | 221 capture = cv2.VideoCapture(videoFilename) |
220 width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)) | 222 width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)) |
221 height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)) | 223 height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)) |
224 | |
225 windowName = 'frame' | |
226 #cv2.namedWindow(windowName, cv2.WINDOW_NORMAL) | |
222 | 227 |
223 if undistort: # setup undistortion | 228 if undistort: # setup undistortion |
224 [map1, map2] = computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients) | 229 [map1, map2] = computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients) |
225 if capture.isOpened(): | 230 if capture.isOpened(): |
226 key = -1 | 231 key = -1 |
257 objDescription = '{} '.format(obj.num) | 262 objDescription = '{} '.format(obj.num) |
258 if userTypeNames[obj.userType] != 'unknown': | 263 if userTypeNames[obj.userType] != 'unknown': |
259 objDescription += userTypeNames[obj.userType][0].upper() | 264 objDescription += userTypeNames[obj.userType][0].upper() |
260 cv2.putText(img, objDescription, obj.projectedPositions[frameNum-obj.getFirstInstant()].asint().astuple(), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvRed) | 265 cv2.putText(img, objDescription, obj.projectedPositions[frameNum-obj.getFirstInstant()].asint().astuple(), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvRed) |
261 if not saveAllImages: | 266 if not saveAllImages: |
262 cvImshow('frame', img, rescale) | 267 cvImshow(windowName, img, rescale) |
263 key = cv2.waitKey() | 268 key = cv2.waitKey() |
264 if saveAllImages or saveKey(key): | 269 if saveAllImages or saveKey(key): |
265 cv2.imwrite('image-{{:0{}}}.png'.format(nZerosFilename).format(frameNum), img) | 270 cv2.imwrite('image-{{:0{}}}.png'.format(nZerosFilename).format(frameNum), img) |
266 frameNum += nFramesStep | 271 frameNum += nFramesStep |
267 if nFramesStep > 1: | 272 if nFramesStep > 1: |
322 res = undistortedCoordinates(x,y, map1, map2) | 327 res = undistortedCoordinates(x,y, map1, map2) |
323 if not isnan(res).any(): | 328 if not isnan(res).any(): |
324 invMap1[y,x] = res[0] | 329 invMap1[y,x] = res[0] |
325 invMap2[y,x] = res[1] | 330 invMap2[y,x] = res[1] |
326 return invMap1, invMap2 | 331 return invMap1, invMap2 |
332 | |
def cameraIntrinsicCalibration(path, checkerBoardSize=[6,7], secondPassSearch=False, display=False):
    ''' Camera calibration: searches through all the images (jpg or png) located
    in _path_ for matches to a checkerboard pattern of size checkerBoardSize
    (rows, columns of inner corners). These images should all be of the same
    camera with the same resolution.

    For best results, use an asymmetric board and ensure that the image has
    very high contrast, including the background. Suitable checkerboard:
    http://ftp.isr.ist.utl.pt/pub/roswiki/attachments/camera_calibration(2f)Tutorials(2f)StereoCalibration/check-108.png

    Returns (camera_matrix, dist_coeffs) on success, after saving the camera
    matrix to 'intrinsic-camera.txt' in the current directory; returns False
    if no checkerboard pattern could be found in any image.

    The code below is based off of:
    https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html
    Modified by Paul St-Aubin
    '''
    from numpy import zeros, mgrid, float32, savetxt
    import glob, os

    # termination criteria for the optional sub-pixel corner refinement
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    # (checkerBoardSize is only read, never mutated, so the mutable default is safe)
    objp = zeros((checkerBoardSize[0]*checkerBoardSize[1],3), float32)
    objp[:,:2] = mgrid[0:checkerBoardSize[1],0:checkerBoardSize[0]].T.reshape(-1,2)

    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3d points in real world space
    imgpoints = [] # 2d points in image plane

    ## Loop through all images in _path_ (case-insensitive jpg/jpeg/png)
    images = glob.glob(os.path.join(path,'*.[jJ][pP][gG]'))+glob.glob(os.path.join(path,'*.[jJ][pP][eE][gG]'))+glob.glob(os.path.join(path,'*.[pP][nN][gG]'))
    for fname in images:
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, (checkerBoardSize[1],checkerBoardSize[0]), None)

        # If found, add object points, image points (after refining them)
        if ret:
            print('Found pattern in '+fname) # print() call for consistency with the rest of the module

            if secondPassSearch:
                # refine corner locations to sub-pixel accuracy
                corners = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)

            objpoints.append(objp)
            imgpoints.append(corners)

            # Draw and display the corners
            if display:
                img = cv2.drawChessboardCorners(img, (checkerBoardSize[1],checkerBoardSize[0]), corners, ret)
                # explicit None check: truth-testing a numpy array raises ValueError,
                # and drawChessboardCorners returns None under the old cv2 API
                if img is not None:
                    cv2.imshow('img',img)
                    cv2.waitKey(0)

    ## Close up image loading and calibrate
    cv2.destroyAllWindows()
    if len(objpoints) == 0 or len(imgpoints) == 0:
        return False
    try:
        # gray is only bound if at least one image was read; NameError guards
        # the case where images existed but none yielded a pattern match
        ret, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
    except NameError:
        return False
    savetxt('intrinsic-camera.txt', camera_matrix)
    return camera_matrix, dist_coeffs
327 | 396 |
328 def printCvMat(cvmat, out = stdout): | 397 def printCvMat(cvmat, out = stdout): |
329 '''Prints the cvmat to out''' | 398 '''Prints the cvmat to out''' |
330 print('Deprecated, use new interface') | 399 print('Deprecated, use new interface') |
331 for i in xrange(cvmat.rows): | 400 for i in xrange(cvmat.rows): |