Mercurial Hosting > traffic-intelligence
comparison scripts/compute-homography.py @ 638:852f5de42d01
added functionality to read Aliaksei Tsai camera model data
author:   Nicolas Saunier <nicolas.saunier@polymtl.ca>
date:     Wed, 08 Apr 2015 16:07:15 +0200
parents:  3058e00887bc
children: 4e7925cb4f8f
Comparison view (legend: equal / deleted / inserted / replaced), showing changeset 637:c9a0b72979fd (left column) against 638:852f5de42d01 (right column):
4 | 4 |
5 import matplotlib.pyplot as plt | 5 import matplotlib.pyplot as plt |
6 import numpy as np | 6 import numpy as np |
7 import cv2 | 7 import cv2 |
8 | 8 |
9 import cvutils | 9 import cvutils, utils, storage |
10 import utils | 10 |
11 # TODO add option to use RANSAC or other robust homography estimation method? | |
11 | 12 |
12 parser = argparse.ArgumentParser(description='The program computes the homography matrix from at least 4 non-colinear point correspondences inputed in the same order in a video frame and a aerial photo/ground map, or from the list of corresponding points in the two planes.', epilog = '''The point correspondence file contains at least 4 non-colinear point coordinates | 13 parser = argparse.ArgumentParser(description='The program computes the homography matrix from at least 4 non-colinear point correspondences inputed in the same order in a video frame and a aerial photo/ground map, or from the list of corresponding points in the two planes.', epilog = '''The point correspondence file contains at least 4 non-colinear point coordinates |
13 with the following format: | 14 with the following format: |
14 - the first two lines are the x and y coordinates in the projected space (usually world space) | 15 - the first two lines are the x and y coordinates in the projected space (usually world space) |
15 - the last two lines are the x and y coordinates in the origin space (usually image space) | 16 - the last two lines are the x and y coordinates in the origin space (usually image space) |
18 and a ration to convert pixels to world distance unit (eg meters per pixel), | 19 and a ration to convert pixels to world distance unit (eg meters per pixel), |
19 the images will be shown in turn and the user should click | 20 the images will be shown in turn and the user should click |
20 in the same order the corresponding points in world and image spaces.''', formatter_class=argparse.RawDescriptionHelpFormatter) | 21 in the same order the corresponding points in world and image spaces.''', formatter_class=argparse.RawDescriptionHelpFormatter) |
21 | 22 |
22 parser.add_argument('-p', dest = 'pointCorrespondencesFilename', help = 'name of the text file containing the point correspondences') | 23 parser.add_argument('-p', dest = 'pointCorrespondencesFilename', help = 'name of the text file containing the point correspondences') |
24 parser.add_argument('--tsai', dest = 'tsaiCameraFilename', help = 'name of the text file containing the camera parameter following the pinhole camera model (Lund format)') # caution, this is Aliaksei's format | |
23 parser.add_argument('-i', dest = 'videoFrameFilename', help = 'filename of the video frame') | 25 parser.add_argument('-i', dest = 'videoFrameFilename', help = 'filename of the video frame') |
24 parser.add_argument('-w', dest = 'worldFilename', help = 'filename of the aerial photo/ground map') | 26 parser.add_argument('-w', dest = 'worldFilename', help = 'filename of the aerial photo/ground map') |
25 parser.add_argument('-n', dest = 'nPoints', help = 'number of corresponding points to input', default = 4, type = int) | 27 parser.add_argument('-n', dest = 'nPoints', help = 'number of corresponding points to input', default = 4, type = int) |
26 parser.add_argument('-u', dest = 'unitsPerPixel', help = 'number of units per pixel', default = 1., type = float) | 28 parser.add_argument('-u', dest = 'unitsPerPixel', help = 'number of units per pixel', default = 1., type = float) |
27 parser.add_argument('--display', dest = 'displayPoints', help = 'display original and projected points on both images', action = 'store_true') | 29 parser.add_argument('--display', dest = 'displayPoints', help = 'display original and projected points on both images', action = 'store_true') |
76 | 78 |
77 homography = np.array([]) | 79 homography = np.array([]) |
78 if args.pointCorrespondencesFilename is not None: | 80 if args.pointCorrespondencesFilename is not None: |
79 worldPts, videoPts = cvutils.loadPointCorrespondences(args.pointCorrespondencesFilename) | 81 worldPts, videoPts = cvutils.loadPointCorrespondences(args.pointCorrespondencesFilename) |
80 homography, mask = cv2.findHomography(videoPts, worldPts) # method=0, ransacReprojThreshold=3 | 82 homography, mask = cv2.findHomography(videoPts, worldPts) # method=0, ransacReprojThreshold=3 |
83 elif args.tsaiCameraFilename is not None: # hack using PDTV | |
84 f = storage.openCheck(args.tsaiCameraFilename, quitting = True) | |
85 content = storage.getLines(f) | |
86 outFilename = '/tmp/camera.yaml' | |
87 out = storage.openCheck(outFilename, 'w') | |
88 out.write('data_class: TsaiCamera\n') | |
89 for l in content: | |
90 out.write(l.replace(' f:', 'f:').replace(' k:', 'k:').replace(',','.')+'\n') | |
91 out.close() | |
92 homography = cvutils.computeHomographyFromPDTV(outFilename) | |
81 elif args.videoFrameFilename is not None and args.worldFilename is not None: | 93 elif args.videoFrameFilename is not None and args.worldFilename is not None: |
82 worldImg = plt.imread(args.worldFilename) | 94 worldImg = plt.imread(args.worldFilename) |
83 videoImg = plt.imread(args.videoFrameFilename) | 95 videoImg = plt.imread(args.videoFrameFilename) |
84 if args.undistort: | 96 if args.undistort: |
85 [map1, map2] = cvutils.computeUndistortMaps(videoImg.shape[1], videoImg.shape[0], args.undistortedImageMultiplication, np.loadtxt(args.intrinsicCameraMatrixFilename), args.distortionCoefficients) | 97 [map1, map2] = cvutils.computeUndistortMaps(videoImg.shape[1], videoImg.shape[0], args.undistortedImageMultiplication, np.loadtxt(args.intrinsicCameraMatrixFilename), args.distortionCoefficients) |
101 f.close() | 113 f.close() |
102 | 114 |
103 if homography.size>0: | 115 if homography.size>0: |
104 np.savetxt('homography.txt',homography) | 116 np.savetxt('homography.txt',homography) |
105 | 117 |
106 if args.displayPoints and args.videoFrameFilename is not None and args.worldFilename is not None and homography.size>0: | 118 if args.displayPoints and args.videoFrameFilename is not None and args.worldFilename is not None and homography.size>0 and args.tsaiCameraFilename is None: |
107 worldImg = cv2.imread(args.worldFilename) | 119 worldImg = cv2.imread(args.worldFilename) |
108 videoImg = cv2.imread(args.videoFrameFilename) | 120 videoImg = cv2.imread(args.videoFrameFilename) |
109 if args.undistort: | 121 if args.undistort: |
110 [map1, map2] = cvutils.computeUndistortMaps(videoImg.shape[1], videoImg.shape[0], args.undistortedImageMultiplication, np.loadtxt(args.intrinsicCameraMatrixFilename), args.distortionCoefficients) | 122 [map1, map2] = cvutils.computeUndistortMaps(videoImg.shape[1], videoImg.shape[0], args.undistortedImageMultiplication, np.loadtxt(args.intrinsicCameraMatrixFilename), args.distortionCoefficients) |
111 videoImg = cv2.remap(videoImg, map1, map2, interpolation=cv2.INTER_LINEAR) | 123 videoImg = cv2.remap(videoImg, map1, map2, interpolation=cv2.INTER_LINEAR) |