diff scripts/compute-homography.py @ 476:6551a3cf1750

modified compute-homography to work with argparse
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Tue, 25 Mar 2014 19:43:28 -0400
parents 51810d737d86
children d337bffd7283
line wrap: on
line diff
--- a/scripts/compute-homography.py	Thu Mar 13 01:48:04 2014 -0400
+++ b/scripts/compute-homography.py	Tue Mar 25 19:43:28 2014 -0400
@@ -1,6 +1,6 @@
 #! /usr/bin/env python
 
-import sys,getopt
+import sys, argparse
 
 import matplotlib.pyplot as plt
 import numpy as np
@@ -9,8 +9,24 @@
 import cvutils
 import utils
 
-options, args = getopt.getopt(sys.argv[1:], 'hp:i:w:n:u:',['help'])
-options = dict(options)
+parser = argparse.ArgumentParser(description='The program computes the homography matrix from at least 4 non-colinear point correspondences input in the same order in a video frame and an aerial photo/ground map, or from the list of corresponding points in the two planes.', epilog = '''The point correspondence file contains at least 4 non-colinear point coordinates 
+with the following format:
+ - the first two lines are the x and y coordinates in the projected space (usually world space)
+ - the last two lines are the x and y coordinates in the origin space (usually image space)
+
+If providing video and world images, with a number of points to input
+and a ratio to convert pixels to world distance unit (eg meters per pixel), 
+the images will be shown in turn and the user should click 
+in the same order the corresponding points in world and image spaces.''', formatter_class=argparse.RawDescriptionHelpFormatter,)
+
+parser.add_argument('-p', dest = 'pointCorrespondencesFilename', help = 'name of the text file containing the point correspondences')
+parser.add_argument('-i', dest = 'videoFrameFilename', help = 'filename of the video frame')
+parser.add_argument('-w', dest = 'worldFilename', help = 'filename of the aerial photo/ground map')
+parser.add_argument('-n', dest = 'nPoints', help = 'number of corresponding points to input', default = 4, type = int)
+parser.add_argument('-u', dest = 'unitsPerPixel', help = 'number of units per pixel', default = 1., type = float)
+parser.add_argument('--display', dest = 'displayPoints', help = 'display original and projected points on both images', action = 'store_true')
+
+args = parser.parse_args()
 
 # TODO process camera intrinsic and extrinsic parameters to obtain image to world homography, taking example from Work/src/python/generate-homography.py script
 # cameraMat = load(videoFilenamePrefix+'-camera.txt');
@@ -52,44 +68,22 @@
 
 # cvFindHomography(imagePoints, worldPoints, H);
 
-if '--help' in options.keys() or '-h' in options.keys() or len(options) == 0:
-    print('Usage: {0} --help|-h [-p point-correspondences.txt] [ -i video-frame] [ -w world-frame] [n number-points] [-u unit-per-pixel=1]'.format(sys.argv[0]))
-    print('''The input data can be provided either as point correspondences already saved
- in a text file or inputed by clicking a certain number of points (>=4)
- in a video frame and a world image.
-
-The point correspondence file contains at least 4 non-colinear point coordinates 
-with the following format:
- - the first two lines are the x and y coordinates in the projected space (usually world space)
- - the last two lines are the x and y coordinates in the origin space (usually image space)
-
-If providing video and world images, with a number of points to input
-and a ration to convert pixels to world distance unit (eg meters per pixel), 
-the images will be shown in turn and the user should click 
-in the same order the corresponding points in world and image spaces. ''')
-    sys.exit()
 
 homography = np.array([])
-if '-p' in options.keys():
-    worldPts, videoPts = cvutils.loadPointCorrespondences(options['-p'])
+if args.pointCorrespondencesFilename != None:
+    worldPts, videoPts = cvutils.loadPointCorrespondences(args.pointCorrespondencesFilename)
     homography, mask = cv2.findHomography(videoPts, worldPts) # method=0, ransacReprojThreshold=3
-elif '-i' in options.keys() and '-w' in options.keys():
-    nPoints = 4
-    if '-n' in options.keys():
-        nPoints = int(options['-n'])
-    unitsPerPixel = 1
-    if '-u' in options.keys():
-        unitsPerPixel = float(options['-u'])
-    worldImg = plt.imread(options['-w'])
-    videoImg = plt.imread(options['-i'])
-    print('Click on {0} points in the video frame'.format(nPoints))
+elif args.videoFrameFilename != None and args.worldFilename != None:
+    worldImg = plt.imread(args.worldFilename)
+    videoImg = plt.imread(args.videoFrameFilename)
+    print('Click on {0} points in the video frame'.format(args.nPoints))
     plt.figure()
     plt.imshow(videoImg)
-    videoPts = np.array(plt.ginput(nPoints, timeout=3000))
-    print('Click on {0} points in the world image'.format(nPoints))
+    videoPts = np.array(plt.ginput(args.nPoints, timeout=3000))
+    print('Click on {0} points in the world image'.format(args.nPoints))
     plt.figure()
     plt.imshow(worldImg)
-    worldPts = unitsPerPixel*np.array(plt.ginput(nPoints, timeout=3000))
+    worldPts = args.unitsPerPixel*np.array(plt.ginput(args.nPoints, timeout=3000))
     plt.close('all')
     homography, mask = cv2.findHomography(videoPts, worldPts)
     # save the points in file
@@ -101,23 +95,17 @@
 if homography.size>0:
     np.savetxt('homography.txt',homography)
 
-if '-i' in options.keys() and homography.size>0:
-    videoImg = cv2.imread(options['-i'])
-    worldImg = cv2.imread(options['-w'])
+if args.displayPoints and args.videoFrameFilename != None and args.worldFilename != None and homography.size>0:
+    worldImg = plt.imread(args.worldFilename)
+    videoImg = plt.imread(args.videoFrameFilename)
     invHomography = np.linalg.inv(homography)
     projectedWorldPts = cvutils.projectArray(invHomography, worldPts.T).T
-    if '-u' in options.keys():
-        unitsPerPixel = float(options['-u'])        
-        projectedVideoPts = cvutils.projectArray(invHomography, videoPts.T).T
+    projectedVideoPts = cvutils.projectArray(invHomography, videoPts.T).T
     for i in range(worldPts.shape[0]):
-        cv2.circle(videoImg,tuple(np.int32(np.round(videoPts[i]))),2,cvutils.cvRed)
-        cv2.circle(videoImg,tuple(np.int32(np.round(projectedWorldPts[i]))),2,cvutils.cvBlue)
-        if '-u' in options.keys():
-            cv2.circle(worldImg,tuple(np.int32(np.round(worldPts[i]/unitsPerPixel))),2,cvutils.cvRed)
-            cv2.circle(worldImg,tuple(np.int32(np.round(projectedVideoPts[i]/unitsPerPixel))),2,cvutils.cvRed)
-        #print('img: {0} / projected: {1}'.format(videoPts[i], p))
+        cv2.circle(worldImg,tuple(np.int32(np.round(worldPts[i]/args.unitsPerPixel))),2,cvutils.cvRed)
+        cv2.circle(worldImg,tuple(np.int32(np.round(projectedVideoPts[i]/args.unitsPerPixel))),2,cvutils.cvRed)
+        # TODO print numbers and in image space
     cv2.imshow('video frame',videoImg)
-    if '-u' in options.keys():
-        cv2.imshow('world image',worldImg)
+    #cv2.imshow('world image',worldImg)
     cv2.waitKey()
     cv2.destroyAllWindows()