changeset 238:be3761a09b20

added functions to input point correspondences
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Mon, 09 Jul 2012 00:46:04 -0400
parents 6774bdce03f1
children 93c26e45efd8
files python/compute-homography.py
diffstat 1 files changed, 61 insertions(+), 18 deletions(-) [+]
line wrap: on
line diff
--- a/python/compute-homography.py	Fri Jul 06 01:08:38 2012 -0400
+++ b/python/compute-homography.py	Mon Jul 09 00:46:04 2012 -0400
@@ -2,13 +2,14 @@
 
 import sys,getopt
 
+import matplotlib.pyplot as plt
 import numpy as np
 import cv2
 
 import cvutils
 import utils
 
-options, args = getopt.getopt(sys.argv[1:], 'h',['help','video_frame='])
+options, args = getopt.getopt(sys.argv[1:], 'hp:i:w:n:u:',['help'])
 options = dict(options)
 
 # TODO process camera intrinsic and extrinsic parameters to obtain image to world homography, taking example from Work/src/python/generate-homography.py script
@@ -51,29 +52,71 @@
 
 # cvFindHomography(imagePoints, worldPoints, H);
 
+if '--help' in options.keys() or '-h' in options.keys() or len(options) == 0:
+    print('Usage: {0} --help|-h [-p point-correspondences.txt] [ -i video-frame] [ -w world-frame] [-n number-points] [-u unit-per-pixel=1]'.format(sys.argv[0]))
+    print('''The input data can be provided either as point correspondences already saved
+ in a text file or input by clicking a certain number of points (>=4)
+ in a video frame and a world image.
 
-if '--help' in options.keys() or '-h' in options.keys() or len(args) == 0:
-    print('Usage: {0} --help|-h [--video_frame <video frame filename>] [<point_correspondences.txt>]'.format(sys.argv[0]))
-    print('''The positional argument should be the name
- of a file containing at least 4 non-colinear point coordinates (point correspondences:
+The point correspondence file contains at least 4 non-colinear point coordinates 
+with the following format:
  - the first two lines are the x and y coordinates in the projected space (usually world space)
  - the last two lines are the x and y coordinates in the origin space (usually image space)
 
-if providing a video frame, the image points and back projected world points will be plotted''')
+If providing video and world images, with a number of points to input
+and a ratio to convert pixels to world distance unit (eg meters per pixel), 
+the images will be shown in turn and the user should click 
+in the same order the corresponding points in world and image spaces. ''')
     sys.exit()
 
-dstPts, srcPts = cvutils.loadPointCorrespondences(args[0])
-homography, mask = cv2.findHomography(srcPts, dstPts) # method=0, ransacReprojThreshold=3
-np.savetxt(utils.removeExtension(sys.argv[1])+'-homography.txt',homography)
+homography = np.array([])
+if '-p' in options.keys():
+    worldPts, videoPts = cvutils.loadPointCorrespondences(args[0])
+    homography, mask = cv2.findHomography(videoPts, worldPts) # method=0, ransacReprojThreshold=3
+elif '-i' in options.keys() and '-w' in options.keys():
+    nPoints = 4
+    if '-n' in options.keys():
+        nPoints = int(options['-n'])
+    unitsPerPixel = 1
+    if '-u' in options.keys():
+        unitsPerPixel = float(options['-u'])
+    worldImg = plt.imread(options['-w'])
+    videoImg = plt.imread(options['-i'])
+    print('Click on {0} points in the video frame'.format(nPoints))
+    plt.figure()
+    plt.imshow(videoImg)
+    videoPts = np.array(plt.ginput(nPoints))
+    print('Click on {0} points in the world image'.format(nPoints))
+    plt.figure()
+    plt.imshow(worldImg)
+    worldPts = unitsPerPixel*np.array(plt.ginput(nPoints))
+    plt.close('all')
+    homography, mask = cv2.findHomography(videoPts, worldPts)
+    # save the points in file
+    f = open('point-correspondences.txt', 'a')
+    np.savetxt(f, worldPts.T)
+    np.savetxt(f, videoPts.T)
+    f.close()
 
-if '--video_frame' in options.keys() and homography.size>0:
-    img = cv2.imread(options['--video_frame'])
-    for p in srcPts:
-        cv2.circle(img,tuple(p),2,cvutils.cvRed)
+if homography.size>0:
+    np.savetxt('homography.txt',homography)
+
+if '-i' in options.keys() and homography.size>0:
+    videoImg = cv2.imread(options['-i'])
+    worldImg = cv2.imread(options['-w'])
     invHomography = np.linalg.inv(homography)
-    projectedDstPts = cvutils.projectArray(invHomography, dstPts.T).T
-    for i,p in enumerate(projectedDstPts):
-        cv2.circle(img,tuple(np.int32(np.round(p))),2,cvutils.cvBlue)
-        print('img: {0} / projected: {1}'.format(srcPts[i], p))
-    cv2.imshow('video frame',img)
+    projectedWorldPts = cvutils.projectArray(invHomography, worldPts.T).T
+    if '-u' in options.keys():
+        unitsPerPixel = float(options['-u'])        
+        projectedVideoPts = cvutils.projectArray(invHomography, videoPts.T).T
+    for i in range(worldPts.shape[0]):
+        cv2.circle(videoImg,tuple(np.int32(np.round(videoPts[i]))),2,cvutils.cvRed)
+        cv2.circle(videoImg,tuple(np.int32(np.round(projectedWorldPts[i]))),2,cvutils.cvBlue)
+        if '-u' in options.keys():
+            cv2.circle(worldImg,tuple(np.int32(np.round(worldPts[i]/unitsPerPixel))),2,cvutils.cvRed)
+            cv2.circle(worldImg,tuple(np.int32(np.round(projectedVideoPts[i]/unitsPerPixel))),2,cvutils.cvRed)
+        #print('img: {0} / projected: {1}'.format(videoPts[i], p))
+    cv2.imshow('video frame',videoImg)
+    if '-u' in options.keys():
+        cv2.imshow('world image',worldImg)
     cv2.waitKey()