diff python/calibration-translation.py @ 160:b0719b3ad3db

created a function to load point correspondences and updated the scripts that use it
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Mon, 19 Sep 2011 16:43:28 -0400
parents 115f7f90286d
children 514f6b98cd8c
--- a/python/calibration-translation.py	Mon Sep 12 16:38:47 2011 -0400
+++ b/python/calibration-translation.py	Mon Sep 19 16:43:28 2011 -0400
@@ -12,16 +12,12 @@
 import cvutils
 
 # development for the data collected and stabilized by Paul in Summer 2011
-# todo test other features
+# todo write help, add options to control the parameters for matching (n points and distance)
 
 options = utils.parseCLIOptions('Program to re-calibrate an initial calibration based on point correspondences by adjusting the points to slightly different viewpoints, where all the points are still visible\n\nUsage: ', ['ref_video=', 'ref_points='], sys.argv, ['mask_img='])
-#, 'ref_homography='
 
-referenceVideoFilename=options['--ref_video']#'1440-1459_Mercalli.avi'
-#referenceHomographyFilename=options['--ref_homography']#'1440-1459_Mercalli-homography.txt'
-points = np.loadtxt(options['--ref_points'], dtype=np.float32) # '1440-1459_Mercalli-point-correspondences.txt'
-wldPts = points[:2,:].T
-imgPts = points[2:,:].T
+referenceVideoFilename=options['--ref_video']
+wldPts, imgPts = cvutils.loadPointCorrespondences(options['--ref_points'])
 
 def translatePoints(points, t):
     'points is Nx2, t is [x,y]'
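
The loading code removed above is replaced by the new cvutils.loadPointCorrespondences helper announced in the commit message. Judging from the removed lines, the helper presumably wraps something like the following sketch (the actual implementation lives in cvutils.py and may differ):

    import numpy as np

    def loadPointCorrespondences(filename):
        'Loads and returns the world and image point correspondences (wldPts, imgPts) from a text file'
        # first two rows hold world coordinates, the remaining rows image coordinates (one column per point)
        points = np.loadtxt(filename, dtype=np.float32)
        return (points[:2,:].T, points[2:,:].T)
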
@@ -32,19 +28,18 @@
 
 filenames = [f for f in utils.listfiles('.','avi')] # directory to examine should be current directory
 
-#referenceHomography = np.loadtxt(referenceHomographyFilename)
 referenceVideoIndex = filenames.index(referenceVideoFilename)
 indices = set(range(len(filenames)))
 indices.discard(referenceVideoIndex)
 
 images = {}
-#features = {}
 captures = {}
 
 captures[referenceVideoFilename] = cv2.VideoCapture(referenceVideoFilename)
 (ret, img) = captures[referenceVideoFilename].read()
 images[referenceVideoFilename] = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
 
+# load a mask image to compute the translation
 if '--mask_img' in options.keys():
     maskImg = cv2.imread('mask.png', cv2.CV_LOAD_IMAGE_GRAYSCALE) # todo add possibility to look in the whole image if not providing mask
 else:
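
The mask handling above reads a hardcoded 'mask.png' and leaves a todo for running without a mask. One possible way to address both, assuming the --mask_img option holds the mask path and that an all-white mask of the frame size is an acceptable way to search the whole image:

    # load the mask given on the command line, or build an all-white mask to search the whole image
    if '--mask_img' in options.keys():
        maskImg = cv2.imread(options['--mask_img'], cv2.CV_LOAD_IMAGE_GRAYSCALE)
    else:
        maskImg = 255*np.ones(images[referenceVideoFilename].shape, dtype=np.uint8)
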
@@ -57,7 +52,8 @@
     cv2.putText(displayRef, str(j+1), tuple(p), cv2.FONT_HERSHEY_PLAIN, 1, (255,0,0))
 cv2.imshow('Reference',displayRef)
 
-for f in filenames: # get suitable image references for each video
+# get suitable image references for each video
+for f in filenames: 
     captures[f] = cv2.VideoCapture(f)
     frameFilename = utils.removeExtension(f)+'-frame.png' # TODO if frame image already exists, no need to search for it again
     if not os.path.exists(frameFilename):
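
The TODO above notes that a frame image saved by a previous run could be reused instead of searching the video again. A minimal sketch of that shortcut, assuming the saved '-frame.png' can simply be read back as the grayscale reference image:

    frameFilename = utils.removeExtension(f)+'-frame.png'
    if os.path.exists(frameFilename):
        # reuse the frame saved by a previous run
        images[f] = cv2.imread(frameFilename, cv2.CV_LOAD_IMAGE_GRAYSCALE)
    else:
        # fall back to the existing search through the video frames
        pass
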
@@ -84,6 +80,7 @@
 
 plt.close('all')
 
+# validate or input point correspondences and compute homography
 for i in indices:
     t = cvutils.computeTranslation(images[filenames[referenceVideoIndex]], images[filenames[i]], referenceFeatures, 100, 10)
     print filenames[i],t
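
The todo added at the top of the file mentions command-line options to control the matching parameters that are hardcoded to 100 points and a distance of 10 in the call above. A possible sketch, where the option names 'n_points=' and 'max_distance=' are hypothetical and would have to be added to the optional arguments of utils.parseCLIOptions:

    # defaults match the values currently hardcoded in the call to computeTranslation
    nPoints = 100
    maxDistance = 10.
    if '--n_points' in options.keys():
        nPoints = int(options['--n_points'])
    if '--max_distance' in options.keys():
        maxDistance = float(options['--max_distance'])
    t = cvutils.computeTranslation(images[filenames[referenceVideoIndex]], images[filenames[i]], referenceFeatures, nPoints, maxDistance)
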
@@ -103,8 +100,6 @@
         print('No translation could be found automatically. You will have to manually input world reference points.')
 
     if t==None or key != cvutils.cvKeyNumbers['y']:# if no translation could be computed or it is not satisfactory
-        # image should be right to get points
-        # todo save image
         print('Select the corresponding points in the same order as in the reference image')
         plt.figure(1)
         plt.imshow(displayRef)
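
The comment added before this loop says the correspondences are validated or re-entered and a homography is then computed. A minimal sketch of that final step, where newImgPts is a hypothetical Nx2 array holding the image points retained for video i (either the translated reference points or the manually selected ones) and the homography is assumed to map image to world coordinates:

    # compute and save the image-to-world homography for this video
    homography, _ = cv2.findHomography(newImgPts, wldPts)
    np.savetxt(utils.removeExtension(filenames[i])+'-homography.txt', homography)
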