changeset 157:3aab19947a34

added utility to recalibrate images with similar viewpoints
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Thu, 08 Sep 2011 19:25:02 -0400
parents 2eef5620c0b3
children 2d7c6d767a39
files python/calibration-translation.py
diffstat 1 files changed, 116 insertions(+), 0 deletions(-)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/python/calibration-translation.py	Thu Sep 08 19:25:02 2011 -0400
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+
+import sys
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+import cv2
+import utils
+import cvutils
+
+# development for the data collected and stabilized by Paul in Summer 2011
+# todo test other features
+
+options = utils.parseCLIOptions('Program to re-calibrate an initial calibration based on point correspondences by adjusting the points to slightly different viewpoints, where all the points are still visible\n\nUsage: ', ['ref_video=', 'ref_homography=', 'ref_points='], sys.argv, ['mask_img='])
+
+referenceVideoFilename=options['--ref_video']#'1440-1459_Mercalli.avi'
+referenceHomographyFilename=options['--ref_homography']#'1440-1459_Mercalli-homography.txt'
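+# the correspondence file has one column per point: the first two rows are world coordinates, the last two rows are image coordinates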
+points = np.loadtxt(options['--ref_points'], dtype=np.float32) # '1440-1459_Mercalli-point-correspondences.txt'
+wldPts = points[:2,:].T
+imgPts = points[2:,:].T
+
+def translatePoints(points, t):
+    'points is Nx2, t is [x,y]'
+    translated = points.copy()
+    for i in xrange(2):
+        translated[:,i] += t[i] # shift the x and y columns by the corresponding component of t
+    return translated
+
+filenames = [f for f in utils.listfiles('.','avi')] # directory to examine should be current directory
+
+referenceHomography = np.loadtxt(referenceHomographyFilename)
+referenceVideoIndex = filenames.index(referenceVideoFilename)
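+# indices of the videos to recalibrate: every video in the directory except the reference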
+indices = set(range(len(filenames)))
+indices.discard(referenceVideoIndex)
+
+images = {}
+#features = {}
+captures = {}
+
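+# grab the first frame of the reference video and convert it to grayscale for feature detection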
+captures[referenceVideoFilename] = cv2.VideoCapture(referenceVideoFilename)
+(ret, img) = captures[referenceVideoFilename].read()
+images[referenceVideoFilename] = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # OpenCV captures return BGR frames
+
+if '--mask_img' in options.keys():
+    maskImg = cv2.imread(options['--mask_img'], cv2.CV_LOAD_IMAGE_GRAYSCALE) # todo add possibility to look in the whole image if not providing mask
+else:
+    maskImg = np.ones(images[referenceVideoFilename].shape, dtype=np.uint8)
+
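+# detect strong corners in the reference frame; they are used later to estimate the translation to each other video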
+referenceFeatures = cv2.goodFeaturesToTrack(images[referenceVideoFilename], 1000, 0.02, 2, useHarrisDetector = True, mask=maskImg)
+displayRef = cv2.cvtColor(images[referenceVideoFilename], cv2.COLOR_GRAY2RGB)
+for j,p in enumerate(imgPts):
+    cv2.circle(displayRef, tuple(p.astype(int)), 3, (255,0,0)) # drawing functions expect integer pixel coordinates
+    cv2.putText(displayRef, str(j+1), tuple(p.astype(int)), cv2.FONT_HERSHEY_PLAIN, 1, (255,0,0))
+cv2.imshow('Reference',displayRef)
+
+for f in filenames: # get a suitable reference frame for each video
+    captures[f] = cv2.VideoCapture(f)
+    key = -1 # reset the key for each video
+    while key != cvutils.cvKeyNumbers['y']:
+        (ret, img) = captures[f].read()
+        cv2.imshow('Image',img)
+        print('Can one see the reference points in the image? (y/n)')
+        key = cv2.waitKey(0)
+
+    images[f] = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+    cv2.imwrite(utils.removeExtension(f)+'-frame.png', img) # save the selected frame for this video
+    #images[f] = cv2.imread(f, cv2.CV_LOAD_IMAGE_GRAYSCALE)
+    #features[f] = cv2.goodFeaturesToTrack(images[f], 1000, 0.02, 2, useHarrisDetector = True, mask=maskImg) # todo put parameters on the command line ? 
+    # goodFeaturesToTrack(image, maxCorners, qualityLevel, minDistance[, corners[, mask[, blockSize[, useHarrisDetector[, k]]]]])
+    # display features
+    if False:
+        display = img.copy()#cv2.cvtColor(images[f], cv2.COLOR_GRAY2RGB) #.copy()
+        for p in features[f]:
+            cv2.circle(display, tuple(p[0]), 3, (255,0,0))
+        cv2.imshow('Reference',display)
+        cv2.waitKey()
+
+plt.close('all')
+
+for i in indices:
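+    # estimate the 2D translation from the reference frame to this video's frame, using the reference features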
+    t = cvutils.computeTranslation(images[filenames[referenceVideoIndex]], images[filenames[i]], referenceFeatures, 100, 10)
+    print filenames[i],t
+    key = -1
+    if t is not None: # show translated points and ask if ok
+        displayImg = cv2.cvtColor(images[filenames[i]], cv2.COLOR_GRAY2RGB) #.copy()
+        for p in imgPts:
+            cv2.circle(displayImg, tuple((p+t[0]).astype(int)), 3, (255,0,0))
+        cv2.imshow('Image',displayImg)
+
+        while key != cvutils.cvKeyNumbers['y'] and key != cvutils.cvKeyNumbers['n']:
+            print('Are the translated points rightly located (y/n)?')
+            key = cv2.waitKey(0)
+        if key == cvutils.cvKeyNumbers['y']: # compute the homography with the translated points
+            newImgPts = imgPts + t[0] # apply the translation to all image points
+    else:
+        print('No translation could be found automatically. You will have to manually input world reference points.')
+
+    if t is None or key != cvutils.cvKeyNumbers['y']: # if no translation could be computed or it is not satisfactory
+        # the frame shown for point selection should be the one chosen for this video
+        # todo save image
+        print('Select the corresponding points in the same order as in the reference image')
+        plt.figure(1)
+        plt.imshow(displayRef)
+        plt.figure(2)
+        plt.imshow(images[filenames[i]], cmap='gray') # show the frame selected for this video (img holds the last frame read in the frame-selection loop)
+        plt.show()
+        newImgPts = np.array([list(p) for p in plt.ginput(n=wldPts.shape[0], timeout=-1)], dtype = np.float32)
+
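+    # compute the homography mapping image coordinates to world coordinates from the point correspondences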
+    homography, mask = cv2.findHomography(newImgPts, wldPts) # method=0, ransacReprojThreshold=3
+    print homography
+    np.savetxt(utils.removeExtension(filenames[i])+'-homography.txt',homography)
+    np.savetxt(utils.removeExtension(filenames[i])+'-point-correspondences.txt', np.append(wldPts.T, newImgPts.T, axis=0))
+
+cv2.destroyAllWindows()