view scripts/compute-homography.py @ 478:d337bffd7283

Display of points in compute-homography and step option to replay videos. A bug seems to remain with respect to trajectory bounds, to check
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Thu, 27 Mar 2014 11:40:28 -0400
parents 6551a3cf1750
children b0dac840c24f

#! /usr/bin/env python

import sys, argparse

import matplotlib.pyplot as plt
import numpy as np
import cv2

import cvutils
import utils

parser = argparse.ArgumentParser(description='The program computes the homography matrix from at least 4 non-collinear point correspondences entered in the same order in a video frame and an aerial photo/ground map, or from a list of corresponding points in the two planes.', epilog = '''The point correspondence file contains at least 4 non-collinear point coordinates
with the following format:
 - the first two lines are the x and y coordinates in the projected space (usually world space)
 - the last two lines are the x and y coordinates in the origin space (usually image space)

If providing video and world images, with a number of points to input
and a ratio to convert pixels to world distance units (e.g. meters per pixel),
the images will be shown in turn and the user should click
the corresponding points in the same order in the world and image spaces.''', formatter_class=argparse.RawDescriptionHelpFormatter,)

parser.add_argument('-p', dest = 'pointCorrespondencesFilename', help = 'name of the text file containing the point correspondences')
parser.add_argument('-i', dest = 'videoFrameFilename', help = 'filename of the video frame')
parser.add_argument('-w', dest = 'worldFilename', help = 'filename of the aerial photo/ground map')
parser.add_argument('-n', dest = 'nPoints', help = 'number of corresponding points to input', default = 4, type = int)
parser.add_argument('-u', dest = 'unitsPerPixel', help = 'number of units per pixel', default = 1., type = float)
parser.add_argument('--display', dest = 'displayPoints', help = 'display original and projected points on both images', action = 'store_true')

args = parser.parse_args()

# TODO process camera intrinsic and extrinsic parameters to obtain the image-to-world homography, following the example of the Work/src/python/generate-homography.py script
# cameraMat = load(videoFilenamePrefix+'-camera.txt');
# T1 = cameraMat[3:6,:].copy();
# A = cameraMat[0:3,0:3].copy();

# # pay attention, rotation may be the transpose
# # R = T1[:,0:3].T;
# R = T1[:,0:3];
# rT = dot(R, T1[:,3]/1000);
# T = zeros((3,4),'f');
# T[:,0:3] = R[:];
# T[:,3] = rT;

# AT = dot(A,T);

# nPoints = 4;
# worldPoints = cvCreateMat(nPoints, 3, CV_64FC1);
# imagePoints = cvCreateMat(nPoints, 3, CV_64FC1);

# # extract homography from the camera calibration
# worldPoints = cvCreateMat(4, 3, CV_64FC1);
# imagePoints = cvCreateMat(4, 3, CV_64FC1);

# worldPoints[0,:] = [[1, 1, 0]];
# worldPoints[1,:] = [[1, 2, 0]];
# worldPoints[2,:] = [[2, 1, 0]];
# worldPoints[3,:] = [[2, 2, 0]];

# wPoints = [[1,1,2,2],
#            [1,2,1,2],
#            [0,0,0,0]];
# iPoints = utils.worldToImage(AT, wPoints);

# for i in range(nPoints):
#     imagePoints[i,:] = [iPoints[:,i].tolist()];

# H = cvCreateMat(3, 3, CV_64FC1);

# cvFindHomography(imagePoints, worldPoints, H);
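# A possible modernisation of the above with the current cv2/numpy API (untested
# sketch; the '-camera.txt' layout with the intrinsic matrix in rows 0-2 and the
# extrinsic parameters [R|t] in rows 3-5, and the videoFilenamePrefix variable,
# are assumptions carried over from the legacy code above):
# cameraMat = np.loadtxt(videoFilenamePrefix+'-camera.txt')
# A = cameraMat[0:3,0:3]                  # intrinsic parameters
# T1 = cameraMat[3:6,:]                   # extrinsic parameters
# R = T1[:,0:3]                           # rotation (may need transposing, see above)
# t = np.dot(R, T1[:,3]/1000)             # translation column, scaled as above
# # for ground-plane points (Z=0), the world-to-image projection reduces to the
# # 3x3 matrix A.[r1 r2 t], whose inverse is the image-to-world homography
# worldToImage = np.dot(A, np.column_stack((R[:,0], R[:,1], t)))
# homographyFromCalibration = np.linalg.inv(worldToImage)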


homography = np.array([])
if args.pointCorrespondencesFilename is not None:
    worldPts, videoPts = cvutils.loadPointCorrespondences(args.pointCorrespondencesFilename)
    homography, mask = cv2.findHomography(videoPts, worldPts) # method=0, ransacReprojThreshold=3
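    # With noisy correspondences, a robust estimate could be computed instead, using
    # the method and threshold arguments mentioned above (sketch, not enabled here):
    # homography, mask = cv2.findHomography(videoPts, worldPts, cv2.RANSAC, 3.0)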
elif args.videoFrameFilename is not None and args.worldFilename is not None:
    worldImg = plt.imread(args.worldFilename)
    videoImg = plt.imread(args.videoFrameFilename)
    print('Click on {0} points in the video frame'.format(args.nPoints))
    plt.figure()
    plt.imshow(videoImg)
    videoPts = np.array(plt.ginput(args.nPoints, timeout=3000))
    print('Click on {0} points in the world image'.format(args.nPoints))
    plt.figure()
    plt.imshow(worldImg)
    worldPts = args.unitsPerPixel*np.array(plt.ginput(args.nPoints, timeout=3000))
    plt.close('all')
    homography, mask = cv2.findHomography(videoPts, worldPts)
    # save the correspondences to file (appended, in the format documented in the epilog)
    with open('point-correspondences.txt', 'a') as f:
        np.savetxt(f, worldPts.T)
        np.savetxt(f, videoPts.T)
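    # The appended file follows the format documented in the epilog: the first two
    # rows are the world x and y coordinates, the last two rows the image x and y
    # coordinates. A minimal sketch of reading one such block back with numpy
    # (cvutils.loadPointCorrespondences is the loader this script actually uses):
    # pts = np.loadtxt('point-correspondences.txt')
    # worldPts, videoPts = pts[:2,:].T, pts[2:,:].T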

if homography.size>0:
    np.savetxt('homography.txt',homography)

if args.displayPoints and args.videoFrameFilename is not None and args.worldFilename is not None and homography.size > 0:
    worldImg = cv2.imread(args.worldFilename)
    videoImg = cv2.imread(args.videoFrameFilename)
    invHomography = np.linalg.inv(homography)
    projectedWorldPts = cvutils.projectArray(invHomography, worldPts.T).T
    projectedVideoPts = cvutils.projectArray(homography, videoPts.T).T
    for i in range(worldPts.shape[0]):
        # world image
        cv2.circle(worldImg,tuple(np.int32(np.round(worldPts[i]/args.unitsPerPixel))),2,cvutils.cvBlue)
        cv2.circle(worldImg,tuple(np.int32(np.round(projectedVideoPts[i]/args.unitsPerPixel))),2,cvutils.cvRed)
        cv2.putText(worldImg, str(i+1), tuple(np.int32(np.round(worldPts[i]/args.unitsPerPixel))+5), cv2.FONT_HERSHEY_PLAIN, 2., cvutils.cvBlue, 2)
        # video image
        cv2.circle(videoImg,tuple(np.int32(np.round(videoPts[i]))),2,cvutils.cvBlue)
        cv2.circle(videoImg,tuple(np.int32(np.round(projectedWorldPts[i]))),2,cvutils.cvRed)
        cv2.putText(videoImg, str(i+1), tuple(np.int32(np.round(videoPts[i])+5)), cv2.FONT_HERSHEY_PLAIN, 2., cvutils.cvBlue, 2)
    cv2.imshow('video frame',videoImg)
    cv2.imshow('world image',worldImg)
    cv2.waitKey()
    cv2.destroyAllWindows()
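    # A quick sanity check of the homography is the reprojection error in world units,
    # reusing the points projected above (sketch, not part of the original script):
    # errors = np.sqrt(np.sum((projectedVideoPts - worldPts)**2, axis=1))
    # print('mean reprojection error: {0} world units'.format(errors.mean()))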