view scripts/compute-homography.py @ 334:1d90e9080cb2

moved python scripts to the scripts directory
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Fri, 14 Jun 2013 10:34:11 -0400
parents python/compute-homography.py@9d88a4d97473
children 5f75d6c23ed5
line wrap: on
line source

#! /usr/bin/env python

import sys,getopt

import matplotlib.pyplot as plt
import numpy as np
import cv2

import cvutils
import utils

# parse the command-line flags into a {flag: value} dictionary for easy lookup
parsedOptions, args = getopt.getopt(sys.argv[1:], 'hp:i:w:n:u:', ['help'])
options = dict(parsedOptions)

# TODO process camera intrinsic and extrinsic parameters to obtain image to world homography, taking example from Work/src/python/generate-homography.py script
# cameraMat = load(videoFilenamePrefix+'-camera.txt');
# T1 = cameraMat[3:6,:].copy();
# A = cameraMat[0:3,0:3].copy();

# # pay attention, rotation may be the transpose
# # R = T1[:,0:3].T;
# R = T1[:,0:3];
# rT = dot(R, T1[:,3]/1000);
# T = zeros((3,4),'f');
# T[:,0:3] = R[:];
# T[:,3] = rT;

# AT = dot(A,T);

# nPoints = 4;
# worldPoints = cvCreateMat(nPoints, 3, CV_64FC1);
# imagePoints = cvCreateMat(nPoints, 3, CV_64FC1);

# # extract homography from the camera calibration
# worldPoints = cvCreateMat(4, 3, CV_64FC1);
# imagePoints = cvCreateMat(4, 3, CV_64FC1);

# worldPoints[0,:] = [[1, 1, 0]];
# worldPoints[1,:] = [[1, 2, 0]];
# worldPoints[2,:] = [[2, 1, 0]];
# worldPoints[3,:] = [[2, 2, 0]];

# wPoints = [[1,1,2,2],
#            [1,2,1,2],
#            [0,0,0,0]];
# iPoints = utils.worldToImage(AT, wPoints);

# for i in range(nPoints):
#     imagePoints[i,:] = [iPoints[:,i].tolist()];

# H = cvCreateMat(3, 3, CV_64FC1);

# cvFindHomography(imagePoints, worldPoints, H);

# print usage and exit when help is requested or no option was given
if '--help' in options.keys() or '-h' in options.keys() or len(options) == 0:
    # note: the number-of-points flag is -n (the dash was missing in the usage line)
    print('Usage: {0} --help|-h [-p point-correspondences.txt] [ -i video-frame] [ -w world-frame] [-n number-points] [-u unit-per-pixel=1]'.format(sys.argv[0]))
    print('''The input data can be provided either as point correspondences already saved
 in a text file or inputed by clicking a certain number of points (>=4)
 in a video frame and a world image.

The point correspondence file contains at least 4 non-colinear point coordinates 
with the following format:
 - the first two lines are the x and y coordinates in the projected space (usually world space)
 - the last two lines are the x and y coordinates in the origin space (usually image space)

If providing video and world images, with a number of points to input
and a ratio to convert pixels to world distance unit (eg meters per pixel), 
the images will be shown in turn and the user should click 
in the same order the corresponding points in world and image spaces. ''')
    sys.exit()

# compute the image-to-world homography, either from a saved correspondence
# file (-p) or by interactively clicking matching points in both images
homography = np.array([])
if '-p' in options.keys():
    # correspondences were saved earlier: world coordinates first, then image
    worldPts, videoPts = cvutils.loadPointCorrespondences(options['-p'])
    homography, mask = cv2.findHomography(videoPts, worldPts) # method=0, ransacReprojThreshold=3
elif '-i' in options.keys() and '-w' in options.keys():
    nPoints = 4  # minimum number of correspondences for a homography
    if '-n' in options.keys():
        nPoints = int(options['-n'])
    unitsPerPixel = 1  # world-image scale, e.g. meters per pixel
    if '-u' in options.keys():
        unitsPerPixel = float(options['-u'])
    worldImg = plt.imread(options['-w'])
    videoImg = plt.imread(options['-i'])
    print('Click on {0} points in the video frame'.format(nPoints))
    plt.figure()
    plt.imshow(videoImg)
    videoPts = np.array(plt.ginput(nPoints))
    print('Click on {0} points in the world image'.format(nPoints))
    plt.figure()
    plt.imshow(worldImg)
    # scale the clicked world-image pixels to world units before fitting
    worldPts = unitsPerPixel*np.array(plt.ginput(nPoints))
    plt.close('all')
    homography, mask = cv2.findHomography(videoPts, worldPts)
    # append the correspondences so they can be reloaded later with -p;
    # the with-statement closes the file even if savetxt raises
    with open('point-correspondences.txt', 'a') as f:
        np.savetxt(f, worldPts.T)
        np.savetxt(f, videoPts.T)

# persist the homography only when one was actually computed
if homography.size > 0:
    np.savetxt('homography.txt', homography)

# Visual check of the fit: redraw the input points and their reprojections.
# Convention: original clicked points in red, reprojected points in blue.
if '-i' in options.keys() and homography.size>0:
    videoImg = cv2.imread(options['-i'])
    # '-w' may be absent when the correspondences came from -p; guard the read
    # instead of raising KeyError
    worldImg = cv2.imread(options['-w']) if '-w' in options.keys() else None
    invHomography = np.linalg.inv(homography)  # world -> image mapping
    projectedWorldPts = cvutils.projectArray(invHomography, worldPts.T).T
    if '-u' in options.keys():
        unitsPerPixel = float(options['-u'])
        # image -> world uses the forward homography (findHomography(videoPts,
        # worldPts) maps video to world); inv(homography) would be wrong here
        projectedVideoPts = cvutils.projectArray(homography, videoPts.T).T
    for i in range(worldPts.shape[0]):
        cv2.circle(videoImg,tuple(np.int32(np.round(videoPts[i]))),2,cvutils.cvRed)
        cv2.circle(videoImg,tuple(np.int32(np.round(projectedWorldPts[i]))),2,cvutils.cvBlue)
        if '-u' in options.keys() and worldImg is not None:
            cv2.circle(worldImg,tuple(np.int32(np.round(worldPts[i]/unitsPerPixel))),2,cvutils.cvRed)
            # blue for the projected point (was red, which made it
            # indistinguishable from the original point drawn just above)
            cv2.circle(worldImg,tuple(np.int32(np.round(projectedVideoPts[i]/unitsPerPixel))),2,cvutils.cvBlue)
        #print('img: {0} / projected: {1}'.format(videoPts[i], p))
    cv2.imshow('video frame',videoImg)
    if '-u' in options.keys() and worldImg is not None:
        cv2.imshow('world image',worldImg)
    cv2.waitKey()