Mercurial Hosting > traffic-intelligence
view python/compute-homography.py @ 222:426321b46e44
temporary trajectory instants table
author: Nicolas Saunier <nicolas.saunier@polymtl.ca>
date: Tue, 26 Jun 2012 02:08:01 -0400
parents | ba71924cadf5 |
children | 584613399513 |
line wrap: on
line source
#! /usr/bin/env python
"""Compute the homography from an origin (image) space to a projected (world)
space, given a file of at least 4 non-colinear point correspondences.

Usage: compute-homography.py [--help|-h] [--video_frame <video frame filename>] <point_correspondences.txt>

The homography is saved next to the correspondence file as
<basename>-homography.txt; with --video_frame, the reprojected points are
drawn on the given frame for visual verification.
"""

import sys, getopt

import numpy as np
import cv2

import cvutils
import utils

# parse command-line options into a {flag: value} dict; args keeps positionals
options, args = getopt.getopt(sys.argv[1:], 'h', ['help', 'video_frame='])
options = dict(options)

# TODO process camera intrinsic and extrinsic parameters to obtain image to
# world homography, taking example from Work/src/python/generate-homography.py
# script (an OpenCV 1.x prototype of this computation lived here as
# commented-out code; see repository history if needed)

if '--help' in options or '-h' in options:
    print('''The argument should be the name of a file containing at least 4 non-colinear point coordinates:
 - the first two lines are the x and y coordinates in the projected space (usually world space)
 - the last two lines are the x and y coordinates in the origin space (usually image space)''')
    sys.exit()

if len(args) == 0:
    print('Usage: {0} --help|-h [--video_frame <video frame filename>] [<point_correspondences.txt>]'.format(sys.argv[0]))
    sys.exit()

# file layout: projected-space (world) coordinates first, then origin-space
# (image) coordinates — hence dstPts before srcPts
dstPts, srcPts = cvutils.loadPointCorrespondences(args[0])
homography, mask = cv2.findHomography(srcPts, dstPts) # method=0, ransacReprojThreshold=3
# Save the homography next to the correspondence file. Use args[0] (the
# positional filename actually loaded above), NOT sys.argv[1]: when an option
# such as --video_frame is passed, sys.argv[1] is the flag itself and the
# output file would get a bogus name like '--video_frame-homography.txt'.
np.savetxt(utils.removeExtension(args[0])+'-homography.txt', homography)

# Optional visual check: draw the original image points (red) and the world
# points reprojected through the inverse homography (blue) on a video frame.
if '--video_frame' in options and homography.size > 0:
    img = cv2.imread(options['--video_frame'])
    for p in srcPts:
        # assumes loadPointCorrespondences yields integer-valued image
        # coordinates acceptable to cv2.circle — TODO confirm
        cv2.circle(img, tuple(p), 2, cvutils.cvRed)
    invHomography = np.linalg.inv(homography)
    projectedDstPts = cvutils.projectArray(invHomography, dstPts.T).T
    for i, p in enumerate(projectedDstPts):
        cv2.circle(img, tuple(np.int32(np.round(p))), 2, cvutils.cvBlue)
        print('img: {0} / projected: {1}'.format(srcPts[i], p))
    cv2.imshow('video frame', img)
    cv2.waitKey()