traffic-intelligence: python/calibration-translation.py @ 265:7a3bf04cf016

changeset description: added plotting of indicators
author:   Nicolas Saunier <nicolas.saunier@polymtl.ca>
date:     Thu, 26 Jul 2012 19:46:36 -0400
parents:  b0719b3ad3db
children: 514f6b98cd8c
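Note: the command-line interface is built by utils.parseCLIOptions from the option names declared in the source, so the exact help text comes from that helper; based on those declarations, an invocation would look roughly like

    calibration-translation.py --ref_video reference.avi --ref_points reference-points.txt --mask_img mask.png

where --ref_video and --ref_points are required, --mask_img is optional, and the file names here are made-up examples.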
#!/usr/bin/env python
import sys
import os

import matplotlib.pyplot as plt
import numpy as np
import cv2

import utils
import cvutils

# development for the data collected and stabilized by Paul in Summer 2011
# todo write help, add options to control the parameters for matching (n points and distance)

options = utils.parseCLIOptions('Program to re-calibrate an initial calibration based on point correspondences by adjusting the points to slightly different viewpoints, where all the points are still visible\n\nUsage: ', ['ref_video=', 'ref_points='], sys.argv, ['mask_img='])

referenceVideoFilename = options['--ref_video']
wldPts, imgPts = cvutils.loadPointCorrespondences(options['--ref_points'])

def translatePoints(points, t):
    'points is Nx2, t is [x,y]'
    translated = points.copy()
    for i in xrange(2):
        translated[:, i] += t[i]
    return translated

filenames = [f for f in utils.listfiles('.', 'avi')] # directory to examine should be current directory
referenceVideoIndex = filenames.index(referenceVideoFilename)
indices = set(range(len(filenames)))
indices.discard(referenceVideoIndex)

images = {}
captures = {}
captures[referenceVideoFilename] = cv2.VideoCapture(referenceVideoFilename)
(ret, img) = captures[referenceVideoFilename].read()
images[referenceVideoFilename] = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

# load a mask image to restrict the region used to compute the translation
if '--mask_img' in options.keys():
    maskImg = cv2.imread(options['--mask_img'], cv2.CV_LOAD_IMAGE_GRAYSCALE)
else: # otherwise use the whole image
    maskImg = np.ones(images[referenceVideoFilename].shape, dtype=np.uint8)

referenceFeatures = cv2.goodFeaturesToTrack(images[referenceVideoFilename], 1000, 0.02, 2, useHarrisDetector=True, mask=maskImg)

# display the reference points on the reference frame
displayRef = cv2.cvtColor(images[referenceVideoFilename], cv2.COLOR_GRAY2RGB)
for j, p in enumerate(imgPts):
    cv2.circle(displayRef, tuple(p), 3, (255, 0, 0))
    cv2.putText(displayRef, str(j+1), tuple(p), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))
cv2.imshow('Reference', displayRef)

# get a suitable reference frame for each video (reuse a saved frame if it exists)
for f in filenames:
    captures[f] = cv2.VideoCapture(f)
    frameFilename = utils.removeExtension(f)+'-frame.png'
    if not os.path.exists(frameFilename):
        key = -1
        while key != cvutils.cvKeyNumbers['y']:
            (ret, img) = captures[f].read()
            cv2.imshow('Image', img)
            print('Can one see the reference points in the image? (y/n)')
            key = cv2.waitKey(0)
        images[f] = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        cv2.imwrite(frameFilename, img)
    else:
        images[f] = cv2.imread(frameFilename, cv2.CV_LOAD_IMAGE_GRAYSCALE)
    #features[f] = cv2.goodFeaturesToTrack(images[f], 1000, 0.02, 2, useHarrisDetector = True, mask=maskImg) # todo put parameters on the command line ?
    # goodFeaturesToTrack(image, maxCorners, qualityLevel, minDistance[, corners[, mask[, blockSize[, useHarrisDetector[, k]]]]])
    # display features
    # if False:
    #     display = img.copy() #cv2.cvtColor(images[f], cv2.COLOR_GRAY2RGB)
    #     for p in features[f]:
    #         cv2.circle(display, tuple(p[0]), 3, (255,0,0))
    #     cv2.imshow('Reference', display)
    #     cv2.waitKey()

plt.close('all')

# validate or input point correspondences and compute the homography for each video
for i in indices:
    t = cvutils.computeTranslation(images[filenames[referenceVideoIndex]], images[filenames[i]], referenceFeatures, 100, 10)
    print filenames[i], t
    key = -1
    if t is not None: # show the translated reference points and ask if they are acceptable
        displayImg = cv2.cvtColor(images[filenames[i]], cv2.COLOR_GRAY2RGB)
        for p in imgPts:
            cv2.circle(displayImg, tuple(p+t[0]), 3, (255, 0, 0))
        cv2.imshow('Image', displayImg)
        while not(key == cvutils.cvKeyNumbers['y'] or key == cvutils.cvKeyNumbers['n']):
            print('Are the translated points rightly located (y/n)?')
            key = cv2.waitKey(0)
        if key == cvutils.cvKeyNumbers['y']: # compute the homography with the translated points
            newImgPts = np.array([p+t[0] for p in imgPts])
    else:
        print('No translation could be found automatically. You will have to manually input world reference points.')

    if t is None or key != cvutils.cvKeyNumbers['y']: # no translation could be computed or it is not satisfactory
        print('Select the corresponding points in the same order as in the reference image')
        plt.figure(1)
        plt.imshow(displayRef)
        plt.figure(2)
        plt.imshow(images[filenames[i]])
        plt.show()
        newImgPts = np.array([list(p) for p in plt.ginput(n=wldPts.shape[0], timeout=-1)], dtype=np.float32)

    homography, mask = cv2.findHomography(newImgPts, wldPts) # default method=0, ransacReprojThreshold=3
    print homography
    np.savetxt(utils.removeExtension(filenames[i])+'-homography.txt', homography)
    np.savetxt(utils.removeExtension(filenames[i])+'-point-correspondences.txt', np.append(wldPts.T, newImgPts.T, axis=0))

cv2.destroyAllWindows()
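Each processed video ends up with a <video>-homography.txt file written by np.savetxt. As a minimal illustrative sketch (not part of the script above), such a matrix could later be reloaded and used to project image points to world coordinates with OpenCV's perspectiveTransform; the file name video1-homography.txt and the pixel coordinates are made-up examples.

import numpy as np
import cv2

homography = np.loadtxt('video1-homography.txt')  # 3x3 matrix saved by the script above
imagePoints = np.array([[10., 20.], [300., 150.]]).reshape(-1, 1, 2)  # Nx1x2 points in image space
worldPoints = cv2.perspectiveTransform(imagePoints, homography)  # projected to world coordinates
print worldPoints.reshape(-1, 2)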