comparison python/calibration-translation.py @ 159:115f7f90286d

updated calibration-translation and added function to convert point correspondences
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Mon, 12 Sep 2011 16:38:47 -0400
parents 2d7c6d767a39
children b0719b3ad3db
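
The commit message mentions a new function to convert point correspondences; that function is not part of this file's comparison and presumably lives in another module (e.g. cvutils). As a rough illustration only, a minimal sketch of such a conversion, assuming a homography-based mapping and hypothetical names (convertPointCorrespondences, homography):

import numpy as np
import cv2

def convertPointCorrespondences(points, homography):
    # Hypothetical helper, not the function added in this changeset:
    # projects an (N, 2) array of image points through a 3x3 homography
    # and returns the corresponding (N, 2) array of projected points.
    src = np.asarray(points, dtype=np.float32).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(src, np.asarray(homography, dtype=np.float32))
    return dst.reshape(-1, 2)

For instance, if referenceHomography were the image-to-world homography loaded in this script, convertPointCorrespondences(referencePoints, referenceHomography) would map the reference image points into world coordinates.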
--- python/calibration-translation.py (158:2d7c6d767a39)
+++ python/calibration-translation.py (159:115f7f90286d)
@@ -1,8 +1,9 @@
 #!/usr/bin/env python
 
 import sys
+import os
 
 import matplotlib.mlab as pylab
 import matplotlib.pyplot as plt
 import numpy as np
 
@@ -31,12 +32,12 @@
 
 filenames = [f for f in utils.listfiles('.','avi')] # directory to examine should be current directory
 
 #referenceHomography = np.loadtxt(referenceHomographyFilename)
 referenceVideoIndex = filenames.index(referenceVideoFilename)
-indices = set([1, 2, 3, 4, 5, 6, 7, 9, 10, 11])#set(range(len(filenames)))
-#indices.discard(referenceVideoIndex)
+indices = set(range(len(filenames)))
+indices.discard(referenceVideoIndex)
 
 images = {}
 #features = {}
 captures = {}
 
@@ -56,30 +57,32 @@
     cv2.putText(displayRef, str(j+1), tuple(p), cv2.FONT_HERSHEY_PLAIN, 1, (255,0,0))
 cv2.imshow('Reference',displayRef)
 
 for f in filenames: # get suitable image references for each video
     captures[f] = cv2.VideoCapture(f)
-    # TODO if frame image already exists, no need to search for it again
-    key = -1
-    while key != cvutils.cvKeyNumbers['y']:
-        (ret, img) = captures[f].read()
-        cv2.imshow('Image',img)
-        print('Can one see the reference points in the image? (y/n)')
-        key = cv2.waitKey(0)
+    frameFilename = utils.removeExtension(f)+'-frame.png' # TODO if frame image already exists, no need to search for it again
+    if not os.path.exists(frameFilename):
+        key = -1
+        while key != cvutils.cvKeyNumbers['y']:
+            (ret, img) = captures[f].read()
+            cv2.imshow('Image',img)
+            print('Can one see the reference points in the image? (y/n)')
+            key = cv2.waitKey(0)
 
-    images[f] = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
-    cv2.imwrite(utils.removeExtension(f)+'-frame.png', img)
-    #images[f] = cv2.imread(f, cv2.CV_LOAD_IMAGE_GRAYSCALE)
+        images[f] = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
+        cv2.imwrite(frameFilename, img)
+    else:
+        images[f] = cv2.imread(frameFilename, cv2.CV_LOAD_IMAGE_GRAYSCALE)
     #features[f] = cv2.goodFeaturesToTrack(images[f], 1000, 0.02, 2, useHarrisDetector = True, mask=maskImg) # todo put parameters on the command line ?
     # goodFeaturesToTrack(image, maxCorners, qualityLevel, minDistance[, corners[, mask[, blockSize[, useHarrisDetector[, k]]]]])
     # display features
-    if False:
-        display = img.copy()#cv2.cvtColor(images[f], cv2.COLOR_GRAY2RGB) #.copy()
-        for p in features[f]:
-            cv2.circle(display, tuple(p[0]), 3, (255,0,0))
-        cv2.imshow('Reference',display)
-        cv2.waitKey()
+    # if False:
+    #     display = img.copy()#cv2.cvtColor(images[f], cv2.COLOR_GRAY2RGB) #.copy()
+    #     for p in features[f]:
+    #         cv2.circle(display, tuple(p[0]), 3, (255,0,0))
+    #     cv2.imshow('Reference',display)
+    #     cv2.waitKey()
 
 plt.close('all')
 
 for i in indices:
     t = cvutils.computeTranslation(images[filenames[referenceVideoIndex]], images[filenames[i]], referenceFeatures, 100, 10)
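
The main change in this file caches the interactively selected frame to disk, so subsequent runs reload the saved '-frame.png' image instead of prompting again. A standalone sketch of that pattern, assuming current OpenCV constant names (cv2.IMREAD_GRAYSCALE rather than the legacy cv2.CV_LOAD_IMAGE_GRAYSCALE used above) and a hypothetical helper name loadOrSelectFrame:

import os
import cv2

def loadOrSelectFrame(videoFilename, frameFilename):
    # Hypothetical helper illustrating the caching pattern of the changeset:
    # reuse a previously saved frame if it exists, otherwise step through the
    # video until the user accepts a frame with 'y', then cache it as a PNG.
    if os.path.exists(frameFilename):
        return cv2.imread(frameFilename, cv2.IMREAD_GRAYSCALE)
    capture = cv2.VideoCapture(videoFilename)
    key = -1
    img = None
    while key != ord('y'):
        ret, img = capture.read()
        if not ret:  # ran out of frames without an accepted one
            img = None
            break
        cv2.imshow('Image', img)
        print('Can one see the reference points in the image? (y/n)')
        key = cv2.waitKey(0) & 0xFF  # mask to get the key code portably
    capture.release()
    if img is None:
        return None
    cv2.imwrite(frameFilename, img)
    return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # VideoCapture frames are BGR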