Mercurial Hosting > traffic-intelligence
comparison python/calibration-translation.py @ 158:2d7c6d767a39
corrected and improved calibration-translation.py
| author   | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
|----------|----------------------------------------------|
| date     | Fri, 09 Sep 2011 19:23:11 -0400              |
| parents  | 3aab19947a34                                 |
| children | 115f7f90286d                                 |
comparison
equal
deleted
inserted
replaced
157:3aab19947a34 | 158:2d7c6d767a39 |
---|---|
11 import cvutils | 11 import cvutils |
12 | 12 |
13 # development for the data collected and stabilized by Paul in Summer 2011 | 13 # development for the data collected and stabilized by Paul in Summer 2011 |
14 # todo test other features | 14 # todo test other features |
15 | 15 |
16 options = utils.parseCLIOptions('Program to re-calibrate an initial calibration based on point correspondences by adjusting the points to slightly different viewpoints, where all the points are still visible\n\nUsage: ', ['ref_video=', 'ref_homography=', 'ref_points='], sys.argv, ['mask_img=']) | 16 options = utils.parseCLIOptions('Program to re-calibrate an initial calibration based on point correspondences by adjusting the points to slightly different viewpoints, where all the points are still visible\n\nUsage: ', ['ref_video=', 'ref_points='], sys.argv, ['mask_img=']) |
17 #, 'ref_homography=' | |
17 | 18 |
18 referenceVideoFilename=options['--ref_video']#'1440-1459_Mercalli.avi' | 19 referenceVideoFilename=options['--ref_video']#'1440-1459_Mercalli.avi' |
19 referenceHomographyFilename=options['--ref_homography']#'1440-1459_Mercalli-homography.txt' | 20 #referenceHomographyFilename=options['--ref_homography']#'1440-1459_Mercalli-homography.txt' |
20 points = np.loadtxt(options['--ref_points'], dtype=np.float32) # '1440-1459_Mercalli-point-correspondences.txt' | 21 points = np.loadtxt(options['--ref_points'], dtype=np.float32) # '1440-1459_Mercalli-point-correspondences.txt' |
21 wldPts = points[:2,:].T | 22 wldPts = points[:2,:].T |
22 imgPts = points[2:,:].T | 23 imgPts = points[2:,:].T |
23 | 24 |
24 def translatePoints(points, t): | 25 def translatePoints(points, t): |
28 translated[i] += t[i] | 29 translated[i] += t[i] |
29 return translated | 30 return translated |
30 | 31 |
31 filenames = [f for f in utils.listfiles('.','avi')] # directory to examine should be current directory | 32 filenames = [f for f in utils.listfiles('.','avi')] # directory to examine should be current directory |
32 | 33 |
33 referenceHomography = np.loadtxt(referenceHomographyFilename) | 34 #referenceHomography = np.loadtxt(referenceHomographyFilename) |
34 referenceVideoIndex = filenames.index(referenceVideoFilename) | 35 referenceVideoIndex = filenames.index(referenceVideoFilename) |
35 indices = set(range(len(filenames))) | 36 indices = set([1, 2, 3, 4, 5, 6, 7, 9, 10, 11])#set(range(len(filenames))) |
36 indices.discard(referenceVideoIndex) | 37 #indices.discard(referenceVideoIndex) |
37 | 38 |
38 images = {} | 39 images = {} |
39 #features = {} | 40 #features = {} |
40 captures = {} | 41 captures = {} |
41 | 42 |
53 for j,p in enumerate(imgPts): | 54 for j,p in enumerate(imgPts): |
54 cv2.circle(displayRef, tuple(p), 3, (255,0,0)) | 55 cv2.circle(displayRef, tuple(p), 3, (255,0,0)) |
55 cv2.putText(displayRef, str(j+1), tuple(p), cv2.FONT_HERSHEY_PLAIN, 1, (255,0,0)) | 56 cv2.putText(displayRef, str(j+1), tuple(p), cv2.FONT_HERSHEY_PLAIN, 1, (255,0,0)) |
56 cv2.imshow('Reference',displayRef) | 57 cv2.imshow('Reference',displayRef) |
57 | 58 |
58 key = -1 | |
59 for f in filenames: # get suitable image references for each video | 59 for f in filenames: # get suitable image references for each video |
60 captures[f] = cv2.VideoCapture(f) | 60 captures[f] = cv2.VideoCapture(f) |
61 # TODO if frame image already exists, no need to search for it again | |
62 key = -1 | |
61 while key != cvutils.cvKeyNumbers['y']: | 63 while key != cvutils.cvKeyNumbers['y']: |
62 (ret, img) = captures[f].read() | 64 (ret, img) = captures[f].read() |
63 cv2.imshow('Image',img) | 65 cv2.imshow('Image',img) |
64 print('Can one see the reference points in the image? (y/n)') | 66 print('Can one see the reference points in the image? (y/n)') |
65 key = cv2.waitKey(0) | 67 key = cv2.waitKey(0) |
66 | 68 |
67 images[f] = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) | 69 images[f] = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) |
68 cv2.imwrite(utils.removeExtension(filenames[i])+'-frame.png') | 70 cv2.imwrite(utils.removeExtension(f)+'-frame.png', img) |
69 #images[f] = cv2.imread(f, cv2.CV_LOAD_IMAGE_GRAYSCALE) | 71 #images[f] = cv2.imread(f, cv2.CV_LOAD_IMAGE_GRAYSCALE) |
70 #features[f] = cv2.goodFeaturesToTrack(images[f], 1000, 0.02, 2, useHarrisDetector = True, mask=maskImg) # todo put parameters on the command line ? | 72 #features[f] = cv2.goodFeaturesToTrack(images[f], 1000, 0.02, 2, useHarrisDetector = True, mask=maskImg) # todo put parameters on the command line ? |
71 # goodFeaturesToTrack(image, maxCorners, qualityLevel, minDistance[, corners[, mask[, blockSize[, useHarrisDetector[, k]]]]]) | 73 # goodFeaturesToTrack(image, maxCorners, qualityLevel, minDistance[, corners[, mask[, blockSize[, useHarrisDetector[, k]]]]]) |
72 # display features | 74 # display features |
73 if False: | 75 if False: |
87 displayImg = cv2.cvtColor(images[filenames[i]], cv2.COLOR_GRAY2RGB) #.copy() | 89 displayImg = cv2.cvtColor(images[filenames[i]], cv2.COLOR_GRAY2RGB) #.copy() |
88 for p in imgPts: | 90 for p in imgPts: |
89 cv2.circle(displayImg, tuple(p+t[0]), 3, (255,0,0)) | 91 cv2.circle(displayImg, tuple(p+t[0]), 3, (255,0,0)) |
90 cv2.imshow('Image',displayImg) | 92 cv2.imshow('Image',displayImg) |
91 | 93 |
92 while key != cvutils.cvKeyNumbers['y'] and key != cvutils.cvKeyNumbers['n']: | 94 while not(key == cvutils.cvKeyNumbers['y'] or key == cvutils.cvKeyNumbers['n']): |
93 print('Are the translated points rightly located (y/n)?') | 95 print('Are the translated points rightly located (y/n)?') |
94 key = cv2.waitKey(0) | 96 key = cv2.waitKey(0) |
95 if key == cvutils.cvKeyNumbers['y']: # compute homography with translated numbers | 97 if key == cvutils.cvKeyNumbers['y']: # compute homography with translated numbers |
96 newImgPts = [p+t[0] for p in imgPts] | 98 newImgPts = np.array([p+t[0] for p in imgPts]) |
97 else: | 99 else: |
98 print('No translation could be found automatically. You will have to manually input world reference points.') | 100 print('No translation could be found automatically. You will have to manually input world reference points.') |
99 | 101 |
100 if t==None or key != cvutils.cvKeyNumbers['y']:# if no translation could computed or it is not satisfactory | 102 if t==None or key != cvutils.cvKeyNumbers['y']:# if no translation could computed or it is not satisfactory |
101 # image should be right to get points | 103 # image should be right to get points |
109 newImgPts = np.array([list(p) for p in plt.ginput(n=wldPts.shape[0], timeout=-1)], dtype = np.float32) | 111 newImgPts = np.array([list(p) for p in plt.ginput(n=wldPts.shape[0], timeout=-1)], dtype = np.float32) |
110 | 112 |
111 homography, mask = cv2.findHomography(newImgPts, wldPts) # method=0, ransacReprojThreshold=3 | 113 homography, mask = cv2.findHomography(newImgPts, wldPts) # method=0, ransacReprojThreshold=3 |
112 print homography | 114 print homography |
113 np.savetxt(utils.removeExtension(filenames[i])+'-homography.txt',homography) | 115 np.savetxt(utils.removeExtension(filenames[i])+'-homography.txt',homography) |
114 np.savetxt(utils.removeExtension(filenames[i])+'-point-correspondences.txt', append(wldPts.T, newImgPts.T, axis=0)) | 116 np.savetxt(utils.removeExtension(filenames[i])+'-point-correspondences.txt', np.append(wldPts.T, newImgPts.T, axis=0)) |
115 | 117 |
116 cv2.destroyAllWindows() | 118 cv2.destroyAllWindows() |