comparison python/cvutils.py @ 614:5e09583275a4

Merged Nicolas/trafficintelligence into default
author Mohamed Gomaa <eng.m.gom3a@gmail.com>
date Fri, 05 Dec 2014 12:13:53 -0500
parents b5525249eda1
children 9202628a4130
comparing 598:11f96bd08552 with 614:5e09583275a4
@@ -1,15 +1,21 @@
 #! /usr/bin/env python
 '''Image/Video utilities'''

-import Image, ImageDraw # PIL
 try:
     import cv2
-    opencvExists = True
+    opencvAvailable = True
 except ImportError:
-    print('OpenCV library could not be loaded')
-    opencvExists = False
+    print('OpenCV library could not be loaded (video replay functions will not be available)') # TODO change to logging module
+    opencvAvailable = False
+try:
+    import skimage
+    skimageAvailable = True
+except ImportError:
+    print('Scikit-image library could not be loaded (HoG-based classification methods will not be available)')
+    skimageAvailable = False
+
 from sys import stdout

 import utils

 #import aggdraw # agg on top of PIL (antialiased drawing)
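Since the availability flags are now opencvAvailable and skimageAvailable (formerly opencvExists), calling code should test them before relying on the optional functionality. A minimal sketch (the video file name is made up):

import cvutils

if cvutils.opencvAvailable:
    cvutils.playVideo('laurier.avi')  # hypothetical video file
else:
    print('skipping video replay, OpenCV could not be loaded')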
@@ -28,22 +34,23 @@
     return chr(key&255)== 'q' or chr(key&255) == 'Q'

 def saveKey(key):
     return chr(key&255) == 's'

-def drawLines(filename, origins, destinations, w = 1, resultFilename='image.png'):
+def plotLines(filename, origins, destinations, w = 1, resultFilename='image.png'):
     '''Draws lines over the image '''
+    import Image, ImageDraw # PIL

     img = Image.open(filename)

     draw = ImageDraw.Draw(img)
     #draw = aggdraw.Draw(img)
     #pen = aggdraw.Pen("red", width)
     for p1, p2 in zip(origins, destinations):
         draw.line([p1.x, p1.y, p2.x, p2.y], width = w, fill = (256,0,0))
         #draw.line([p1.x, p1.y, p2.x, p2.y], pen)
     del draw

     #out = utils.openCheck(resultFilename)
     img.save(resultFilename)

 def matlab2PointCorrespondences(filename):
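For reference, a small usage sketch of the renamed plotLines (formerly drawLines). The image names are made up and the points are assumed to expose x and y attributes, as moving.Point from this project does:

from cvutils import plotLines
from moving import Point  # assumed: provides the x/y attributes plotLines expects

origins = [Point(10, 20), Point(50, 60)]
destinations = [Point(110, 20), Point(50, 160)]
plotLines('frame.png', origins, destinations, w = 2, resultFilename = 'frame-lines.png')  # hypothetical files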
@@ -61,121 +68,354 @@
     points = loadtxt(filename, dtype=float32)
     return (points[:2,:].T, points[2:,:].T) # (world points, image points)

 def cvMatToArray(cvmat):
     '''Converts an OpenCV CvMat to numpy array.'''
+    print('Deprecated, use new interface')
     from numpy.core.multiarray import zeros
     a = zeros((cvmat.rows, cvmat.cols))#array([[0.0]*cvmat.width]*cvmat.height)
     for i in xrange(cvmat.rows):
         for j in xrange(cvmat.cols):
             a[i,j] = cvmat[i,j]
     return a

-if opencvExists:
-    def computeHomography(srcPoints, dstPoints, method=0, ransacReprojThreshold=0.0):
+if opencvAvailable:
+    def computeHomography(srcPoints, dstPoints, method=0, ransacReprojThreshold=3.0):
         '''Returns the homography matrix mapping from srcPoints to dstPoints (dimension Nx2)'''
         H, mask = cv2.findHomography(srcPoints, dstPoints, method, ransacReprojThreshold)
         return H

-    def arrayToCvMat(a, t = cv2.cv.CV_64FC1):
+    def arrayToCvMat(a, t = cv2.CV_64FC1):
         '''Converts a numpy array to an OpenCV CvMat, with default type CV_64FC1.'''
+        print('Deprecated, use new interface')
         cvmat = cv2.cv.CreateMat(a.shape[0], a.shape[1], t)
         for i in range(cvmat.rows):
             for j in range(cvmat.cols):
                 cvmat[i,j] = a[i,j]
         return cvmat

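computeHomography is a thin wrapper over cv2.findHomography, now with a 3.0 pixel default RANSAC reprojection threshold. A minimal sketch with made-up correspondences (at least four Nx2 points are needed):

from numpy import array
from cvutils import computeHomography

# made-up image (pixel) and world (metric) correspondences
imagePoints = array([[100., 200.], [400., 210.], [420., 380.], [90., 400.]])
worldPoints = array([[0., 0.], [10., 0.], [10., 8.], [0., 8.]])
H = computeHomography(imagePoints, worldPoints)  # method=0: least-squares fit over all points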
-    def draw(img, positions, color, lastCoordinate = None):
+    def cvPlot(img, positions, color, lastCoordinate = None):
         last = lastCoordinate+1
         if lastCoordinate != None and lastCoordinate >=0:
             last = min(positions.length()-1, lastCoordinate)
         for i in range(0, last-1):
             cv2.line(img, positions[i].asint().astuple(), positions[i+1].asint().astuple(), color)

-    def playVideo(filename, firstFrameNum = 0, frameRate = -1):
+    def cvImshow(windowName, img, rescale = 1.0):
+        'Rescales the image (in particular if too large)'
+        from cv2 import resize
+        if rescale != 1.:
+            size = (int(round(img.shape[1]*rescale)), int(round(img.shape[0]*rescale)))
+            resizedImg = resize(img, size)
+            cv2.imshow(windowName, resizedImg)
+        else:
+            cv2.imshow(windowName, img)
+
+    def computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients):
+        from copy import deepcopy
+        from numpy import identity, array
+        newImgSize = (int(round(width*undistortedImageMultiplication)), int(round(height*undistortedImageMultiplication)))
+        newCameraMatrix = deepcopy(intrinsicCameraMatrix)
+        newCameraMatrix[0,2] = newImgSize[0]/2.
+        newCameraMatrix[1,2] = newImgSize[1]/2.
+        return cv2.initUndistortRectifyMap(intrinsicCameraMatrix, array(distortionCoefficients), identity(3), newCameraMatrix, newImgSize, cv2.CV_32FC1)
+
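computeUndistortMaps returns the two maps consumed by cv2.remap, the same pattern used by undistortImage further down. A minimal sketch, assuming OpenCV 2.4-era bindings (cv2.CV_32FC1) and made-up intrinsic parameters:

import cv2
from numpy import array
from cvutils import computeUndistortMaps

intrinsicCameraMatrix = array([[377.4, 0., 639.5], [0., 377.4, 359.5], [0., 0., 1.]])  # made-up intrinsics
distortionCoefficients = [-0.11, 0.05, 0., 0., 0.]  # made-up coefficients
[map1, map2] = computeUndistortMaps(1280, 720, 1.31, intrinsicCameraMatrix, distortionCoefficients)
undistorted = cv2.remap(cv2.imread('frame.png'), map1, map2, interpolation = cv2.INTER_LINEAR)  # hypothetical image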
+    def playVideo(filename, firstFrameNum = 0, frameRate = -1, interactive = False, printFrames = True, text = None, rescale = 1.):
         '''Plays the video'''
+        windowName = 'frame'
+        cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
         wait = 5
         if frameRate > 0:
             wait = int(round(1000./frameRate))
+        if interactive:
+            wait = 0
         capture = cv2.VideoCapture(filename)
         if capture.isOpened():
             key = -1
             ret = True
             frameNum = firstFrameNum
             capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNum)
             while ret and not quitKey(key):
                 ret, img = capture.read()
                 if ret:
-                    print('frame {0}'.format(frameNum))
+                    if printFrames:
+                        print('frame {0}'.format(frameNum))
                     frameNum+=1
-                    cv2.imshow('frame', img)
+                    if text != None:
+                        cv2.putText(img, text, (10,50), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvRed)
+                    cvImshow(windowName, img, rescale)
                     key = cv2.waitKey(wait)
-
-    def getImagesFromVideo(filename, nImages = 1, saveImage = False):
-        '''Returns nImages images from the video sequence'''
+            cv2.destroyAllWindows()
+        else:
+            print('Video capture for {} failed'.format(filename))
+
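A usage sketch for the extended playVideo signature (file name and values are made up); interactive = True makes cv2.waitKey block until a key press, and rescale shrinks frames before display:

from cvutils import playVideo

# replay a hypothetical video at ~30 fps from frame 1000, at half size, with a caption overlaid
playVideo('laurier.avi', firstFrameNum = 1000, frameRate = 30, text = 'sequence 1', rescale = 0.5)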
+    def getImagesFromVideo(videoFilename, firstFrameNum = 0, nFrames = 1, saveImage = False, outputPrefix = 'image'):
+        '''Returns nFrames images from the video sequence'''
+        from math import floor, log10
         images = []
-        capture = cv2.VideoCapture(filename)
+        capture = cv2.VideoCapture(videoFilename)
         if capture.isOpened():
+            nDigits = int(floor(log10(capture.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))))+1
             ret = False
-            numImg = 0
-            while numImg<nImages:
+            capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNum)
+            imgNum = 0
+            while imgNum<nFrames:
                 ret, img = capture.read()
                 i = 0
                 while not ret and i<10:
                     ret, img = capture.read()
                     i += 1
                 if img.size>0:
-                    numImg +=1
                     if saveImage:
-                        cv2.imwrite('image{0:04d}.png'.format(numImg), img)
+                        imgNumStr = format(firstFrameNum+imgNum, '0{}d'.format(nDigits))
+                        cv2.imwrite(outputPrefix+imgNumStr+'.png', img)
                     else:
                         images.append(img)
+                    imgNum +=1
+            capture.release()
+        else:
+            print('Video capture for {} failed'.format(videoFilename))
         return images

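A sketch of the reworked getImagesFromVideo: with saveImage = True frames are written to numbered PNG files built from outputPrefix, otherwise they are returned as a list of arrays (the video name is made up):

from cvutils import getImagesFromVideo

frames = getImagesFromVideo('laurier.avi', firstFrameNum = 200, nFrames = 5)  # keep 5 frames in memory
getImagesFromVideo('laurier.avi', firstFrameNum = 200, nFrames = 10, saveImage = True, outputPrefix = 'frame-')  # write frame-<num>.png files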
-    def displayTrajectories(videoFilename, objects, homography = None, firstFrameNum = 0, lastFrameNumArg = None):
+    def getFPS(videoFilename):
+        capture = cv2.VideoCapture(videoFilename)
+        if capture.isOpened():
+            fps = capture.get(cv2.cv.CV_CAP_PROP_FPS)
+            capture.release()
+            return fps
+        else:
+            print('Video capture for {} failed'.format(videoFilename))
+            return None
+
+    def imageBox(img, obj, frameNum, homography, width, height, px = 0.2, py = 0.2, pixelThreshold = 800):
+        'Computes the bounding box of object at frameNum'
+        x = []
+        y = []
+        for f in obj.features:
+            if f.existsAtInstant(frameNum):
+                projectedPosition = f.getPositionAtInstant(frameNum).project(homography)
+                x.append(projectedPosition.x)
+                y.append(projectedPosition.y)
+        xmin = min(x)
+        xmax = max(x)
+        ymin = min(y)
+        ymax = max(y)
+        xMm = px * (xmax - xmin)
+        yMm = py * (ymax - ymin)
+        a = max(ymax - ymin + (2 * yMm), xmax - (xmin + 2 * xMm))
+        yCropMin = int(max(0, .5 * (ymin + ymax - a)))
+        yCropMax = int(min(height - 1, .5 * (ymin + ymax + a)))
+        xCropMin = int(max(0, .5 * (xmin + xmax - a)))
+        xCropMax = int(min(width - 1, .5 * (xmin + xmax + a)))
+        if yCropMax != yCropMin and xCropMax != xCropMin and (yCropMax - yCropMin) * (xCropMax - xCropMin) > pixelThreshold:
+            croppedImg = img[yCropMin : yCropMax, xCropMin : xCropMax]
+        else:
+            croppedImg = []
+        return croppedImg, yCropMin, yCropMax, xCropMin, xCropMax
+
+
+    def displayTrajectories(videoFilename, objects, boundingBoxes = {}, homography = None, firstFrameNum = 0, lastFrameNumArg = None, printFrames = True, rescale = 1., nFramesStep = 1, saveAllImages = False, undistort = False, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1.):
         '''Displays the objects overlaid frame by frame over the video '''
+        from moving import userTypeNames
+        from math import ceil, log10
+
         capture = cv2.VideoCapture(videoFilename)
+        width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
+        height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
+
+        windowName = 'frame'
+        #cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
+
+        if undistort: # setup undistortion
+            [map1, map2] = computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients)
         if capture.isOpened():
             key = -1
             ret = True
             frameNum = firstFrameNum
             capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNum)
-            if not lastFrameNumArg:
+            if lastFrameNumArg == None:
                 from sys import maxint
                 lastFrameNum = maxint
             else:
                 lastFrameNum = lastFrameNumArg
+            nZerosFilename = int(ceil(log10(lastFrameNum)))
             while ret and not quitKey(key) and frameNum < lastFrameNum:
                 ret, img = capture.read()
                 if ret:
-                    print('frame {0}'.format(frameNum))
+                    if undistort:
+                        img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
+                    if printFrames:
+                        print('frame {0}'.format(frameNum))
                     for obj in objects:
                         if obj.existsAtInstant(frameNum):
                             if not hasattr(obj, 'projectedPositions'):
                                 if homography != None:
                                     obj.projectedPositions = obj.positions.project(homography)
                                 else:
                                     obj.projectedPositions = obj.positions
-                            draw(img, obj.projectedPositions, cvRed, frameNum-obj.getFirstInstant())
-                            cv2.putText(img, '{0}'.format(obj.num), obj.projectedPositions[frameNum-obj.getFirstInstant()].asint().astuple(), cv2.FONT_HERSHEY_PLAIN, 1, cvRed)
-                    cv2.imshow('frame', img)
-                    key = cv2.waitKey()
-                    if saveKey(key):
-                        cv2.imwrite('image.png', img)
-                frameNum += 1
-
+                            cvPlot(img, obj.projectedPositions, cvRed, frameNum-obj.getFirstInstant())
+                            if frameNum in boundingBoxes.keys():
+                                for rect in boundingBoxes[frameNum]:
+                                    cv2.rectangle(img, rect[0].asint().astuple(), rect[1].asint().astuple(), cvRed)
+                            elif len(obj.features) != 0:
+                                imgcrop, yCropMin, yCropMax, xCropMin, xCropMax = imageBox(img, obj, frameNum, homography, width, height)
+                                cv2.rectangle(img, (xCropMin, yCropMin), (xCropMax, yCropMax), cvBlue, 1)
+                            objDescription = '{} '.format(obj.num)
+                            if userTypeNames[obj.userType] != 'unknown':
+                                objDescription += userTypeNames[obj.userType][0].upper()
+                            cv2.putText(img, objDescription, obj.projectedPositions[frameNum-obj.getFirstInstant()].asint().astuple(), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvRed)
+                    if not saveAllImages:
+                        cvImshow(windowName, img, rescale)
+                        key = cv2.waitKey()
+                    if saveAllImages or saveKey(key):
+                        cv2.imwrite('image-{{:0{}}}.png'.format(nZerosFilename).format(frameNum), img)
+                frameNum += nFramesStep
+                if nFramesStep > 1:
+                    capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum)
+            cv2.destroyAllWindows()
+        else:
+            print 'Cannot load file ' + videoFilename
+
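A sketch of driving the extended displayTrajectories with objects loaded from the project's storage module (assumed available; the database and video names are made up):

import storage  # trafficintelligence storage module (assumed)
from cvutils import displayTrajectories

objects = storage.loadTrajectoriesFromSqlite('sequence.sqlite', 'object')  # hypothetical database
displayTrajectories('laurier.avi', objects, firstFrameNum = 0, rescale = 0.75, printFrames = False)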
+    def computeHomographyFromPDTV(cameraFilename, method=0, ransacReprojThreshold=3.0):
+        '''Returns the homography matrix at ground level from PDTV format
+        https://bitbucket.org/hakanardo/pdtv'''
+        import pdtv
+        from numpy import array
+        camera = pdtv.load(cameraFilename)
+        srcPoints = [[x,y] for x, y in zip([1.,2.,2.,1.],[1.,1.,2.,2.])] # need floats!!
+        dstPoints = []
+        for srcPoint in srcPoints:
+            projected = camera.image_to_world(tuple(srcPoint))
+            dstPoints.append([projected[0], projected[1]])
+        H, mask = cv2.findHomography(array(srcPoints), array(dstPoints), method, ransacReprojThreshold)
+        return H
+
+    def undistortedCoordinates(map1, map2, x, y, maxDistance = 1.):
+        '''Returns the coordinates of a point in undistorted image
+        map1 and map2 are the mapping functions from undistorted image
+        to distorted (original image)
+        map1(x,y) = originalx, originaly'''
+        from numpy import abs, logical_and, unravel_index, dot, sum
+        from matplotlib.mlab import find
+        distx = abs(map1-x)
+        disty = abs(map2-y)
+        indices = logical_and(distx<maxDistance, disty<maxDistance)
+        closeCoordinates = unravel_index(find(indices), distx.shape) # returns i,j, ie y,x
+        xWeights = 1-distx[indices]
+        yWeights = 1-disty[indices]
+        return dot(xWeights, closeCoordinates[1])/sum(xWeights), dot(yWeights, closeCoordinates[0])/sum(yWeights)
+
+    def undistortTrajectoryFromCVMapping(map1, map2, t):
+        '''test 'perfect' inversion'''
+        from moving import Trajectory
+        from numpy import isnan
+        undistortedTrajectory = Trajectory()
+        for i,p in enumerate(t):
+            res = undistortedCoordinates(map1, map2, p.x,p.y)
+            if not isnan(res).any():
+                undistortedTrajectory.addPositionXY(res[0], res[1])
+            else:
+                print i,p,res
+        return undistortedTrajectory
+
+    def computeInverseMapping(originalImageSize, map1, map2):
+        'Computes inverse mapping from maps provided by cv2.initUndistortRectifyMap'
+        from numpy import ones, isnan
+        invMap1 = -ones(originalImageSize)
+        invMap2 = -ones(originalImageSize)
+        for x in range(0,originalImageSize[1]):
+            for y in range(0,originalImageSize[0]):
+                res = undistortedCoordinates(x,y, map1, map2)
+                if not isnan(res).any():
+                    invMap1[y,x] = res[0]
+                    invMap2[y,x] = res[1]
+        return invMap1, invMap2
+
+    def cameraIntrinsicCalibration(path, checkerBoardSize=[6,7], secondPassSearch=False, display=False):
+        ''' Camera calibration searches through all the images (jpg or png) located
+        in _path_ for matches to a checkerboard pattern of size checkboardSize.
+        These images should all be of the same camera with the same resolution.
+
+        For best results, use an asymetric board and ensure that the image has
+        very high contrast, including the background. Suitable checkerboard:
+        http://ftp.isr.ist.utl.pt/pub/roswiki/attachments/camera_calibration(2f)Tutorials(2f)StereoCalibration/check-108.png
+
+        The code below is based off of:
+        https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html
+        Modified by Paul St-Aubin
+        '''
+        from numpy import zeros, mgrid, float32, savetxt
+        import glob, os
+
+        # termination criteria
+        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
+
+        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
+        objp = zeros((checkerBoardSize[0]*checkerBoardSize[1],3), float32)
+        objp[:,:2] = mgrid[0:checkerBoardSize[1],0:checkerBoardSize[0]].T.reshape(-1,2)
+
+        # Arrays to store object points and image points from all the images.
+        objpoints = [] # 3d point in real world space
+        imgpoints = [] # 2d points in image plane.
+
+        ## Loop throuhg all images in _path_
+        images = glob.glob(os.path.join(path,'*.[jJ][pP][gG]'))+glob.glob(os.path.join(path,'*.[jJ][pP][eE][gG]'))+glob.glob(os.path.join(path,'*.[pP][nN][gG]'))
+        for fname in images:
+            img = cv2.imread(fname)
+            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+
+            # Find the chess board corners
+            ret, corners = cv2.findChessboardCorners(gray, (checkerBoardSize[1],checkerBoardSize[0]), None)
+
+            # If found, add object points, image points (after refining them)
+            if ret:
+                print 'Found pattern in '+fname
+
+                if(secondPassSearch):
+                    corners = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
+
+                objpoints.append(objp)
+                imgpoints.append(corners)
+
+                # Draw and display the corners
+                if(display):
+                    img = cv2.drawChessboardCorners(img, (checkerBoardSize[1],checkerBoardSize[0]), corners, ret)
+                    if(img):
+                        cv2.imshow('img',img)
+                        cv2.waitKey(0)
+
+        ## Close up image loading and calibrate
+        cv2.destroyAllWindows()
+        if len(objpoints) == 0 or len(imgpoints) == 0:
+            return False
+        try:
+            ret, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
+        except NameError:
+            return False
+        savetxt('intrinsic-camera.txt', camera_matrix)
+        return camera_matrix, dist_coeffs
+
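A sketch of running the new intrinsic calibration helper on a folder of checkerboard images (the folder name is made up); it returns the camera matrix and distortion coefficients, or False if no pattern was found, and writes intrinsic-camera.txt as a side effect:

from cvutils import cameraIntrinsicCalibration

result = cameraIntrinsicCalibration('./calibration-images/', checkerBoardSize = [6, 7], secondPassSearch = True)
if result:
    camera_matrix, dist_coeffs = result  # ready for computeUndistortMaps / undistortImage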
+    def undistortImage(img, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1., interpolation=cv2.INTER_LINEAR):
+        '''Undistorts the image passed in argument'''
+        width = img.shape[1]
+        height = img.shape[0]
+        [map1, map2] = computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients)
+        return cv2.remap(img, map1, map2, interpolation=interpolation)
+
+
 def printCvMat(cvmat, out = stdout):
     '''Prints the cvmat to out'''
+    print('Deprecated, use new interface')
     for i in xrange(cvmat.rows):
         for j in xrange(cvmat.cols):
             out.write('{0} '.format(cvmat[i,j]))
         out.write('\n')

 def projectArray(homography, points):
-    '''Returns the coordinates of the projected points (format 2xN points)
-    through homography'''
-    from numpy.core._dotblas import dot
+    '''Returns the coordinates of the projected points through homography
+    (format: array 2xN points)'''
+    from numpy.core import dot
     from numpy.core.multiarray import array
     from numpy.lib.function_base import append

     if points.shape[0] != 2:
         raise Exception('points of dimension {0} {1}'.format(points.shape[0], points.shape[1]))
@@ -183,14 +423,14 @@
     if (homography!=None) and homography.size>0:
         augmentedPoints = append(points,[[1]*points.shape[1]], 0)
         prod = dot(homography, augmentedPoints)
         return prod[0:2]/prod[2]
     else:
-        return p
+        return points

 def project(homography, p):
-    '''Returns the coordinates of the projection of the point p
+    '''Returns the coordinates of the projection of the point p with coordinates p[0], p[1]
     through homography'''
     from numpy import array
     return projectArray(homography, array([[p[0]],[p[1]]]))

 def projectTrajectory(homography, trajectory):
@@ -199,17 +439,43 @@
     [y1, y2, ...]]'''
     from numpy.core.multiarray import array
     return projectArray(homography, array(trajectory))

 def invertHomography(homography):
-    'Returns an inverted homography'
+    '''Returns an inverted homography
+    Unnecessary for reprojection over camera image'''
     from numpy.linalg.linalg import inv
     invH = inv(homography)
     invH /= invH[2,2]
     return invH

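A small worked example of the projection helpers: projectArray applies the homography to points stored as a 2xN array (x coordinates in the first row, y in the second), and invertHomography gives the reverse mapping. The matrix below is arbitrary:

from numpy import array
from cvutils import projectArray, invertHomography

H = array([[0.05, 0., -10.], [0., 0.05, -5.], [0., 0., 1.]])  # arbitrary image-to-world homography
worldPoints = projectArray(H, array([[200., 400.], [100., 300.]]))  # projects the points (200,100) and (400,300)
imagePoints = projectArray(invertHomography(H), worldPoints)  # maps them back to the original pixel coordinates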
-if opencvExists:
+def undistortTrajectory(invMap1, invMap2, positions):
+    from numpy import floor, ceil
+    floorPositions = floor(positions)
+    #ceilPositions = ceil(positions)
+    undistortedTrajectory = [[],[]]
+    for i in xrange(len(positions[0])):
+        x,y = None, None
+        if positions[0][i]+1 < invMap1.shape[1] and positions[1][i]+1 < invMap1.shape[0]:
+            floorX = invMap1[floorPositions[1][i], floorPositions[0][i]]
+            floorY = invMap2[floorPositions[1][i], floorPositions[0][i]]
+            ceilX = invMap1[floorPositions[1][i]+1, floorPositions[0][i]+1]
+            ceilY = invMap2[floorPositions[1][i]+1, floorPositions[0][i]+1]
+            #ceilX = invMap1[ceilPositions[1][i], ceilPositions[0][i]]
+            #ceilY = invMap2[ceilPositions[1][i], ceilPositions[0][i]]
+            if floorX >=0 and floorY >=0 and ceilX >=0 and ceilY >=0:
+                x = floorX+(positions[0][i]-floorPositions[0][i])*(ceilX-floorX)
+                y = floorY+(positions[1][i]-floorPositions[1][i])*(ceilY-floorY)
+        undistortedTrajectory[0].append(x)
+        undistortedTrajectory[1].append(y)
+    return undistortedTrajectory
+
+def projectGInputPoints(homography, points):
+    from numpy import array
+    return projectTrajectory(homography, array(points+[points[0]]).T)
+
+if opencvAvailable:
     def computeTranslation(img1, img2, img1Points, maxTranslation2, minNMatches, windowSize = (5,5), level = 5, criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)):
         '''Computes the translation of img2 with respect to img1
         (loaded using OpenCV as numpy arrays)
         img1Points are used to compute the translation

@@ -231,5 +497,40 @@
         if len(delta) >= minNMatches:
             return median(delta, axis=0)
         else:
             print(dp)
             return None
+
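A sketch of feeding computeTranslation, which expects grayscale frames and the feature points detected in the first one; cv2.goodFeaturesToTrack is the usual source of those points (file names and parameter values are made up):

import cv2
from cvutils import computeTranslation

img1 = cv2.cvtColor(cv2.imread('frame0100.png'), cv2.COLOR_BGR2GRAY)  # hypothetical consecutive frames
img2 = cv2.cvtColor(cv2.imread('frame0101.png'), cv2.COLOR_BGR2GRAY)
img1Points = cv2.goodFeaturesToTrack(img1, 1000, 0.02, 10)
translation = computeTranslation(img1, img2, img1Points, maxTranslation2 = 100., minNMatches = 10)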
+if skimageAvailable:
+    def HOG(image, rescaleSize = (64, 64), orientations=9, pixelsPerCell=(8, 8), cellsPerBlock=(2, 2), visualize=False, normalize=False):
+        from skimage.feature import hog
+        from skimage import color, transform
+
+        bwImg = color.rgb2gray(image)
+        inputImg = transform.resize(bwImg, rescaleSize)
+        features = hog(inputImg, orientations, pixelsPerCell, cellsPerBlock, visualize, normalize)
+        if visualize:
+            from matplotlib.pyplot import imshow, figure, subplot
+            hogViz = features[1]
+            features = features[0]
+            figure()
+            subplot(1,2,1)
+            imshow(img)
+            subplot(1,2,2)
+            imshow(hogViz)
+        return features
+
+    def createHOGTrainingSet(imageDirectory, classLabel, rescaleSize = (64, 64), orientations=9, pixelsPerCell=(8, 8), cellsPerBlock=(2, 2), visualize=False, normalize=False):
+        from os import listdir
+        from numpy import array, float32
+        from matplotlib.pyplot import imread
+
+        inputData = []
+        for filename in listdir(imageDirectory):
+            img = imread(imageDirectory+filename)
+            features = HOG(img, rescaleSize, orientations, pixelsPerCell, cellsPerBlock, visualize, normalize)
+            inputData.append(features)
+
+        nImages = len(inputData)
+        return array(inputData, dtype = float32), array([classLabel]*nImages, dtype = float32)
+
+
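A sketch of building a two-class HoG training set with the new scikit-image helpers (the image folders and labels are made up); the float32 arrays can be fed directly to an OpenCV or scikit-learn classifier:

from numpy import concatenate
from cvutils import createHOGTrainingSet

pedFeatures, pedLabels = createHOGTrainingSet('pedestrians/', 0)  # hypothetical folders of cropped images
cycFeatures, cycLabels = createHOGTrainingSet('cyclists/', 1)
trainingData = concatenate((pedFeatures, cycFeatures))
trainingLabels = concatenate((pedLabels, cycLabels))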