changeset 549:b5525249eda1

Merged in mohamedgomaa/trafficintelligence (pull request #7): add some functions for behaviour analysis and correct a few bugs
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Tue, 08 Jul 2014 16:32:09 -0400
parents 9816fab353f3 (diff) e6ab4caf359c (current diff)
children 5668af2ff515
files python/cvutils.py python/moving.py python/storage.py
diffstat 7 files changed, 149 insertions(+), 15 deletions(-)
--- a/c/feature-based-tracking.cpp	Tue Jul 08 15:22:30 2014 -0400
+++ b/c/feature-based-tracking.cpp	Tue Jul 08 16:32:09 2014 -0400
@@ -374,6 +374,8 @@
   } else if (params.groupFeatures) {
     cout << "The program groups features" << endl;
     groupFeatures(params);
+  } else {
+    cout << "Main option missing or misspelt" << endl;
   }
 
   return 0;
--- a/python/cvutils.py	Tue Jul 08 15:22:30 2014 -0400
+++ b/python/cvutils.py	Tue Jul 08 16:32:09 2014 -0400
@@ -121,6 +121,8 @@
 
     def playVideo(filename, firstFrameNum = 0, frameRate = -1, interactive = False, printFrames = True, text = None, rescale = 1.):
         '''Plays the video'''
+        windowName = 'frame'
+        cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
         wait = 5
         if frameRate > 0:
             wait = int(round(1000./frameRate))
@@ -140,7 +142,7 @@
                     frameNum+=1
                     if text != None:
                        cv2.putText(img, text, (10,50), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvRed) 
-                    cvImshow('frame', img, rescale)
+                    cvImshow(windowName, img, rescale)
                     key = cv2.waitKey(wait)
             cv2.destroyAllWindows()
         else:
@@ -220,6 +222,9 @@
         width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
         height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
 
+        windowName = 'frame'
+        #cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
+
         if undistort: # setup undistortion
             [map1, map2] = computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients)
         if capture.isOpened():
@@ -259,7 +264,7 @@
                                 objDescription += userTypeNames[obj.userType][0].upper()
                             cv2.putText(img, objDescription, obj.projectedPositions[frameNum-obj.getFirstInstant()].asint().astuple(), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvRed)
                     if not saveAllImages:
-                        cvImshow('frame', img, rescale)
+                        cvImshow(windowName, img, rescale)
                         key = cv2.waitKey()
                     if saveAllImages or saveKey(key):
                         cv2.imwrite('image-{{:0{}}}.png'.format(nZerosFilename).format(frameNum), img)
@@ -325,6 +330,78 @@
                     invMap2[y,x] = res[1]
         return invMap1, invMap2
 
+    def cameraIntrinsicCalibration(path, checkerBoardSize=[6,7], secondPassSearch=False, display=False):
+        ''' Camera calibration searches through all the images (jpg or png) located
+            in _path_ for matches to a checkerboard pattern of size checkerBoardSize.
+            These images should all be taken with the same camera at the same resolution.
+
+            For best results, use an asymmetric board and ensure that the image has
+            very high contrast, including the background. Suitable checkerboard:
+            http://ftp.isr.ist.utl.pt/pub/roswiki/attachments/camera_calibration(2f)Tutorials(2f)StereoCalibration/check-108.png
+
+            The code below is based on:
+            https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html
+            Modified by Paul St-Aubin
+            '''
+        from numpy import zeros, mgrid, float32, savetxt
+        import glob, os
+
+        # termination criteria
+        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
+
+        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
+        objp = zeros((checkerBoardSize[0]*checkerBoardSize[1],3), float32)
+        objp[:,:2] = mgrid[0:checkerBoardSize[1],0:checkerBoardSize[0]].T.reshape(-1,2)
+
+        # Arrays to store object points and image points from all the images.
+        objpoints = [] # 3d point in real world space
+        imgpoints = [] # 2d points in image plane.
+
+        ## Loop through all images in _path_
+        images = glob.glob(os.path.join(path,'*.[jJ][pP][gG]'))+glob.glob(os.path.join(path,'*.[jJ][pP][eE][gG]'))+glob.glob(os.path.join(path,'*.[pP][nN][gG]'))
+        for fname in images:
+            img = cv2.imread(fname)
+            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+
+            # Find the chess board corners
+            ret, corners = cv2.findChessboardCorners(gray, (checkerBoardSize[1],checkerBoardSize[0]), None)
+
+            # If found, add object points, image points (after refining them)
+            if ret:
+                print 'Found pattern in '+fname
+                
+                if(secondPassSearch): 
+                    corners = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
+
+                objpoints.append(objp)
+                imgpoints.append(corners)
+
+                # Draw and display the corners
+                if display:
+                    # cv2.drawChessboardCorners draws the detected corners in place (it may return None in OpenCV 2.x)
+                    cv2.drawChessboardCorners(img, (checkerBoardSize[1],checkerBoardSize[0]), corners, ret)
+                    cv2.imshow('img', img)
+                    cv2.waitKey(0)
+
+        ## Close up image loading and calibrate
+        cv2.destroyAllWindows()
+        if len(objpoints) == 0 or len(imgpoints) == 0: 
+            return False
+        try:
+            ret, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
+        except NameError:
+            return False
+        savetxt('intrinsic-camera.txt', camera_matrix)
+        return camera_matrix, dist_coeffs
+
+    def undistortImage(img, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1., interpolation=cv2.INTER_LINEAR):
+        '''Undistorts the image passed as argument'''
+        width = img.shape[1]
+        height = img.shape[0]
+        [map1, map2] = computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients)
+        return cv2.remap(img, map1, map2, interpolation=interpolation)
+
+
 def printCvMat(cvmat, out = stdout):
     '''Prints the cvmat to out'''
     print('Deprecated, use new interface')
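
A minimal usage sketch of the two new cvutils functions above (the image folder, file name and multiplication value are hypothetical, and python/ is assumed to be on the PYTHONPATH):

    import cv2
    import cvutils

    # calibrate from a folder of checkerboard snapshots taken with the same camera
    result = cvutils.cameraIntrinsicCalibration('./calibration-images/', checkerBoardSize=[6,7], secondPassSearch=True)
    if result is not False:
        cameraMatrix, distCoeffs = result
        # undistort a single frame with the estimated parameters
        img = cv2.imread('frame-0000.png')
        undistorted = cvutils.undistortImage(img, intrinsicCameraMatrix=cameraMatrix, distortionCoefficients=distCoeffs, undistortedImageMultiplication=1.31)
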
--- a/python/moving.py	Tue Jul 08 15:22:30 2014 -0400
+++ b/python/moving.py	Tue Jul 08 16:32:09 2014 -0400
@@ -542,10 +542,12 @@
             return Trajectory([[a-b for a,b in zip(self.getXCoordinates(),traj2.getXCoordinates())],
                                [a-b for a,b in zip(self.getYCoordinates(),traj2.getYCoordinates())]])
 
-    def differentiate(self):
+    def differentiate(self, doubleLastPosition = False):
         diff = Trajectory()
         for i in xrange(1, self.length()):
             diff.addPosition(self[i]-self[i-1])
+        if doubleLastPosition:
+            diff.addPosition(diff[-1])
         return diff
 
     def norm(self):
@@ -633,11 +635,13 @@
     lateral coordinate is stored as second coordinate'''
 
     def __init__(self, S = None, Y = None, lanes = None):
-        if S == None or Y == None:
+        if S == None or Y == None or len(S) != len(Y):
             self.positions = [[],[]]
+            if S != None and Y != None and len(S) != len(Y):
+                print("S and Y coordinates of different lengths\nInitializing to empty lists")
         else:
             self.positions = [S,Y]
-        if lanes == None:
+        if lanes == None or len(lanes) != self.length():
             self.lanes = []
         else:
             self.lanes = lanes
@@ -655,15 +659,30 @@
     def getLanes(self):
         return self.lanes
 
-    def addPosition(self, s, y, lane):
+    def addPositionSYL(self, s, y, lane):
         self.addPositionXY(s,y)
         self.lanes.append(lane)
 
+    def addPosition(self, p):
+        'Adds a position in curvilinear point format, i.e. a list of 3 values [s, y, lane]'
+        self.addPositionSYL(p[0], p[1], p[2])
+
     def setPosition(self, i, s, y, lane):
         self.setPositionXY(i, s, y)
         if i < self.__len__():
             self.lanes[i] = lane
 
+    def differentiate(self, doubleLastPosition = False):
+        diff = CurvilinearTrajectory()
+        p1 = self[0]
+        for i in xrange(1, self.length()):
+            p2 = self[i]
+            diff.addPositionSYL(p2[0]-p1[0], p2[1]-p1[1], p1[2])
+            p1=p2
+        if doubleLastPosition:
+            diff.addPosition(diff[-1])
+        return diff
+
     def getIntersections(self, S1, lane = None):
         '''Returns a list of the indices at which the trajectory 
         goes past the curvilinear coordinate S1
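
A brief sketch of how the renamed addPositionSYL, the new point-style addPosition and CurvilinearTrajectory.differentiate fit together (the coordinate values are illustrative):

    from moving import CurvilinearTrajectory

    t = CurvilinearTrajectory(S=[1., 2.], Y=[0.5, 0.5], lanes=['1', '1'])
    t.addPositionSYL(3., 0.6, '1')  # explicit s, y, lane arguments
    t.addPosition([5., 0.7, '1'])   # the same operation, passed as a single [s, y, lane] point
    velocities = t.differentiate(doubleLastPosition=True)  # repeats the last difference so the result has the same length as t
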
--- a/python/storage.py	Tue Jul 08 15:22:30 2014 -0400
+++ b/python/storage.py	Tue Jul 08 16:32:09 2014 -0400
@@ -568,7 +568,7 @@
             finally: self.sechead = None
         else: return self.fp.readline()
 
-def loadTrajectoriesFromVissimFile(filename, simulationStepsPerTimeUnit, nObjects = -1):
+def loadTrajectoriesFromVissimFile(filename, simulationStepsPerTimeUnit, nObjects = -1, warmUpLastInstant = None):
     '''Reads data from VISSIM .fzp trajectory file
     simulationStepsPerTimeUnit is the number of simulation steps per unit of time used by VISSIM
     for example, there seems to be 5 simulation steps per simulated second in VISSIM, 
@@ -577,6 +577,7 @@
 
     Assumed to be sorted over time'''
     objects = {} # dictionary of objects index by their id
+    firstInstants = {}
 
     infile = openCheck(filename, quitting = True)
 
@@ -592,13 +593,15 @@
         s = float(data[4])
         y = float(data[5])
         lane = data[2]+'_'+data[3]
-        if objNum not in objects:
-            objects[objNum] = moving.MovingObject(num = objNum, timeInterval = moving.TimeInterval(instant, instant))
-            objects[objNum].curvilinearPositions = moving.CurvilinearTrajectory()
-        objects[objNum].timeInterval.last = instant
-        objects[objNum].curvilinearPositions.addPosition(s, y, lane)
-        if nObjects > 0 and len(objects) > nObjects:
-            return objects.values()[:nObjects]
+        if objNum not in firstInstants:
+            firstInstants[objNum] = instant
+            if warmUpLastInstant == None or firstInstants[objNum] >= warmUpLastInstant:
+                if nObjects < 0 or len(objects) < nObjects:
+                    objects[objNum] = moving.MovingObject(num = objNum, timeInterval = moving.TimeInterval(instant, instant))
+                    objects[objNum].curvilinearPositions = moving.CurvilinearTrajectory()
+        if (warmUpLastInstant == None or firstInstants[objNum] >= warmUpLastInstant) and objNum in objects:
+            objects[objNum].timeInterval.last = instant
+            objects[objNum].curvilinearPositions.addPositionSYL(s, y, lane)
 
     return objects.values()
     
@@ -650,7 +653,7 @@
         else:
             obj.laneNums.append(int(numbers[13]))
             obj.positions.addPositionXY(float(numbers[6]), float(numbers[7]))
-            obj.curvilinearPositions.addPosition(float(numbers[5]), float(numbers[4]), obj.laneNums[-1])
+            obj.curvilinearPositions.addPositionSYL(float(numbers[5]), float(numbers[4]), obj.laneNums[-1])
             obj.speeds.append(float(numbers[11]))
             obj.precedingVehicles.append(int(numbers[14]))
             obj.followingVehicles.append(int(numbers[15]))
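
A usage sketch of the new warmUpLastInstant parameter of loadTrajectoriesFromVissimFile (the file name and warm-up value are hypothetical; 3000 steps correspond to a 10 minute warm-up at 5 simulation steps per second):

    import storage

    # load VISSIM trajectories, ignoring every vehicle that first appears before instant 3000
    objects = storage.loadTrajectoriesFromVissimFile('./simulation.fzp', simulationStepsPerTimeUnit=5, warmUpLastInstant=3000)
    for o in objects:
        print o.num, o.curvilinearPositions.length()
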
--- a/python/tests/moving.txt	Tue Jul 08 15:22:30 2014 -0400
+++ b/python/tests/moving.txt	Tue Jul 08 16:32:09 2014 -0400
@@ -77,6 +77,14 @@
 >>> t1.getTrajectoryInPolygonNoShapely(np.array([[10,10],[14,10],[14,13],[10,13]])).length()
 0
 
+>>> t1.differentiate()
+(1.000000,3.000000) (1.000000,3.000000)
+>>> t1.differentiate(True)
+(1.000000,3.000000) (1.000000,3.000000) (1.000000,3.000000)
+>>> t1 = Trajectory([[0.5,1.5,3.5],[0.5,2.5,7.5]])
+>>> t1.differentiate()
+(1.000000,2.000000) (2.000000,5.000000)
+
 >>> from utils import LCSS
 >>> lcss = LCSS(lambda x,y: Point.distanceNorm2(x,y) <= 0.1)
 >>> Trajectory.lcss(t1, t1, lcss)
@@ -102,6 +110,12 @@
 >>> Point.timeToCollision(p2, p1, v2, v1, 0.) == None
 True
 
+>>> t = CurvilinearTrajectory(S = [1., 2., 3., 5.], Y = [0.5, 0.5, 0.6, 0.7], lanes = ['1']*4)
+>>> t.differentiate() # doctest:+ELLIPSIS
+[1.0, 0.0, '1'] [1.0, 0.099..., '1'] [2.0, 0.099..., '1']
+>>> t.differentiate(True) # doctest:+ELLIPSIS
+[1.0, 0.0, '1'] [1.0, 0.099..., '1'] [2.0, 0.099..., '1'] [2.0, 0.099..., '1']
+
 >>> o1 = MovingObject(positions = Trajectory([[0]*3,[2]*3]), velocities = Trajectory([[0]*3,[1]*3]))
 >>> o1.classifyUserTypeSpeedMotorized(0.5, np.median)
 >>> userTypeNames[o1.getUserType()]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/run-tests.sh	Tue Jul 08 16:32:09 2014 -0400
@@ -0,0 +1,17 @@
+#!/bin/sh
+echo "------------"
+echo "Python tests"
+cd python
+./run-tests.sh
+cd ..
+echo "------------"
+echo "C++ tests"
+if [ -f ./bin/tests ]
+then
+    ./bin/tests
+else
+    echo "The test executable has not been compiled"
+fi
+echo "------------"
+echo "Script tests"
+./scripts/run-tests.sh
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/run-tests.sh	Tue Jul 08 16:32:09 2014 -0400
@@ -0,0 +1,2 @@
+#!/bin/sh
+echo 'no tests'
\ No newline at end of file