changeset 1255:c0fe55a6b82f

added support for LUMPI 3D data format
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Wed, 03 Apr 2024 12:30:36 -0400
parents a477ad82ab66
children 56d0195d043e
files trafficintelligence/storage.py
diffstat 1 files changed, 126 insertions(+), 68 deletions(-) [+]
line wrap: on
line diff
--- a/trafficintelligence/storage.py	Wed Apr 03 11:38:16 2024 -0400
+++ b/trafficintelligence/storage.py	Wed Apr 03 12:30:36 2024 -0400
@@ -1271,6 +1271,50 @@
     inv_Tr[0:3, 3] = dot(-transpose(Tr[0:3, 0:3]), Tr[0:3, 3])
     return inv_Tr
 
+def compute3Dbox(anno_list):
+    #### LiDAR coordinates; the box rotates around the z axis (rotz)
+    '''
+        ^ z   
+        |
+        |
+        |
+        . - - - - - - - > x
+       /
+      /
+     /
+    v y
+    
+    '''
+    from pykitti.utils import rotz
+
+    R = rotz(anno_list['heading_3d'])
+    l = anno_list['l_3d']
+    w = anno_list['w_3d']
+    h = anno_list['h_3d']
+
+    x_3d = anno_list['x_3d']
+    y_3d = anno_list['y_3d']
+    z_3d = anno_list['z_3d']
+
+    # # 3d bounding box corners x,y,z as center of the 3d bbox
+    # #            0     1      2    3      4     5    6     7
+    x_corners = [l/2,  l/2, -l/2, -l/2,  l/2,  l/2, -l/2, -l/2]
+    y_corners = [w/2, -w/2, -w/2,  w/2,  w/2, -w/2, -w/2,  w/2]
+    z_corners = [h/2,  h/2,  h/2,  h/2, -h/2, -h/2, -h/2, -h/2]
+
+    # 3d bounding box corners x,y,z as center of the bottom surface of the 3d bbox, 
+    # the difference occurs at z_corners: z is measured from the bottom surface instead of the center.
+    # x_corners = [l/2,  l/2, -l/2, -l/2,  l/2,  l/2, -l/2, -l/2]
+    # y_corners = [w/2, -w/2, -w/2,  w/2,  w/2, -w/2, -w/2,  w/2]
+    # z_corners = [h,  h,  h,  h, 0, 0, 0, 0]   
+    corners_3d = dot(R, vstack([x_corners, y_corners, z_corners]))
+    # add the center
+    corners_3d[0, :] = corners_3d[0, :] + x_3d
+    corners_3d[1, :] = corners_3d[1, :] + y_3d
+    corners_3d[2, :] = corners_3d[2, :] + z_3d
+    
+    return transpose(corners_3d)
+
 def loadKITTICalibration(filename):
     '''Loads KITTI calibration data'''
     calib = {}
@@ -1304,7 +1348,7 @@
 # from https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py
 
 
-def loadTrajectoriesFromKITTI(filename, kittiCalibration, oxts, resultFile = False):
+def loadTrajectoriesFromKITTI(filename, kittiCalibration = None, oxts = None, resultFile = False):
     '''Reads data from KITTI ground truth or output from an object detection and tracking method
 
     kittiCalibration is obtained from loading training/testing calibration file for each sequence 00XX.txt
@@ -1314,8 +1358,9 @@
     from pykitti.utils import roty
     from trafficintelligence.cvutils import cartesian2Homogeneous
 
-    invR0 = linalg.inv(reshape(kittiCalibration['R_rect'], (3, 3)))
-    transCam2Velo = transpose(kittiCalibration['Tr_cam_velo'])
+    if kittiCalibration is not None:
+        invR0 = linalg.inv(reshape(kittiCalibration['R_rect'], (3, 3)))
+        transCam2Velo = transpose(kittiCalibration['Tr_cam_velo'])
     
     header = ['frame', # 0, 1, ..., n
               'trackingid', # -1, 0 , 1, ..., k
@@ -1353,89 +1398,102 @@
             tmp = missing.interpolate()
         featureTrajectories = [moving.Trajectory() for i in range(4)]
         for i, r in tmp.iterrows():
-            _, Tr_imu_to_world = oxts[r.frame]
-            # transfer_matrix = {'P2': reshape(kittiCalibration['P2'], (3, 4)),
-            #                    'R_rect': kittiCalibration['R_rect'],
-            #                    'Tr_cam_to_velo': kittiCalibration['Tr_cam_velo'],
-            #                    # transfer_matrix['Tr_velo_to_cam'] = kittiCalibration['Tr_velo_to_cam']
-            #                    'Tr_velo_to_imu': kittiCalibration['Tr_velo_imu'],
-            #                    # transfer_matrix['Tr_imu_to_velo'] = kittiCalibration['Tr_imu_to_velo']
-            #                    'Tr_imu_to_world': Tr_imu2w}
-            # 3D object
-            # compute rotational matrix around yaw axis
-            R = roty(r.ry)
+            if kittiCalibration is not None:
+                _, Tr_imu_to_world = oxts[r.frame]
+                # transfer_matrix = {'P2': reshape(kittiCalibration['P2'], (3, 4)),
+                #                    'R_rect': kittiCalibration['R_rect'],
+                #                    'Tr_cam_to_velo': kittiCalibration['Tr_cam_velo'],
+                #                    # transfer_matrix['Tr_velo_to_cam'] = kittiCalibration['Tr_velo_to_cam']
+                #                    'Tr_velo_to_imu': kittiCalibration['Tr_velo_imu'],
+                #                    # transfer_matrix['Tr_imu_to_velo'] = kittiCalibration['Tr_imu_to_velo']
+                #                    'Tr_imu_to_world': Tr_imu2w}
+                # 3D object
+                # compute rotational matrix around yaw axis
+                R = roty(r.ry)
+
+                # 3d bounding box corners
+                x_corners = [r.l / 2, r.l / 2, -r.l / 2, -r.l / 2, r.l / 2, r.l / 2, -r.l / 2, -r.l / 2]
+                y_corners = [0, 0, 0, 0, -r.h, -r.h, -r.h, -r.h]
+                z_corners = [r.w / 2, -r.w / 2, -r.w / 2, r.w / 2, r.w / 2, -r.w / 2, -r.w / 2, r.w / 2]
+                # rotate and translate 3d bounding box
+                corners3d = dot(R, vstack([x_corners, y_corners, z_corners]))
+                corners3d[0, :] = corners3d[0, :] + r.x#obj.t[0]
+                corners3d[1, :] = corners3d[1, :] + r.y#obj.t[1]
+                corners3d[2, :] = corners3d[2, :] + r.z#obj.t[2]
+                # corners3d = transpose(corners3d)
+                # box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
+                refCorners = transpose(dot(invR0, corners3d)) # 8x3 avoid double transpose np.transpose(pts_3d_rect))) in pts_3d_ref = self.project_rect_to_ref(pts_3d_rect)
+                homRefCorners = cartesian2Homogeneous(refCorners)
+                veloCorners = dot(homRefCorners, transCam2Velo)
+                # self.project_ref_to_velo(pts_3d_ref)
+                # boxes3d_world_single, flag_imu, flag_world = calib_function.project_velo_to_world(bboxes_3d_velo=boxes3d_velo_single, Tr_matrix=transfer_matrix)
+                #Tr_velo_to_imu = transfer_matrix['Tr_velo_to_imu']
+                #Tr_imu_to_world = transfer_matrix['Tr_imu_to_world']
+                homVeloCorners = cartesian2Homogeneous(veloCorners)
+                imuCorners = dot(kittiCalibration['Tr_velo_imu'], homVeloCorners.T).T # 8x3
 
+                homImuCorners = cartesian2Homogeneous(imuCorners)
+                worldCorners = dot(Tr_imu_to_world, homImuCorners.T).T # 8x3
+            else: #LUMPI format
+                anno_list_temp = {}
+                anno_list_temp['x_2d'] = r.xmin
+                anno_list_temp['y_2d'] = r.ymin
+                anno_list_temp['l_3d'] = r.l
+                anno_list_temp['w_3d'] = r.w
+                anno_list_temp['h_3d'] = r.h
+                anno_list_temp['x_3d'] = r.x
+                anno_list_temp['y_3d'] = r.y
+                anno_list_temp['z_3d'] = r.z
+                anno_list_temp['heading_3d'] = r.ry
+                worldCorners = compute3Dbox(anno_list_temp)
+            # take first 4 lines of corners, x,y,_ # x0, y0, _ = boxes3d[0]
+            xCoords = worldCorners[:4,0]
+            yCoords = worldCorners[:4,1]
+            t.addPositionXY(xCoords.mean(), yCoords.mean())
+            for i in range(4):
+                featureTrajectories[i].addPositionXY(xCoords[i], yCoords[i])
+            # check https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga1019495a2c8d1743ed5cc23fa0daff8c
+        if interval.length()>1:
+            objects.append(moving.MovingObject(num = objNum, timeInterval = interval, positions = t, velocities = t.differentiate(True), userType = userType, features = [moving.MovingObject(num = featureNum+i, timeInterval = copy(interval), positions = featureTrajectories[i], velocities = featureTrajectories[i].differentiate(True)) for i in range(4)]))
+            featureNum += 4
+    
+    # ego vehicle
+    if oxts is not None:
+        t = moving.Trajectory()
+        featureTrajectories = [moving.Trajectory() for i in range(4)]
+        interval = moving.TimeInterval(0, len(oxts)-1)#int(data.frame.min()), int(data.frame.max()))
+        R = roty(pi/2)
+        #for frame in interval:
+        for _, Tr_imu_to_world in oxts:
+            l = 4.5 # m
+            w = 1.8 # m
+            h = 0.
             # 3d bounding box corners
-            x_corners = [r.l / 2, r.l / 2, -r.l / 2, -r.l / 2, r.l / 2, r.l / 2, -r.l / 2, -r.l / 2]
-            y_corners = [0, 0, 0, 0, -r.h, -r.h, -r.h, -r.h]
-            z_corners = [r.w / 2, -r.w / 2, -r.w / 2, r.w / 2, r.w / 2, -r.w / 2, -r.w / 2, r.w / 2]
+            x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
+            y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
+            z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
             # rotate and translate 3d bounding box
             corners3d = dot(R, vstack([x_corners, y_corners, z_corners]))
-            corners3d[0, :] = corners3d[0, :] + r.x#obj.t[0]
-            corners3d[1, :] = corners3d[1, :] + r.y#obj.t[1]
-            corners3d[2, :] = corners3d[2, :] + r.z#obj.t[2]
-            # corners3d = transpose(corners3d)
-            # box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
+            corners3d[0, :] = corners3d[0, :]
+            corners3d[1, :] = corners3d[1, :]
+            corners3d[2, :] = corners3d[2, :]
             refCorners = transpose(dot(invR0, corners3d)) # 8x3 avoid double transpose np.transpose(pts_3d_rect))) in pts_3d_ref = self.project_rect_to_ref(pts_3d_rect)
             homRefCorners = cartesian2Homogeneous(refCorners)
             veloCorners = dot(homRefCorners, transCam2Velo)
-# self.project_ref_to_velo(pts_3d_ref)
-            # boxes3d_world_single, flag_imu, flag_world = calib_function.project_velo_to_world(bboxes_3d_velo=boxes3d_velo_single, Tr_matrix=transfer_matrix)
-            #Tr_velo_to_imu = transfer_matrix['Tr_velo_to_imu']
-            #Tr_imu_to_world = transfer_matrix['Tr_imu_to_world']
             homVeloCorners = cartesian2Homogeneous(veloCorners)
             imuCorners = dot(kittiCalibration['Tr_velo_imu'], homVeloCorners.T).T # 8x3
 
             homImuCorners = cartesian2Homogeneous(imuCorners)
             worldCorners = dot(Tr_imu_to_world, homImuCorners.T).T # 8x3
-            
+                
             # take first 4 lines of corners, x,y,_ # x0, y0, _ = boxes3d[0]
             xCoords = worldCorners[:4,0]
             yCoords = worldCorners[:4,1]
             t.addPositionXY(xCoords.mean(), yCoords.mean())
             for i in range(4):
                 featureTrajectories[i].addPositionXY(xCoords[i], yCoords[i])                
-            # check https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga1019495a2c8d1743ed5cc23fa0daff8c
-        if interval.length()>1:
-            objects.append(moving.MovingObject(num = objNum, timeInterval = interval, positions = t, velocities = t.differentiate(True), userType = userType, features = [moving.MovingObject(num = featureNum+i, timeInterval = copy(interval), positions = featureTrajectories[i], velocities = featureTrajectories[i].differentiate(True)) for i in range(4)]))
-            featureNum += 4
-    
-    # ego vehicle
-    t = moving.Trajectory()
-    featureTrajectories = [moving.Trajectory() for i in range(4)]
-    interval = moving.TimeInterval(0, len(oxts)-1)#int(data.frame.min()), int(data.frame.max()))
-    R = roty(pi/2)
-    #for frame in interval:
-    for _, Tr_imu_to_world in oxts:
-        l = 4.5 # m
-        w = 1.8 # m
-        h = 0.
-        # 3d bounding box corners
-        x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
-        y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
-        z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
-        # rotate and translate 3d bounding box
-        corners3d = dot(R, vstack([x_corners, y_corners, z_corners]))
-        corners3d[0, :] = corners3d[0, :]
-        corners3d[1, :] = corners3d[1, :]
-        corners3d[2, :] = corners3d[2, :]
-        refCorners = transpose(dot(invR0, corners3d)) # 8x3 avoid double transpose np.transpose(pts_3d_rect))) in pts_3d_ref = self.project_rect_to_ref(pts_3d_rect)
-        homRefCorners = cartesian2Homogeneous(refCorners)
-        veloCorners = dot(homRefCorners, transCam2Velo)
-        homVeloCorners = cartesian2Homogeneous(veloCorners)
-        imuCorners = dot(kittiCalibration['Tr_velo_imu'], homVeloCorners.T).T # 8x3
-
-        homImuCorners = cartesian2Homogeneous(imuCorners)
-        worldCorners = dot(Tr_imu_to_world, homImuCorners.T).T # 8x3
-            
-        # take first 4 lines of corners, x,y,_ # x0, y0, _ = boxes3d[0]
-        xCoords = worldCorners[:4,0]
-        yCoords = worldCorners[:4,1]
-        t.addPositionXY(xCoords.mean(), yCoords.mean())
-        for i in range(4):
-            featureTrajectories[i].addPositionXY(xCoords[i], yCoords[i])                
-            # check https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga1019495a2c8d1743ed5cc23fa0daff8c        
-    objects.append(moving.MovingObject(num = max([o.getNum() for o in objects])+1, timeInterval = interval, positions = t, velocities = t.differentiate(True), userType = 'Car', features = [moving.MovingObject(num = featureNum+i, timeInterval = copy(interval), positions = featureTrajectories[i], velocities = featureTrajectories[i].differentiate(True)) for i in range(4)]))
+                # check https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga1019495a2c8d1743ed5cc23fa0daff8c        
+        objects.append(moving.MovingObject(num = max([o.getNum() for o in objects])+1, timeInterval = interval, positions = t, velocities = t.differentiate(True), userType = 'Car', features = [moving.MovingObject(num = featureNum+i, timeInterval = copy(interval), positions = featureTrajectories[i], velocities = featureTrajectories[i].differentiate(True)) for i in range(4)]))
 
     return objects