comparison trafficintelligence/storage.py @ 1201:28bf2420c412

work in progress to import KITTI data
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Tue, 28 Feb 2023 17:16:49 -0500
parents ccab20f85710
children 059b7282aa09
comparison of revisions 1199:6a6a4d5958f7 and 1201:28bf2420c412
5 5 from pathlib import Path
6 6 import shutil
7 7 from copy import copy
8 8 import sqlite3, logging
9 9
10 from numpy import log, min as npmin, max as npmax, round as npround, array, sum as npsum, loadtxt, floor as npfloor, ceil as npceil, linalg, int32, int64
10 from numpy import log, min as npmin, max as npmax, round as npround, array, sum as npsum, loadtxt, floor as npfloor, ceil as npceil, linalg, int32, int64, reshape, dot, vstack, transpose
11 11 from pandas import read_csv, merge
12 12
13 13 from trafficintelligence import utils, moving, events, indicators
14 14 from trafficintelligence.base import VideoFilenameAddable
15 15
1253 1253 if (obj.size[1] != float(numbers[9])):
1254 1254 print('changed width obj {}'.format(obj.getNum()))
1255 1255
1256 1256 inputfile.close()
1257 1257 return objects
1258
1259 # from https://github.com/kuixu/kitti_object_vis
1260 def inverse_rigid_trans(Tr):
1261 """ Inverse a rigid body transform matrix (3x4 as [R|t])
1262 [R'|-R't; 0|1]
1263 """
1264 import numpy as np
1265 inv_Tr = np.zeros_like(Tr) # 3x4
1266 inv_Tr[0:3, 0:3] = transpose(Tr[0:3, 0:3])
1267 inv_Tr[0:3, 3] = dot(-transpose(Tr[0:3, 0:3]), Tr[0:3, 3])
1268 return inv_Tr
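# A possible sanity check for inverse_rigid_trans (a sketch, assuming only numpy; the helper
# name _checkInverseRigidTrans is hypothetical): composing [R|t] with its inverse [R'|-R't]
# should bring any point back to itself.
def _checkInverseRigidTrans():
    import numpy as np
    R = np.array([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]]) # rotation by pi/2 around z
    t = np.array([1., 2., 3.])
    Tr = np.hstack((R, t.reshape(3, 1)))                      # 3x4 [R|t]
    invTr = inverse_rigid_trans(Tr)
    p = np.array([4., 5., 6.])
    q = np.dot(Tr[:, :3], p) + Tr[:, 3]                       # apply the transform
    pBack = np.dot(invTr[:, :3], q) + invTr[:, 3]             # apply its inverse
    return np.allclose(p, pBack)                              # expected to be True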
1269
1270 def loadKITTICalibration(filename):
1271 '''Loads KITTI calibration data'''
1272 calib = {}
1273 with open(filename, 'r') as f:
1274 for l in f:
1275 l = l.rstrip()
1276 if len(l) == 0:
1277 continue
1278 key, value = l.split(' ', 1)
1279 if ":" in key:
1280 key = key[:-1]
1281 try:
1282 calib[key] = array([float(x) for x in value.split()])
1283 except ValueError as e:
1284 print(e)
1285 continue
1286 #calib['Tr_velo_to_cam'] = calib['Tr_velo_cam']
1287 #calib['Tr_imu_to_velo'] = calib['Tr_imu_velo']
1288 temps = reshape(calib['Tr_velo_cam'], (3, 4))
1289 calib['Tr_cam_velo'] = inverse_rigid_trans(temps)
1290 temps = reshape(calib['Tr_imu_velo'], (3, 4))
1291 calib['Tr_velo_imu'] = inverse_rigid_trans(temps)
1292
1293 P = reshape(calib['P2'], (3, 4))
1294 calib['c_u'] = P[0, 2]
1295 calib['c_v'] = P[1, 2]
1296 calib['f_u'] = P[0, 0]
1297 calib['f_v'] = P[1, 1]
1298 calib['b_x'] = P[0, 3] / (-calib['f_u']) # relative
1299 calib['b_y'] = P[1, 3] / (-calib['f_v'])
1300 return calib
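# A usage sketch for loadKITTICalibration (the file path and the helper name _projectVeloToImage2
# are hypothetical): project one velodyne-frame point into image 2 with the usual KITTI chain
# x_image ~ P2 . R_rect . Tr_velo_cam . X_velo, using the raw entries of the dictionary returned above.
def _projectVeloToImage2(calib, xVelo):
    '''Sketch: projects a 3D point (x, y, z) in velodyne coordinates to pixel coordinates (u, v) in image 2'''
    import numpy as np
    Tr = np.vstack((np.reshape(calib['Tr_velo_cam'], (3, 4)), [0., 0., 0., 1.])) # velodyne -> camera 0 (4x4)
    Rrect = np.eye(4)
    Rrect[:3, :3] = np.reshape(calib['R_rect'], (3, 3))                          # camera 0 -> rectified camera 0
    P2 = np.reshape(calib['P2'], (3, 4))                                         # rectified camera 0 -> image 2
    x = np.dot(P2, np.dot(Rrect, np.dot(Tr, np.hstack((xVelo, [1.])))))
    return x[:2]/x[2]
# example (hypothetical filename):
# calib = loadKITTICalibration('./training/calib/0001.txt')
# u, v = _projectVeloToImage2(calib, array([10., 2., -0.5]))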
1301
1302 # from https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py
1303
1304
1305 # def get_cam_in_ex(calib):
1306 # """
1307 # Qingwu
1308 # :param calib: calib from kitti_tracking
1309 # :return: cam_parameters: Camera intrinsics and extrinsics
1310 # """
1311 # cam_parameters = {}
1312 # P = np.reshape(calib["P2"], [3, 4])
1313 # cam_parameters["c_u"] = P[0, 2]
1314 # cam_parameters["c_v"] = P[1, 2]
1315 # cam_parameters["f_u"] = P[0, 0]
1316 # cam_parameters["f_v"] = P[1, 1]
1317 # cam_parameters["b_x"] = P[0, 3] / (-cam_parameters["f_u"]) # relative
1318 # cam_parameters["b_y"] = P[1, 3] / (-cam_parameters["f_v"])
1319 # return cam_parameters
1320
1321
1322 def loadTrajectoriesFromKITTI(filename, kittiCalibration, oxts):
1323 '''Reads data from KITTI ground truth or output from an object detection and tracking method
1324
1325 kittiCalibration is obtained from loading training/testing calibration file for each sequence 00XX.txt
1326 oxts is obtained using utils.load_oxts_packets_and_poses(['./training/oxts/0001.txt']) from pykitti
1327
1328 Ref: https://github.com/pratikac/kitti/blob/master/readme.tracking.txt'''
1329 from pykitti.utils import roty
1330
1331 invR0 = linalg.inv(reshape(kittiCalibration['R_rect'], (3, 3)))
1332
1333 header = ['frame', # 0, 1, ..., n
1334 'trackingid', # -1, 0 , 1, ..., k
1335 'type', # 'Car', 'Pedestrian', ...
1336 'truncation', # truncated pixel ratio [0..1]
1337 'occlusion', # 0=visible, 1=partly occluded, 2=fully occluded, 3=unknown
1338 'alpha', # object observation angle [-pi..pi]
1339 # extract 2d bounding box in 0-based coordinates
1340 'xmin', # left
1341 'ymin', # top
1342 'xmax', # right
1343 'ymax', # bottom
1344 # extract 3d bounding box information
1345 'h', # box height
1346 'w', # box width
1347 'l', # box length (in meters)
1348 'x', 'y', 'z', # location (x,y,z) in camera coord
1349 'ry'] # yaw angle (around Y-axis in camera coordinates) [-pi..pi]
1350 data = read_csv(filename, delimiter = ' ', names = header)
1351 data = data[data.trackingid > -1]
1352 for objNum in data.trackingid.unique():
1353 tmp = data[data.trackingid == objNum].sort_values('frame')
1354 obj = moving.MovingObject(num = objNum, timeInterval = moving.TimeInterval(tmp.frame.min(), tmp.frame.max()),
1355 positions = moving.Trajectory([[float(tmp.iloc[0].x)],[float(tmp.iloc[0].z)]]), # placeholder: first position in camera coordinates (x, z); world coordinates still to be computed (work in progress)
1356 userType = tmp.iloc[0].type)
1357 for i, r in tmp.iterrows():
1358 _, Tr_imu2w = oxts[r.frame]
1359 transfer_matrix = {'P2': reshape(kittiCalibration['P2'], (3, 4)),
1360 'R_rect': kittiCalibration['R_rect'],
1361 'Tr_cam_to_velo': kittiCalibration['Tr_cam_velo'],
1362 # transfer_matrix['Tr_velo_to_cam'] = kittiCalibration['Tr_velo_to_cam']
1363 'Tr_velo_to_imu': kittiCalibration['Tr_velo_imu'],
1364 # transfer_matrix['Tr_imu_to_velo'] = kittiCalibration['Tr_imu_to_velo']
1365 'Tr_imu_to_world': Tr_imu2w}
1366 # 3D object
1367 # compute rotational matrix around yaw axis
1368 R = roty(r.ry)
1369
1370 # 3d bounding box corners
1371 x_corners = [r.l / 2, r.l / 2, -r.l / 2, -r.l / 2, r.l / 2, r.l / 2, -r.l / 2, -r.l / 2]
1372 y_corners = [0, 0, 0, 0, -r.h, -r.h, -r.h, -r.h]
1373 z_corners = [r.w / 2, -r.w / 2, -r.w / 2, r.w / 2, r.w / 2, -r.w / 2, -r.w / 2, r.w / 2]
1374 # rotate and translate 3d bounding box
1375 corners3d = dot(R, vstack([x_corners, y_corners, z_corners]))
1376 corners3d[0, :] = corners3d[0, :] + r.x#obj.t[0]
1377 corners3d[1, :] = corners3d[1, :] + r.y#obj.t[1]
1378 corners3d[2, :] = corners3d[2, :] + r.z#obj.t[2]
1379 # corners3d = transpose(corners3d)
1380 # box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
1381 cornersVelo = transpose(dot(invR0, corners3d)) # avoid double transpose np.transpose(pts_3d_rect)))
1382 # boxes3d_world_single, flag_imu, flag_world = calib_function.project_velo_to_world(bboxes_3d_velo=boxes3d_velo_single, Tr_matrix=transfer_matrix)
1383
1384 # check https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga1019495a2c8d1743ed5cc23fa0daff8c
1385
1386
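# A sketch of one way the pipeline above could be finished (an assumption, not the author's
# implementation; the helper name _veloToWorld is hypothetical): take the box corners to the IMU
# frame and then to the world frame with the matrices already collected in transfer_matrix.
# Note that cornersVelo above has only been corrected by invR0, so the 'Tr_cam_to_velo' entry of
# transfer_matrix would probably have to be applied first to reach the velodyne frame.
def _veloToWorld(cornersVelo, transferMatrix):
    '''Sketch: converts nx3 velodyne-frame points to nx3 world-frame points'''
    import numpy as np
    homogeneous = np.hstack((cornersVelo, np.ones((cornersVelo.shape[0], 1))))          # nx4
    cornersImu = np.dot(homogeneous, np.transpose(transferMatrix['Tr_velo_to_imu']))    # nx3 (3x4 velodyne -> IMU)
    homogeneous = np.hstack((cornersImu, np.ones((cornersImu.shape[0], 1))))            # nx4
    cornersWorld = np.dot(homogeneous, np.transpose(transferMatrix['Tr_imu_to_world'])) # nx4 (4x4 IMU -> world pose from oxts)
    return cornersWorld[:, :3]
# Possible use once loadTrajectoriesFromKITTI is completed (paths hypothetical):
# from pykitti.utils import load_oxts_packets_and_poses
# calib = loadKITTICalibration('./training/calib/0001.txt')
# oxts = load_oxts_packets_and_poses(['./training/oxts/0001.txt'])
# objects = loadTrajectoriesFromKITTI('./training/label_02/0001.txt', calib, oxts)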
1258 1387
1259 1388 def convertNgsimFile(inputfile, outputfile, append = False, nObjects = -1, sequenceNum = 0):
1260 1389 '''Reads data from the trajectory data provided by NGSIM project
1261 1390 and converts to our current format.'''
1262 1391 if append:
1547 1676 for sectionName in config.sections():
1548 1677 configDict[sectionName] = SceneParameters(config, sectionName)
1549 1678 return configDict
1550 1679
1551 1680
1552 if __name__ == "__main__":
1681 if __name__ == '__main__':
1553 1682 import doctest
1554 1683 import unittest
1555 1684 suite = doctest.DocFileSuite('tests/storage.txt')
1556 1685 unittest.TextTestRunner().run(suite)
1557 1686 # #doctest.testmod()