Mercurial Hosting > traffic-intelligence
comparison trafficintelligence/storage.py @ 1255:c0fe55a6b82f
added support for LUMPI 3D data format
author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
---|---|
date | Wed, 03 Apr 2024 12:30:36 -0400 |
parents | a477ad82ab66 |
children | 56d0195d043e |
comparison
equal
deleted
inserted
replaced
1254:a477ad82ab66 | 1255:c0fe55a6b82f |
---|---|
1269 inv_Tr = zeros_like(Tr) # 3x4 | 1269 inv_Tr = zeros_like(Tr) # 3x4 |
1270 inv_Tr[0:3, 0:3] = transpose(Tr[0:3, 0:3]) | 1270 inv_Tr[0:3, 0:3] = transpose(Tr[0:3, 0:3]) |
1271 inv_Tr[0:3, 3] = dot(-transpose(Tr[0:3, 0:3]), Tr[0:3, 3]) | 1271 inv_Tr[0:3, 3] = dot(-transpose(Tr[0:3, 0:3]), Tr[0:3, 3]) |
1272 return inv_Tr | 1272 return inv_Tr |
1273 | 1273 |
1274 def compute3Dbox(anno_list): | |
1275 #### LiDAR coordinate, rotates about z | |
1276 ''' | |
1277 ^ z | |
1278 | | |
1279 | | |
1280 | | |
1281 . - - - - - - - > x | |
1282 / | |
1283 / | |
1284 / | |
1285 v y | |
1286 | |
1287 ''' | |
1288 from pykitti.utils import rotz | |
1289 | |
1290 R = rotz(anno_list['heading_3d']) | |
1291 l = anno_list['l_3d'] | |
1292 w = anno_list['w_3d'] | |
1293 h = anno_list['h_3d'] | |
1294 | |
1295 x_3d = anno_list['x_3d'] | |
1296 y_3d = anno_list['y_3d'] | |
1297 z_3d = anno_list['z_3d'] | |
1298 | |
1299 # # 3d bounding box corners x,y,z as center of the 3d bbox | |
1300 # # 0 1 2 3 4 5 6 7 | |
1301 x_corners = [l/2, l/2, -l/2, -l/2, l/2, l/2, -l/2, -l/2] | |
1302 y_corners = [w/2, -w/2, -w/2, w/2, w/2, -w/2, -w/2, w/2] | |
1303 z_corners = [h/2, h/2, h/2, h/2, -h/2, -h/2, -h/2, -h/2] | |
1304 | |
1305 # 3d bounding box corners x,y,z as center of the bottom surface of the 3d bbox, | |
1306 # the difference occurs at z_corners, as the box rotates about the z axis. | |
1307 # x_corners = [l/2, l/2, -l/2, -l/2, l/2, l/2, -l/2, -l/2] | |
1308 # y_corners = [w/2, -w/2, -w/2, w/2, w/2, -w/2, -w/2, w/2] | |
1309 # z_corners = [h, h, h, h, 0, 0, 0, 0] | |
1310 corners_3d = dot(R, vstack([x_corners, y_corners, z_corners])) | |
1311 # add the center | |
1312 corners_3d[0, :] = corners_3d[0, :] + x_3d | |
1313 corners_3d[1, :] = corners_3d[1, :] + y_3d | |
1314 corners_3d[2, :] = corners_3d[2, :] + z_3d | |
1315 | |
1316 return transpose(corners_3d) | |
1317 | |
1274 def loadKITTICalibration(filename): | 1318 def loadKITTICalibration(filename): |
1275 '''Loads KITTI calibration data''' | 1319 '''Loads KITTI calibration data''' |
1276 calib = {} | 1320 calib = {} |
1277 with open(filename, 'r') as f: | 1321 with open(filename, 'r') as f: |
1278 for l in f: | 1322 for l in f: |
1302 return calib | 1346 return calib |
1303 | 1347 |
1304 # from https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py | 1348 # from https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py |
1305 | 1349 |
1306 | 1350 |
1307 def loadTrajectoriesFromKITTI(filename, kittiCalibration, oxts, resultFile = False): | 1351 def loadTrajectoriesFromKITTI(filename, kittiCalibration = None, oxts = None, resultFile = False): |
1308 '''Reads data from KITTI ground truth or output from an object detection and tracking method | 1352 '''Reads data from KITTI ground truth or output from an object detection and tracking method |
1309 | 1353 |
1310 kittiCalibration is obtained from loading training/testing calibration file for each sequence 00XX.txt | 1354 kittiCalibration is obtained from loading training/testing calibration file for each sequence 00XX.txt |
1311 oxts is obtained using utils.load_oxts_packets_and_poses(['./training/oxts/0001.txt']) from pykitti | 1355 oxts is obtained using utils.load_oxts_packets_and_poses(['./training/oxts/0001.txt']) from pykitti |
1312 | 1356 |
1313 Ref: https://github.com/pratikac/kitti/blob/master/readme.tracking.txt''' | 1357 Ref: https://github.com/pratikac/kitti/blob/master/readme.tracking.txt''' |
1314 from pykitti.utils import roty | 1358 from pykitti.utils import roty |
1315 from trafficintelligence.cvutils import cartesian2Homogeneous | 1359 from trafficintelligence.cvutils import cartesian2Homogeneous |
1316 | 1360 |
1317 invR0 = linalg.inv(reshape(kittiCalibration['R_rect'], (3, 3))) | 1361 if kittiCalibration is not None: |
1318 transCam2Velo = transpose(kittiCalibration['Tr_cam_velo']) | 1362 invR0 = linalg.inv(reshape(kittiCalibration['R_rect'], (3, 3))) |
1363 transCam2Velo = transpose(kittiCalibration['Tr_cam_velo']) | |
1319 | 1364 |
1320 header = ['frame', # 0, 1, ..., n | 1365 header = ['frame', # 0, 1, ..., n |
1321 'trackingid', # -1, 0 , 1, ..., k | 1366 'trackingid', # -1, 0 , 1, ..., k |
1322 'usertype', # 'Car', 'Pedestrian', ... | 1367 'usertype', # 'Car', 'Pedestrian', ... |
1323 'truncation', # truncated pixel ratio [0..1] | 1368 'truncation', # truncated pixel ratio [0..1] |
1351 instants = set(interval).difference(tmp.frame) | 1396 instants = set(interval).difference(tmp.frame) |
1352 missing = concat([tmp[header[10:]], DataFrame([[t]+[NaN]*(len(header)-10) for t in instants], columns = ['frame']+header[10:])], ignore_index=True).sort_values('frame') | 1397 missing = concat([tmp[header[10:]], DataFrame([[t]+[NaN]*(len(header)-10) for t in instants], columns = ['frame']+header[10:])], ignore_index=True).sort_values('frame') |
1353 tmp = missing.interpolate() | 1398 tmp = missing.interpolate() |
1354 featureTrajectories = [moving.Trajectory() for i in range(4)] | 1399 featureTrajectories = [moving.Trajectory() for i in range(4)] |
1355 for i, r in tmp.iterrows(): | 1400 for i, r in tmp.iterrows(): |
1356 _, Tr_imu_to_world = oxts[r.frame] | 1401 if kittiCalibration is not None: |
1357 # transfer_matrix = {'P2': reshape(kittiCalibration['P2'], (3, 4)), | 1402 _, Tr_imu_to_world = oxts[r.frame] |
1358 # 'R_rect': kittiCalibration['R_rect'], | 1403 # transfer_matrix = {'P2': reshape(kittiCalibration['P2'], (3, 4)), |
1359 # 'Tr_cam_to_velo': kittiCalibration['Tr_cam_velo'], | 1404 # 'R_rect': kittiCalibration['R_rect'], |
1360 # # transfer_matrix['Tr_velo_to_cam'] = kittiCalibration['Tr_velo_to_cam'] | 1405 # 'Tr_cam_to_velo': kittiCalibration['Tr_cam_velo'], |
1361 # 'Tr_velo_to_imu': kittiCalibration['Tr_velo_imu'], | 1406 # # transfer_matrix['Tr_velo_to_cam'] = kittiCalibration['Tr_velo_to_cam'] |
1362 # # transfer_matrix['Tr_imu_to_velo'] = kittiCalibration['Tr_imu_to_velo'] | 1407 # 'Tr_velo_to_imu': kittiCalibration['Tr_velo_imu'], |
1363 # 'Tr_imu_to_world': Tr_imu2w} | 1408 # # transfer_matrix['Tr_imu_to_velo'] = kittiCalibration['Tr_imu_to_velo'] |
1364 # 3D object | 1409 # 'Tr_imu_to_world': Tr_imu2w} |
1365 # compute rotational matrix around yaw axis | 1410 # 3D object |
1366 R = roty(r.ry) | 1411 # compute rotational matrix around yaw axis |
1367 | 1412 R = roty(r.ry) |
1413 | |
1414 # 3d bounding box corners | |
1415 x_corners = [r.l / 2, r.l / 2, -r.l / 2, -r.l / 2, r.l / 2, r.l / 2, -r.l / 2, -r.l / 2] | |
1416 y_corners = [0, 0, 0, 0, -r.h, -r.h, -r.h, -r.h] | |
1417 z_corners = [r.w / 2, -r.w / 2, -r.w / 2, r.w / 2, r.w / 2, -r.w / 2, -r.w / 2, r.w / 2] | |
1418 # rotate and translate 3d bounding box | |
1419 corners3d = dot(R, vstack([x_corners, y_corners, z_corners])) | |
1420 corners3d[0, :] = corners3d[0, :] + r.x#obj.t[0] | |
1421 corners3d[1, :] = corners3d[1, :] + r.y#obj.t[1] | |
1422 corners3d[2, :] = corners3d[2, :] + r.z#obj.t[2] | |
1423 # corners3d = transpose(corners3d) | |
1424 # box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d) | |
1425 refCorners = transpose(dot(invR0, corners3d)) # 8x3 avoid double transpose np.transpose(pts_3d_rect))) in pts_3d_ref = self.project_rect_to_ref(pts_3d_rect) | |
1426 homRefCorners = cartesian2Homogeneous(refCorners) | |
1427 veloCorners = dot(homRefCorners, transCam2Velo) | |
1428 # self.project_ref_to_velo(pts_3d_ref) | |
1429 # boxes3d_world_single, flag_imu, flag_world = calib_function.project_velo_to_world(bboxes_3d_velo=boxes3d_velo_single, Tr_matrix=transfer_matrix) | |
1430 #Tr_velo_to_imu = transfer_matrix['Tr_velo_to_imu'] | |
1431 #Tr_imu_to_world = transfer_matrix['Tr_imu_to_world'] | |
1432 homVeloCorners = cartesian2Homogeneous(veloCorners) | |
1433 imuCorners = dot(kittiCalibration['Tr_velo_imu'], homVeloCorners.T).T # 8x3 | |
1434 | |
1435 homImuCorners = cartesian2Homogeneous(imuCorners) | |
1436 worldCorners = dot(Tr_imu_to_world, homImuCorners.T).T # 8x3 | |
1437 else: #LUMPI format | |
1438 anno_list_temp = {} | |
1439 anno_list_temp['x_2d'] = r.xmin | |
1440 anno_list_temp['y_2d'] = r.ymin | |
1441 anno_list_temp['l_3d'] = r.l | |
1442 anno_list_temp['w_3d'] = r.w | |
1443 anno_list_temp['h_3d'] = r.h | |
1444 anno_list_temp['x_3d'] = r.x | |
1445 anno_list_temp['y_3d'] = r.y | |
1446 anno_list_temp['z_3d'] = r.z | |
1447 anno_list_temp['heading_3d'] = r.ry | |
1448 worldCorners = compute3Dbox(anno_list_temp) | |
1449 # take first 4 lines of corners, x,y,_ # x0, y0, _ = boxes3d[0] | |
1450 xCoords = worldCorners[:4,0] | |
1451 yCoords = worldCorners[:4,1] | |
1452 t.addPositionXY(xCoords.mean(), yCoords.mean()) | |
1453 for i in range(4): | |
1454 featureTrajectories[i].addPositionXY(xCoords[i], yCoords[i]) | |
1455 # check https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga1019495a2c8d1743ed5cc23fa0daff8c | |
1456 if interval.length()>1: | |
1457 objects.append(moving.MovingObject(num = objNum, timeInterval = interval, positions = t, velocities = t.differentiate(True), userType = userType, features = [moving.MovingObject(num = featureNum+i, timeInterval = copy(interval), positions = featureTrajectories[i], velocities = featureTrajectories[i].differentiate(True)) for i in range(4)])) | |
1458 featureNum += 4 | |
1459 | |
1460 # ego vehicle | |
1461 if oxts is not None: | |
1462 t = moving.Trajectory() | |
1463 featureTrajectories = [moving.Trajectory() for i in range(4)] | |
1464 interval = moving.TimeInterval(0, len(oxts)-1)#int(data.frame.min()), int(data.frame.max())) | |
1465 R = roty(pi/2) | |
1466 #for frame in interval: | |
1467 for _, Tr_imu_to_world in oxts: | |
1468 l = 4.5 # m | |
1469 w = 1.8 # m | |
1470 h = 0. | |
1368 # 3d bounding box corners | 1471 # 3d bounding box corners |
1369 x_corners = [r.l / 2, r.l / 2, -r.l / 2, -r.l / 2, r.l / 2, r.l / 2, -r.l / 2, -r.l / 2] | 1472 x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2] |
1370 y_corners = [0, 0, 0, 0, -r.h, -r.h, -r.h, -r.h] | 1473 y_corners = [0, 0, 0, 0, -h, -h, -h, -h] |
1371 z_corners = [r.w / 2, -r.w / 2, -r.w / 2, r.w / 2, r.w / 2, -r.w / 2, -r.w / 2, r.w / 2] | 1474 z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2] |
1372 # rotate and translate 3d bounding box | 1475 # rotate and translate 3d bounding box |
1373 corners3d = dot(R, vstack([x_corners, y_corners, z_corners])) | 1476 corners3d = dot(R, vstack([x_corners, y_corners, z_corners])) |
1374 corners3d[0, :] = corners3d[0, :] + r.x#obj.t[0] | 1477 corners3d[0, :] = corners3d[0, :] |
1375 corners3d[1, :] = corners3d[1, :] + r.y#obj.t[1] | 1478 corners3d[1, :] = corners3d[1, :] |
1376 corners3d[2, :] = corners3d[2, :] + r.z#obj.t[2] | 1479 corners3d[2, :] = corners3d[2, :] |
1377 # corners3d = transpose(corners3d) | |
1378 # box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d) | |
1379 refCorners = transpose(dot(invR0, corners3d)) # 8x3 avoid double transpose np.transpose(pts_3d_rect))) in pts_3d_ref = self.project_rect_to_ref(pts_3d_rect) | 1480 refCorners = transpose(dot(invR0, corners3d)) # 8x3 avoid double transpose np.transpose(pts_3d_rect))) in pts_3d_ref = self.project_rect_to_ref(pts_3d_rect) |
1380 homRefCorners = cartesian2Homogeneous(refCorners) | 1481 homRefCorners = cartesian2Homogeneous(refCorners) |
1381 veloCorners = dot(homRefCorners, transCam2Velo) | 1482 veloCorners = dot(homRefCorners, transCam2Velo) |
1382 # self.project_ref_to_velo(pts_3d_ref) | |
1383 # boxes3d_world_single, flag_imu, flag_world = calib_function.project_velo_to_world(bboxes_3d_velo=boxes3d_velo_single, Tr_matrix=transfer_matrix) | |
1384 #Tr_velo_to_imu = transfer_matrix['Tr_velo_to_imu'] | |
1385 #Tr_imu_to_world = transfer_matrix['Tr_imu_to_world'] | |
1386 homVeloCorners = cartesian2Homogeneous(veloCorners) | 1483 homVeloCorners = cartesian2Homogeneous(veloCorners) |
1387 imuCorners = dot(kittiCalibration['Tr_velo_imu'], homVeloCorners.T).T # 8x3 | 1484 imuCorners = dot(kittiCalibration['Tr_velo_imu'], homVeloCorners.T).T # 8x3 |
1388 | 1485 |
1389 homImuCorners = cartesian2Homogeneous(imuCorners) | 1486 homImuCorners = cartesian2Homogeneous(imuCorners) |
1390 worldCorners = dot(Tr_imu_to_world, homImuCorners.T).T # 8x3 | 1487 worldCorners = dot(Tr_imu_to_world, homImuCorners.T).T # 8x3 |
1391 | 1488 |
1392 # take first 4 lines of corners, x,y,_ # x0, y0, _ = boxes3d[0] | 1489 # take first 4 lines of corners, x,y,_ # x0, y0, _ = boxes3d[0] |
1393 xCoords = worldCorners[:4,0] | 1490 xCoords = worldCorners[:4,0] |
1394 yCoords = worldCorners[:4,1] | 1491 yCoords = worldCorners[:4,1] |
1395 t.addPositionXY(xCoords.mean(), yCoords.mean()) | 1492 t.addPositionXY(xCoords.mean(), yCoords.mean()) |
1396 for i in range(4): | 1493 for i in range(4): |
1397 featureTrajectories[i].addPositionXY(xCoords[i], yCoords[i]) | 1494 featureTrajectories[i].addPositionXY(xCoords[i], yCoords[i]) |
1398 # check https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga1019495a2c8d1743ed5cc23fa0daff8c | 1495 # check https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga1019495a2c8d1743ed5cc23fa0daff8c |
1399 if interval.length()>1: | 1496 objects.append(moving.MovingObject(num = max([o.getNum() for o in objects])+1, timeInterval = interval, positions = t, velocities = t.differentiate(True), userType = 'Car', features = [moving.MovingObject(num = featureNum+i, timeInterval = copy(interval), positions = featureTrajectories[i], velocities = featureTrajectories[i].differentiate(True)) for i in range(4)])) |
1400 objects.append(moving.MovingObject(num = objNum, timeInterval = interval, positions = t, velocities = t.differentiate(True), userType = userType, features = [moving.MovingObject(num = featureNum+i, timeInterval = copy(interval), positions = featureTrajectories[i], velocities = featureTrajectories[i].differentiate(True)) for i in range(4)])) | |
1401 featureNum += 4 | |
1402 | |
1403 # ego vehicle | |
1404 t = moving.Trajectory() | |
1405 featureTrajectories = [moving.Trajectory() for i in range(4)] | |
1406 interval = moving.TimeInterval(0, len(oxts)-1)#int(data.frame.min()), int(data.frame.max())) | |
1407 R = roty(pi/2) | |
1408 #for frame in interval: | |
1409 for _, Tr_imu_to_world in oxts: | |
1410 l = 4.5 # m | |
1411 w = 1.8 # m | |
1412 h = 0. | |
1413 # 3d bounding box corners | |
1414 x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2] | |
1415 y_corners = [0, 0, 0, 0, -h, -h, -h, -h] | |
1416 z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2] | |
1417 # rotate and translate 3d bounding box | |
1418 corners3d = dot(R, vstack([x_corners, y_corners, z_corners])) | |
1419 corners3d[0, :] = corners3d[0, :] | |
1420 corners3d[1, :] = corners3d[1, :] | |
1421 corners3d[2, :] = corners3d[2, :] | |
1422 refCorners = transpose(dot(invR0, corners3d)) # 8x3 avoid double transpose np.transpose(pts_3d_rect))) in pts_3d_ref = self.project_rect_to_ref(pts_3d_rect) | |
1423 homRefCorners = cartesian2Homogeneous(refCorners) | |
1424 veloCorners = dot(homRefCorners, transCam2Velo) | |
1425 homVeloCorners = cartesian2Homogeneous(veloCorners) | |
1426 imuCorners = dot(kittiCalibration['Tr_velo_imu'], homVeloCorners.T).T # 8x3 | |
1427 | |
1428 homImuCorners = cartesian2Homogeneous(imuCorners) | |
1429 worldCorners = dot(Tr_imu_to_world, homImuCorners.T).T # 8x3 | |
1430 | |
1431 # take first 4 lines of corners, x,y,_ # x0, y0, _ = boxes3d[0] | |
1432 xCoords = worldCorners[:4,0] | |
1433 yCoords = worldCorners[:4,1] | |
1434 t.addPositionXY(xCoords.mean(), yCoords.mean()) | |
1435 for i in range(4): | |
1436 featureTrajectories[i].addPositionXY(xCoords[i], yCoords[i]) | |
1437 # check https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga1019495a2c8d1743ed5cc23fa0daff8c | |
1438 objects.append(moving.MovingObject(num = max([o.getNum() for o in objects])+1, timeInterval = interval, positions = t, velocities = t.differentiate(True), userType = 'Car', features = [moving.MovingObject(num = featureNum+i, timeInterval = copy(interval), positions = featureTrajectories[i], velocities = featureTrajectories[i].differentiate(True)) for i in range(4)])) | |
1439 | 1497 |
1440 return objects | 1498 return objects |
1441 | 1499 |
1442 def loadTrajectoriesFromKITTI2D(filename, kittiCalibration = None, oxts = None): | 1500 def loadTrajectoriesFromKITTI2D(filename, kittiCalibration = None, oxts = None): |
1443 '''Loads x,y coordinate series | 1501 '''Loads x,y coordinate series |