Mercurial Hosting > traffic-intelligence
changeset 1203:7b3384a8e409
second version of code loading kitti data, to clean
author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
---|---|
date | Wed, 22 Mar 2023 15:40:33 -0400 |
parents | 059b7282aa09 |
children | a12d126346ff |
files | trafficintelligence/moving.py trafficintelligence/storage.py |
diffstat | 2 files changed, 33 insertions(+), 18 deletions(-) [+] |
line wrap: on
line diff
# NOTE(review): reconstructed from the single-line diff hunks for
# trafficintelligence/moving.py in this changeset. Only the definitions that
# are COMPLETE in the diff are reconstructed; partial hunks (__init__) are
# not guessed at. Two defects in the changeset are fixed:
#   1. Point.plotAll: `xCoords.append[0]` / `yCoords.append[0]` subscripts
#      the bound `append` method -> TypeError at runtime; and even as a call
#      it would append the literal 0 instead of repeating the first vertex,
#      which is what closing a polygon requires.
#   2. MovingObject.plotOutlineAtInstant: `Point.plotAll(points, options, True, kwargs)`
#      passes the kwargs dict as a 4th positional argument -> TypeError;
#      it must be unpacked with **kwargs.

def polylineCoordinates(points, closePolygon = False):
    '''Returns the lists of x and y coordinates of points.

    If closePolygon is True (and points is non-empty), the first vertex is
    repeated at the end so that plotting the coordinates draws a closed
    polygon rather than an open polyline.'''
    xCoords = [p.x for p in points]
    yCoords = [p.y for p in points]
    if closePolygon and len(xCoords) > 0:
        # close the outline by returning to the first vertex
        xCoords.append(xCoords[0])
        yCoords.append(yCoords[0])
    return xCoords, yCoords

@staticmethod
def plotAll(points, options = '', closePolygon = False, **kwargs):
    '''Plots all the points as a polyline (static method of Point),
    optionally closed as a polygon; options and kwargs are forwarded
    to matplotlib plot.'''
    xCoords, yCoords = polylineCoordinates(points, closePolygon)
    plot(xCoords, yCoords, options, **kwargs)

def plotOutlineAtInstant(self, t, options = '', **kwargs):
    '''Plots the closed outline of the object at instant t from the
    positions of its features (method of MovingObject); does nothing
    if the object has no features.'''
    if self.hasFeatures():
        points = [f.getPositionAtInstant(t) for f in self.getFeatures()]
        Point.plotAll(points, options, closePolygon = True, **kwargs)

def play(self, videoFilename, homography = None, undistort = False, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1.):
    '''Displays the object trajectory overlaid on the video
    (method of MovingObject), delegating to cvutils.displayTrajectories
    over the object's own time interval.'''
    cvutils.displayTrajectories(videoFilename, [self], homography = homography, firstFrameNum = self.getFirstInstant(), lastFrameNumArg = self.getLastInstant(), undistort = undistort, intrinsicCameraMatrix = intrinsicCameraMatrix, distortionCoefficients = distortionCoefficients, undistortedImageMultiplication = undistortedImageMultiplication)
--- a/trafficintelligence/storage.py Thu Mar 16 17:03:18 2023 -0400 +++ b/trafficintelligence/storage.py Wed Mar 22 15:40:33 2023 -0400 @@ -1351,18 +1351,20 @@ data = data[data.trackingid > -1] objects = [] + featureNum = 0 for objNum in data.trackingid.unique(): tmp = data[data.trackingid == objNum].sort_values('frame') t = moving.Trajectory()#([[float(numbers[6])],[float(numbers[7])]]) + featureTrajectories = [moving.Trajectory() for i in range(4)] for i, r in tmp.iterrows(): _, Tr_imu2w = oxts[r.frame] - transfer_matrix = {'P2': reshape(kittiCalibration['P2'], (3, 4)), - 'R_rect': kittiCalibration['R_rect'], - 'Tr_cam_to_velo': kittiCalibration['Tr_cam_velo'], - # transfer_matrix['Tr_velo_to_cam'] = kittiCalibration['Tr_velo_to_cam'] - 'Tr_velo_to_imu': kittiCalibration['Tr_velo_imu'], - # transfer_matrix['Tr_imu_to_velo'] = kittiCalibration['Tr_imu_to_velo'] - 'Tr_imu_to_world': Tr_imu2w} + # transfer_matrix = {'P2': reshape(kittiCalibration['P2'], (3, 4)), + # 'R_rect': kittiCalibration['R_rect'], + # 'Tr_cam_to_velo': kittiCalibration['Tr_cam_velo'], + # # transfer_matrix['Tr_velo_to_cam'] = kittiCalibration['Tr_velo_to_cam'] + # 'Tr_velo_to_imu': kittiCalibration['Tr_velo_imu'], + # # transfer_matrix['Tr_imu_to_velo'] = kittiCalibration['Tr_imu_to_velo'] + # 'Tr_imu_to_world': Tr_imu2w} # 3D object # compute rotational matrix around yaw axis R = roty(r.ry) @@ -1380,24 +1382,27 @@ # box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d) veloCorners = transpose(dot(invR0, corners3d)) # 8x3 avoid double transpose np.transpose(pts_3d_rect))) # boxes3d_world_single, flag_imu, flag_world = calib_function.project_velo_to_world(bboxes_3d_velo=boxes3d_velo_single, Tr_matrix=transfer_matrix) - Tr_velo_to_imu = transfer_matrix['Tr_velo_to_imu'] - Tr_imu_to_world = transfer_matrix['Tr_imu_to_world'] + #Tr_velo_to_imu = transfer_matrix['Tr_velo_to_imu'] + #Tr_imu_to_world = transfer_matrix['Tr_imu_to_world'] homVeloCorners = ones((veloCorners.shape[0], 
veloCorners.shape[1]+1)) homVeloCorners[:,:-1] = veloCorners # 8x4 with ones in last column - imuCorners = dot(Tr_velo_to_imu, homVeloCorners.T).T # 8x3 + imuCorners = dot(kittiCalibration['Tr_velo_imu'], homVeloCorners.T).T # 8x3 homImuCorners = ones((imuCorners.shape[0], imuCorners.shape[1]+1)) homImuCorners[:,:-1] = imuCorners # 8x4 with ones in last column - worldCorners = dot(Tr_imu_to_world, homImuCorners.T).T # 8x3 + worldCorners = dot(Tr_imu2w, homImuCorners.T).T # 8x3 # take first 4 lines of corners, x,y,_ # x0, y0, _ = boxes3d[0] xCoords = worldCorners[:4,0] yCoords = worldCorners[:4,1] t.addPositionXY(xCoords.mean(), yCoords.mean()) + for i in range(4): + featureTrajectories[i].addPositionXY(xCoords[i], yCoords[i]) # check https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga1019495a2c8d1743ed5cc23fa0daff8c - objects.append(moving.MovingObject(num = objNum, timeInterval = moving.TimeInterval(tmp.frame.min(), tmp.frame.max()), - positions = t, - userType = tmp.iloc[0].type)) + newObj = moving.MovingObject(num = objNum, timeInterval = moving.TimeInterval(tmp.frame.min(), tmp.frame.max()), positions = t, userType = tmp.iloc[0].type, features = [moving.MovingObject(num = featureNum+i, timeInterval = moving.TimeInterval(tmp.frame.min(), tmp.frame.max()), positions = featureTrajectories[i]) for i in range(4)]) + #newObj.featureNumbers = [f.getNum() for f in newObj.getFeatures()] + objects.append(newObj) + featureNum += 4 return objects def convertNgsimFile(inputfile, outputfile, append = False, nObjects = -1, sequenceNum = 0):