diff scripts/dltrack.py @ 1245:371c718e57d7

interface updates
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Thu, 08 Feb 2024 16:10:54 -0500
parents 4cd8ace3552f
children 2397de73770d
--- a/scripts/dltrack.py	Thu Feb 08 15:04:56 2024 -0500
+++ b/scripts/dltrack.py	Thu Feb 08 16:10:54 2024 -0500
@@ -13,8 +13,9 @@
 from trafficintelligence import cvutils, moving, storage, utils
 
 parser = argparse.ArgumentParser(description='The program tracks objects using the ultralytics models and trackers.')
-parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file', required = True)
-parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file', required = True)
+parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the configuration file')
+parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file (overrides the configuration file)')
+parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file (overrides the configuration file)')
 parser.add_argument('-m', dest = 'detectorFilename', help = 'name of the detection model file', required = True)
 parser.add_argument('-t', dest = 'trackerFilename', help = 'name of the tracker file', required = True)
 parser.add_argument('-o', dest = 'homographyFilename', help = 'filename of the homography matrix', default = 'homography.txt')
@@ -23,6 +24,7 @@
 parser.add_argument('--intrinsic', dest = 'intrinsicCameraMatrixFilename', help = 'name of the intrinsic camera file')
 parser.add_argument('--distortion-coefficients', dest = 'distortionCoefficients', help = 'distortion coefficients', nargs = '*', type = float)
 parser.add_argument('--display', dest = 'display', help = 'show the raw detection and tracking results', action = 'store_true')
+parser.add_argument('--no-image-coordinates', dest = 'notSavingImageCoordinates', help = 'do not save the raw detection and tracking results in image coordinates', action = 'store_true')
 parser.add_argument('-f', dest = 'firstFrameNum', help = 'number of the first frame to process', type = int, default = 0)
 parser.add_argument('-l', dest = 'lastFrameNum', help = 'number of the last frame to process', type = int, default = float('Inf'))
 parser.add_argument('--conf', dest = 'confindence', help = 'object confidence threshold for detection', type = float, default = 0.25)
@@ -30,9 +32,21 @@
 parser.add_argument('--cyclist-iou', dest = 'cyclistIou', help = 'IoU threshold to associate a bike and ped bounding box', type = float, default = 0.15)
 parser.add_argument('--cyclist-match-prop', dest = 'cyclistMatchingProportion', help = 'minimum proportion of time a bike exists and is associated with a pedestrian to be merged as cyclist', type = float, default = 0.3)
 parser.add_argument('--max-temp-overal', dest = 'maxTemporalOverlap', help = 'maximum proportion of time to merge 2 bikes associated with the same pedestrian', type = float, default = 0.05)
+
 args = parser.parse_args()
+params, videoFilename, databaseFilename, invHomography, intrinsicCameraMatrix, distortionCoefficients, undistortedImageMultiplication, undistort, firstFrameNum = storage.processVideoArguments(args)
+
+if args.intrinsicCameraMatrixFilename is not None:
+    intrinsicCameraMatrix = loadtxt(args.intrinsicCameraMatrixFilename)
+if args.distortionCoefficients is not None:
+    distortionCoefficients = args.distortionCoefficients
+if args.firstFrameNum is not None:
+    firstFrameNum = args.firstFrameNum
+if args.lastFrameNum is not None:
+    lastFrameNum = args.lastFrameNum
 
 # TODO add option to refine position with mask for vehicles
+# TODO work with optical flow (farneback or RAFT) https://pytorch.org/vision/main/models/raft.html
 
 # use 2 x bytetrack track buffer to remove objects from existing ones
 
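The optical flow TODO a few lines up points at torchvision's RAFT; a minimal sketch of querying it, following the torchvision preset (the frame tensors and shapes here are illustrative, and RAFT expects dimensions divisible by 8):

    import torch
    from torchvision.models.optical_flow import raft_large, Raft_Large_Weights

    weights = Raft_Large_Weights.DEFAULT
    model = raft_large(weights=weights).eval()
    transforms = weights.transforms()  # resizing/normalization preset for RAFT

    # img1, img2: uint8 batches of shape (N, 3, H, W), H and W divisible by 8
    img1 = torch.randint(0, 256, (1, 3, 360, 640), dtype=torch.uint8)  # illustrative frames
    img2 = torch.randint(0, 256, (1, 3, 360, 640), dtype=torch.uint8)
    img1, img2 = transforms(img1, img2)
    with torch.no_grad():
        flows = model(img1, img2)  # list of iterative flow refinements
    flow = flows[-1]               # (N, 2, H, W) displacement field, last is most refined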
@@ -57,10 +71,9 @@
 if not success:
     print('Input {} could not be read. Exiting'.format(args.videoFilename))
     import sys; sys.exit()
+
 results = model.track(frame, tracker=args.trackerFilename, classes=list(moving.cocoTypeNames.keys()), persist=True, verbose=False)
-# create object with user type and list of 3 features (bottom ones and middle) + projection
 while capture.isOpened() and success and frameNum <= lastFrameNum:
-#for frameNum, result in enumerate(results):
     result = results[0]
     if frameNum %10 == 0:
         print(frameNum, len(result.boxes), 'objects')
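For reference, the per-frame tracking pattern the loop above relies on looks roughly like this (a minimal sketch with illustrative file names, not the script's exact arguments):

    import cv2
    from ultralytics import YOLO

    model = YOLO('yolov8x.pt')               # illustrative detector weights
    capture = cv2.VideoCapture('video.mp4')  # illustrative input video
    success, frame = capture.read()
    while success:
        # persist=True carries the tracker state across calls so track ids stay stable
        results = model.track(frame, tracker='bytetrack.yaml', persist=True, verbose=False)
        for box in results[0].boxes:
            if box.id is not None:            # unmatched detections carry no track id
                print(int(box.id.item()), int(box.cls.item()), box.xyxy[0].tolist())
        success, frame = capture.read()
    capture.release()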
@@ -103,8 +116,8 @@
 # add quality control: avoid U-turns
     
 # merge bikes and people
-twowheels = [num for num, obj in objects.items() if obj.getUserType() in (3,4)]
-pedestrians = [num for num, obj in objects.items() if obj.getUserType() == 2]
+twowheels = [num for num, obj in objects.items() if obj.getUserType() in (moving.userType2Num['motorcyclist'], moving.userType2Num['cyclist'])]
+pedestrians = [num for num, obj in objects.items() if obj.getUserType() == moving.userType2Num['pedestrian']]
 
 def mergeObjects(obj1, obj2):
     obj1.features = obj1.features+obj2.features
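The hunk cuts mergeObjects off after its first line; besides pooling features, the merged object's time interval has to cover both inputs. A minimal sketch of the rest, assuming the usual moving.TimeInterval constructor and getFirstInstant/getLastInstant accessors from the trafficintelligence moving module:

    def mergeObjects(obj1, obj2):
        # pool the features of both tracks into obj1
        obj1.features = obj1.features+obj2.features
        # extend obj1 to the union of the two time intervals (assumes the
        # moving.TimeInterval constructor and the instant accessors behave
        # as in the trafficintelligence moving module)
        obj1.timeInterval = moving.TimeInterval(min(obj1.getFirstInstant(), obj2.getFirstInstant()),
                                                max(obj1.getLastInstant(), obj2.getLastInstant()))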
@@ -134,8 +147,8 @@
     nMatchedBikes = (costs[:,pedInd] < -args.cyclistMatchingProportion).sum()
     if nMatchedBikes == 0: # peds that have no bike matching: see if they have been classified as bikes sometimes
         userTypeStats = Counter(obj.userTypes)
-        if (4 in userTypeStats or (3 in userTypeStats and 4 in userTypeStats and userTypeStats[3]<=userTypeStats[4])) and userTypeStats[3]+userTypeStats[4] > args.bikeProportion*userTypeStats.total(): # 3 is motorcycle and 4 is cyclist (verif if not turning all motorbike into cyclists)
-            obj.setUserType(4)
+        if (moving.userType2Num['cyclist'] in userTypeStats or (moving.userType2Num['motorcyclist'] in userTypeStats and moving.userType2Num['cyclist'] in userTypeStats and userTypeStats[moving.userType2Num['motorcyclist']]<=userTypeStats[moving.userType2Num['cyclist']])) and userTypeStats[moving.userType2Num['motorcyclist']]+userTypeStats[moving.userType2Num['cyclist']] > args.bikeProportion*userTypeStats.total(): # check that we are not turning all motorbikes into cyclists
+            obj.setUserType(moving.userType2Num['cyclist'])
     elif nMatchedBikes > 1: # try to merge bikes first
         twIndices = np.nonzero(costs[:,pedInd] < -args.cyclistMatchingProportion)[0]
         # we have to compute temporal overlaps of all 2 wheels among themselves, then remove the ones with the most overlap (sum over column) one by one until there is little left
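The comment above describes a greedy scheme: compute pairwise temporal overlaps among the matched two-wheelers, then drop the track with the largest column sum until little overlap remains. A hypothetical sketch of that loop (the function name and maxOverlap threshold are illustrative, not the script's code):

    import numpy as np

    def greedyPrune(overlaps, maxOverlap):
        '''overlaps: symmetric (n, n) matrix of pairwise temporal overlap
        proportions with a zero diagonal; returns the indices of tracks to keep'''
        keep = list(range(overlaps.shape[0]))
        while len(keep) > 1:
            sub = overlaps[np.ix_(keep, keep)]
            colSums = sub.sum(axis=0)
            worst = int(colSums.argmax())
            if colSums[worst] <= maxOverlap:
                break  # remaining overlap is acceptable
            del keep[worst]  # drop the track overlapping most with the others
        return keep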