changeset 1231:6487ef10c0e0

work in progress
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Thu, 24 Aug 2023 17:06:16 -0400
parents c582b272108f
children 83ca1493d55c
files scripts/dltrack.py
diffstat 1 files changed, 37 insertions(+), 6 deletions(-)
--- a/scripts/dltrack.py	Mon Aug 21 15:49:32 2023 -0400
+++ b/scripts/dltrack.py	Thu Aug 24 17:06:16 2023 -0400
@@ -2,8 +2,9 @@
 # from https://docs.ultralytics.com/modes/track/
 import sys, argparse
 
-from trafficintelligence.moving import cocoTypeNames
+from trafficintelligence import cvutils, moving, storage
 from ultralytics import YOLO
+import cv2
 
 parser = argparse.ArgumentParser(description='The program tracks objects with the Ultralytics YOLO library.')#, epilog = 'Either the configuration filename or the other parameters (at least video and database filenames) need to be provided.')
 parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file (overrides the configuration file)')
@@ -50,6 +51,8 @@
 
 # TODO add option to refine position with mask for vehicles
 
+# TODO use 2 x the ByteTrack track buffer as the timeout after which a lost object is removed from the current ones
+
 # Load a model
 model = YOLO('/home/nicolas/Research/Data/classification-models/yolov8x.pt') # seg yolov8x-seg.pt
 # seg could be used on cropped image... if can be loaded and kept in memory
@@ -57,10 +60,38 @@
 
 # Track with the model
 if args.display:
-    results = model.track(source=args.videoFilename, tracker="/home/nicolas/Research/Data/classification-models/bytetrack.yaml", classes=list(cocoTypeNames.keys()), show=True) # , save_txt=True 
+    results = model.track(source=args.videoFilename, tracker="/home/nicolas/Research/Data/classification-models/bytetrack.yaml", classes=list(moving.cocoTypeNames.keys()), show=True) # , save_txt=True 
 else:
-    results = model.track(source=args.videoFilename, tracker="/home/nicolas/Research/Data/classification-models/bytetrack.yaml", classes=list(cocoTypeNames.keys()), stream=True)
-    for result in results:
-        print(len(result.boxes))
+    windowName = 'frame'
+    cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
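+    # display results in our own resizable OpenCV window, rather than with the ultralytics built-in display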
+    
+    results = model.track(source=args.videoFilename, tracker="/home/nicolas/Research/Data/classification-models/bytetrack.yaml", classes=list(moving.cocoTypeNames.keys()), stream=True)
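+    # with stream=True, model.track returns a generator, processing the video frame by frame instead of keeping all results in memory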
+    objects = []
+    currentObjects = {}
+    featureNum = 0
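+    # objects will collect the finished tracks, currentObjects maps ByteTrack track ids
+    # to the objects being built, and featureNum provides unique feature numbers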
+    # TODO create objects with a user type and a list of 3 features (the 2 bottom corners and the middle) + their projection
+    for frameNum, result in enumerate(results):
+        print(frameNum, len(result.boxes))
         for box in result.boxes:
-            print(box.xyxy)
+            if box.id is None: # skip detections that are not associated with a track
+                continue
+            num = int(box.id) # ByteTrack track identifier
+            xyxy = box.xyxy[0].tolist() # bounding box as [x1, y1, x2, y2] (top left and bottom right corners)
+            if num in currentObjects: # known object: extend its time interval and corner trajectories
+                currentObjects[num].timeInterval.last = frameNum
+                features = currentObjects[num].features
+                features[0].timeInterval.last = frameNum # keep the feature time intervals in sync
+                features[1].timeInterval.last = frameNum # with their growing trajectories
+                features[0].getPositions().addPositionXY(xyxy[0],xyxy[1]) # top left corner
+                features[1].getPositions().addPositionXY(xyxy[2],xyxy[3]) # bottom right corner
+            else:
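+                # new object: the user type is mapped from the COCO class, with 2 features for the bounding box corners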
+                currentObjects[num] = moving.MovingObject(num, moving.TimeInterval(frameNum,frameNum), userType = moving.coco2Types[int(box.cls)])
+                currentObjects[num].features = [moving.MovingObject(featureNum, moving.TimeInterval(frameNum, frameNum), moving.Trajectory([[xyxy[0]],[xyxy[1]]])),
+                                                moving.MovingObject(featureNum+1, moving.TimeInterval(frameNum, frameNum), moving.Trajectory([[xyxy[2]],[xyxy[3]]]))]
+                currentObjects[num].featureNumbers = [featureNum, featureNum+1]
+                featureNum += 2
+            print(box.cls, box.xyxy) # debug output
+        cvutils.cvImshow(windowName, result.plot()) # original image in orig_img
+        key = cv2.waitKey() # wait for a key press to step through the video frame by frame
+        if cvutils.quitKey(key):
+            break
+
+storage.saveTrajectoriesToSqlite('test.sqlite', list(currentObjects.values()), 'object')
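+# the saved trajectories can be loaded back for analysis, e.g. with storage.loadTrajectoriesFromSqlite('test.sqlite', 'object')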
+
+# TODO save bbox and mask to study localization / representation