comparison scripts/dltrack.py @ 1233:d5695e0b59d9

saving results from ultralytics works
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Fri, 08 Sep 2023 17:09:12 -0400
parents 6487ef10c0e0
children dd969637381e

--- a/scripts/dltrack.py	1232:83ca1493d55c
+++ b/scripts/dltrack.py	1233:d5695e0b59d9
@@ -1,18 +1,20 @@
 #! /usr/bin/env python3
 # from https://docs.ultralytics.com/modes/track/
 import sys, argparse
-
-from trafficintelligence import cvutils, moving, storage
+from copy import copy
 from ultralytics import YOLO
 import cv2
+
+from trafficintelligence import cvutils, moving, storage, utils
 
 parser = argparse.ArgumentParser(description='The program tracks objects following the ultralytics yolo executable.')#, epilog = 'Either the configuration filename or the other parameters (at least video and database filenames) need to be provided.')
 parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file (overrides the configuration file)')
 # detect model
 # tracker model
 parser.add_argument('--display', dest = 'display', help = 'show the results (careful with long videos, risk of running out of memory)', action = 'store_true')
+#parser.add_argument('-f', dest = 'firstFrameNum', help = 'show the results (careful with long videos, risk of running out of memory)', action = 'store_true')
 args = parser.parse_args()
 
 # required functionality?
 # # filename of the video to process (can be images, eg image%04d.png)
 # video-filename = laurier.avi
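
Note: with the options defined in this hunk, a typical invocation would look like the following sketch (laurier.avi is just the placeholder video name from the configuration comments; the output database is currently hardcoded to test.sqlite at the end of the script):

    python3 scripts/dltrack.py -i laurier.avi --display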
@@ -51,18 +53,20 @@
 
 # TODO add option to refine position with mask for vehicles
 
 # use 2 x bytetrack track buffer to remove objects from existing ones
 
+
+# check if one can go to specific frame https://docs.ultralytics.com/modes/track/#persisting-tracks-loop
+
 # Load a model
 model = YOLO('/home/nicolas/Research/Data/classification-models/yolov8x.pt') # seg yolov8x-seg.pt
 # seg could be used on cropped image... if can be loaded and kept in memory
 # model = YOLO('/home/nicolas/Research/Data/classification-models/yolo_nas_l.pt ') # AttributeError: 'YoloNAS_L' object has no attribute 'get'
 
 # Track with the model
+#results = model.track(source=args.videoFilename, tracker="/home/nicolas/Research/Data/classification-models/bytetrack.yaml", classes=list(moving.cocoTypeNames.keys()), show=True) # , save_txt=True
 if args.display:
-    results = model.track(source=args.videoFilename, tracker="/home/nicolas/Research/Data/classification-models/bytetrack.yaml", classes=list(moving.cocoTypeNames.keys()), show=True) # , save_txt=True
-else:
     windowName = 'frame'
     cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
 
 results = model.track(source=args.videoFilename, tracker="/home/nicolas/Research/Data/classification-models/bytetrack.yaml", classes=list(moving.cocoTypeNames.keys()), stream=True)
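
Note on the new "check if one can go to specific frame" comment: the persisting-tracks loop on the linked ultralytics page processes the video frame by frame, which would make seeking possible before tracking starts. A minimal sketch, assuming the persist=True argument documented on that page and the standard OpenCV frame seek; firstFrameNum stands for the commented-out -f option and is hypothetical here:

    # frame-by-frame alternative to model.track(source=...) (sketch, not the current code)
    capture = cv2.VideoCapture(args.videoFilename)
    capture.set(cv2.CAP_PROP_POS_FRAMES, firstFrameNum) # jump to the desired first frame
    while capture.isOpened():
        success, frame = capture.read()
        if not success:
            break
        # persist=True carries track ids over from the previous call
        result = model.track(frame, persist=True)[0]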
@@ -69,25 +73,32 @@
 objects = []
 currentObjects = {}
 featureNum = 0
 # create object with user type and list of 3 features (bottom ones and middle) + projection
 for frameNum, result in enumerate(results):
     print(frameNum, len(result.boxes))
     for box in result.boxes:
-        num = int(box.id)
-        xyxy = box.xyxy[0].tolist()
-        if num in currentObjects:
-            currentObjects[num].timeInterval.last = frameNum
-            features = currentObjects[num].features
-            features[0].getPositions().addPositionXY(xyxy[0],xyxy[1])
-            features[1].getPositions().addPositionXY(xyxy[2],xyxy[3])
-        else:
-            currentObjects[num] = moving.MovingObject(num, moving.TimeInterval(frameNum,frameNum), userType = moving.coco2Types[int(box.cls)])
-            currentObjects[num].features = [moving.MovingObject(featureNum, moving.TimeInterval(frameNum, frameNum), moving.Trajectory([[xyxy[0]],[xyxy[1]]])),
-                                            moving.MovingObject(featureNum+1, moving.TimeInterval(frameNum, frameNum), moving.Trajectory([[xyxy[2]],[xyxy[3]]]))]
-            currentObjects[num].featureNumbers = [featureNum, featureNum+1]
-            featureNum += 2
-    print(box.cls, box.xyxy)
-    cvutils.cvImshow(windowName, result.plot()) # original image in orig_img
-    key = cv2.waitKey()
-    if cvutils.quitKey(key):
-        break
+        #print(box.cls, box.id, box.xyxy)
+        if box.id is not None: # None are objects with low confidence
+            num = int(box.id)
+            xyxy = box.xyxy[0].tolist()
+            if num in currentObjects:
+                currentObjects[num].timeInterval.last = frameNum
+                currentObjects[num].userTypes.append(moving.coco2Types[int(box.cls)])
+                currentObjects[num].features[0].tmpPositions[frameNum] = moving.Point(xyxy[0],xyxy[1])
+                currentObjects[num].features[1].tmpPositions[frameNum] = moving.Point(xyxy[2],xyxy[3])
+                #features[0].getPositions().addPositionXY(xyxy[0],xyxy[1])
+                #features[1].getPositions().addPositionXY(xyxy[2],xyxy[3])
+            else:
+                inter = moving.TimeInterval(frameNum,frameNum)
+                currentObjects[num] = moving.MovingObject(num, inter)
+                currentObjects[num].userTypes = [moving.coco2Types[int(box.cls)]]
+                currentObjects[num].features = [moving.MovingObject(featureNum), moving.MovingObject(featureNum+1)]
+                currentObjects[num].featureNumbers = [featureNum, featureNum+1]
+                currentObjects[num].features[0].tmpPositions = {frameNum: moving.Point(xyxy[0],xyxy[1])}
+                currentObjects[num].features[1].tmpPositions = {frameNum: moving.Point(xyxy[2],xyxy[3])}
+                featureNum += 2
+    if args.display:
+        cvutils.cvImshow(windowName, result.plot()) # original image in orig_img
+        key = cv2.waitKey()
+        if cvutils.quitKey(key):
+            break
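
Note: the detected class is now appended to userTypes at every frame and resolved by majority vote after tracking (utils.mostCommon in the next hunk), since the detector may classify the same track differently from frame to frame. A sketch of the behaviour assumed of utils.mostCommon:

    from collections import Counter

    def mostCommon(values):
        # most frequent element, e.g. mostCommon([2, 1, 2]) == 2
        return Counter(values).most_common(1)[0][0]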
@@ -94,4 +105,17 @@
 
+# interpolate before saving
+for num, obj in currentObjects.items():
+    obj.setUserType(utils.mostCommon(obj.userTypes))
+    obj.features[0].timeInterval = copy(obj.getTimeInterval())
+    obj.features[1].timeInterval = copy(obj.getTimeInterval())
+    if obj.length() != len(obj.features[0].tmpPositions): # interpolate
+        obj.features[0].positions = moving.Trajectory.fromPointDict(obj.features[0].tmpPositions)
+        obj.features[1].positions = moving.Trajectory.fromPointDict(obj.features[1].tmpPositions)
+    else:
+        obj.features[0].positions = moving.Trajectory.fromPointList(list(obj.features[0].tmpPositions.values()))
+        obj.features[1].positions = moving.Trajectory.fromPointList(list(obj.features[1].tmpPositions.values()))
+
 storage.saveTrajectoriesToSqlite('test.sqlite', list(currentObjects.values()), 'object')
 
 # todo save bbox and mask to study localization / representation
+# apply quality checks deviation and acceleration bounds?
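
Note on the "interpolate before saving" block: a track may have no detection on some frames, in which case it has fewer stored positions than the length of its time interval, which is what obj.length() != len(obj.features[0].tmpPositions) tests. moving.Trajectory.fromPointDict is then assumed to fill the gaps, presumably by linear interpolation between the surrounding detections, along these lines (a sketch, not the trafficintelligence implementation):

    def interpolateGaps(tmpPositions):
        '''Returns the positions for all frames between the first and last keys
        of tmpPositions (frame number -> moving.Point), linearly interpolating
        the missing frames'''
        frames = sorted(tmpPositions)
        points = []
        for f1, f2 in zip(frames[:-1], frames[1:]):
            p1, p2 = tmpPositions[f1], tmpPositions[f2]
            for f in range(f1, f2):
                alpha = (f - f1) / (f2 - f1)
                points.append(moving.Point((1 - alpha) * p1.x + alpha * p2.x,
                                           (1 - alpha) * p1.y + alpha * p2.y))
        points.append(tmpPositions[frames[-1]])
        return points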