comparison scripts/dltrack.py @ 1231:6487ef10c0e0

work in progress
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Thu, 24 Aug 2023 17:06:16 -0400
parents c582b272108f
children d5695e0b59d9
comparing 1230:c582b272108f with 1231:6487ef10c0e0
  #! /usr/bin/env python3
  # from https://docs.ultralytics.com/modes/track/
  import sys, argparse

- from trafficintelligence.moving import cocoTypeNames
+ from trafficintelligence import cvutils, moving, storage
  from ultralytics import YOLO
+ import cv2

  parser = argparse.ArgumentParser(description='The program tracks objects following the ultralytics yolo executable.')#, epilog = 'Either the configuration filename or the other parameters (at least video and database filenames) need to be provided.')
  parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file (overrides the configuration file)')
  # detect model
  # tracker model
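
Note on the import change: the first hunk replaces the direct import of cocoTypeNames with the moving module itself, which also gives access to moving.coco2Types used further down. A minimal sketch to inspect both mappings, assuming trafficintelligence is installed:

    from trafficintelligence import moving
    # COCO class id -> readable class name, used to select the classes to track
    print(moving.cocoTypeNames)
    # COCO class id -> trafficintelligence road user type, used for userType below
    print(moving.coco2Types)
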
  ... (unchanged lines omitted)

  # # number of frame to process: 0 means processing all frames
  # nframes = 0

  # TODO add option to refine position with mask for vehicles

+ # use 2 x bytetrack track buffer to remove objects from existing ones
+
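
The added comment about using twice the ByteTrack track buffer refers to the tracker configuration file passed to model.track below. A sketch of writing such a config from Python; the field names and default values follow ultralytics' stock bytetrack.yaml and are assumptions to check against the installed version:

    import yaml

    # sketch only, not the project's actual tracker config
    cfg = {'tracker_type': 'bytetrack',
           'track_high_thresh': 0.5,  # detection threshold for the first association
           'track_low_thresh': 0.1,   # detection threshold for the second association
           'new_track_thresh': 0.6,   # threshold to start a new track
           'track_buffer': 60,        # 2 x the usual 30 frames before a lost track is dropped
           'match_thresh': 0.8}       # matching threshold
    with open('bytetrack-2xbuffer.yaml', 'w') as f:
        yaml.safe_dump(cfg, f)
    # then: model.track(..., tracker='bytetrack-2xbuffer.yaml')
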
  # Load a model
  model = YOLO('/home/nicolas/Research/Data/classification-models/yolov8x.pt') # seg yolov8x-seg.pt
  # seg could be used on cropped image... if can be loaded and kept in memory
  # model = YOLO('/home/nicolas/Research/Data/classification-models/yolo_nas_l.pt ') # AttributeError: 'YoloNAS_L' object has no attribute 'get'

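For context, the streaming form of the tracking call used below returns a generator that yields one Results object per frame; each element of result.boxes carries the tracker id (box.id), the class (box.cls) and the corner coordinates (box.xyxy). A minimal sketch of that API, with 'yolov8n.pt' and 'video.mp4' as placeholder filenames:

    from ultralytics import YOLO

    model = YOLO('yolov8n.pt')
    for result in model.track(source='video.mp4', stream=True):
        for box in result.boxes:
            if box.id is None:  # detections left unassigned by the tracker have no id
                continue
            x1, y1, x2, y2 = box.xyxy[0].tolist()  # top-left and bottom-right corners
            print(int(box.id), int(box.cls), x1, y1, x2, y2)
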
  # Track with the model
  if args.display:
-     results = model.track(source=args.videoFilename, tracker="/home/nicolas/Research/Data/classification-models/bytetrack.yaml", classes=list(cocoTypeNames.keys()), show=True) # , save_txt=True
+     results = model.track(source=args.videoFilename, tracker="/home/nicolas/Research/Data/classification-models/bytetrack.yaml", classes=list(moving.cocoTypeNames.keys()), show=True) # , save_txt=True
  else:
-     results = model.track(source=args.videoFilename, tracker="/home/nicolas/Research/Data/classification-models/bytetrack.yaml", classes=list(cocoTypeNames.keys()), stream=True)
-     for result in results:
-         print(len(result.boxes))
-         for box in result.boxes:
-             print(box.xyxy)
+     windowName = 'frame'
+     cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
+
+     results = model.track(source=args.videoFilename, tracker="/home/nicolas/Research/Data/classification-models/bytetrack.yaml", classes=list(moving.cocoTypeNames.keys()), stream=True)
+     objects = []
+     currentObjects = {}
+     featureNum = 0
+     # create object with user type and list of 3 features (bottom ones and middle) + projection
+     for frameNum, result in enumerate(results):
+         print(frameNum, len(result.boxes))
+         for box in result.boxes:
+             num = int(box.id)
+             xyxy = box.xyxy[0].tolist()
+             if num in currentObjects:
+                 currentObjects[num].timeInterval.last = frameNum
+                 features = currentObjects[num].features
+                 features[0].getPositions().addPositionXY(xyxy[0],xyxy[1])
+                 features[1].getPositions().addPositionXY(xyxy[2],xyxy[3])
+             else:
+                 currentObjects[num] = moving.MovingObject(num, moving.TimeInterval(frameNum,frameNum), userType = moving.coco2Types[int(box.cls)])
+                 currentObjects[num].features = [moving.MovingObject(featureNum, moving.TimeInterval(frameNum, frameNum), moving.Trajectory([[xyxy[0]],[xyxy[1]]])),
+                                                 moving.MovingObject(featureNum+1, moving.TimeInterval(frameNum, frameNum), moving.Trajectory([[xyxy[2]],[xyxy[3]]]))]
+                 currentObjects[num].featureNumbers = [featureNum, featureNum+1]
+                 featureNum += 2
+             print(box.cls, box.xyxy)
+         cvutils.cvImshow(windowName, result.plot()) # original image in orig_img
+         key = cv2.waitKey()
+         if cvutils.quitKey(key):
+             break
+
+ storage.saveTrajectoriesToSqlite('test.sqlite', list(currentObjects.values()), 'object')
+
+ # todo save bbox and mask to study localization / representation
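
To verify what was written, the trajectories can be read back with the matching loader from the storage module; a minimal sketch, assuming the 'test.sqlite' file produced above:

    from trafficintelligence import storage

    # load the objects saved by the script (trajectory type 'object')
    objects = storage.loadTrajectoriesFromSqlite('test.sqlite', 'object')
    for obj in objects:
        print(obj.getNum(), obj.getUserType(), obj.getTimeInterval())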