diff scripts/dltrack.py @ 1233:d5695e0b59d9
saving results from ultralytics works
| author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
|---|---|
| date | Fri, 08 Sep 2023 17:09:12 -0400 |
| parents | 6487ef10c0e0 |
| children | dd969637381e |
--- a/scripts/dltrack.py	Thu Sep 07 16:20:28 2023 -0400
+++ b/scripts/dltrack.py	Fri Sep 08 17:09:12 2023 -0400
@@ -1,16 +1,18 @@
 #! /usr/bin/env python3
 # from https://docs.ultralytics.com/modes/track/
 import sys, argparse
-
-from trafficintelligence import cvutils, moving, storage
+from copy import copy
 from ultralytics import YOLO
 import cv2
 
+from trafficintelligence import cvutils, moving, storage, utils
+
 parser = argparse.ArgumentParser(description='The program tracks objects following the ultralytics yolo executable.')#, epilog = 'Either the configuration filename or the other parameters (at least video and database filenames) need to be provided.')
 parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file (overrides the configuration file)')
 # detect model
 # tracker model
 parser.add_argument('--display', dest = 'display', help = 'show the results (careful with long videos, risk of running out of memory)', action = 'store_true')
+#parser.add_argument('-f', dest = 'firstFrameNum', help = 'show the results (careful with long videos, risk of running out of memory)', action = 'store_true')
 args = parser.parse_args()
 
 # required functionality?
@@ -53,45 +55,67 @@
 # use 2 x bytetrack track buffer to remove objects from existing ones
 
+
+# check if one can go to specific frame https://docs.ultralytics.com/modes/track/#persisting-tracks-loop
+
 
 # Load a model
 model = YOLO('/home/nicolas/Research/Data/classification-models/yolov8x.pt') # seg yolov8x-seg.pt
 # seg could be used on cropped image... if can be loaded and kept in memory
 # model = YOLO('/home/nicolas/Research/Data/classification-models/yolo_nas_l.pt ') # AttributeError: 'YoloNAS_L' object has no attribute 'get'
 
 # Track with the model
+#results = model.track(source=args.videoFilename, tracker="/home/nicolas/Research/Data/classification-models/bytetrack.yaml", classes=list(moving.cocoTypeNames.keys()), show=True) # , save_txt=True
 if args.display:
-    results = model.track(source=args.videoFilename, tracker="/home/nicolas/Research/Data/classification-models/bytetrack.yaml", classes=list(moving.cocoTypeNames.keys()), show=True) # , save_txt=True
-else:
     windowName = 'frame'
     cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
 
-    results = model.track(source=args.videoFilename, tracker="/home/nicolas/Research/Data/classification-models/bytetrack.yaml", classes=list(moving.cocoTypeNames.keys()), stream=True)
-    objects = []
-    currentObjects = {}
-    featureNum = 0
-    # create object with user type and list of 3 features (bottom ones and middle) + projection
-    for frameNum, result in enumerate(results):
-        print(frameNum, len(result.boxes))
-        for box in result.boxes:
+
+results = model.track(source=args.videoFilename, tracker="/home/nicolas/Research/Data/classification-models/bytetrack.yaml", classes=list(moving.cocoTypeNames.keys()), stream=True)
+objects = []
+currentObjects = {}
+featureNum = 0
+# create object with user type and list of 3 features (bottom ones and middle) + projection
+for frameNum, result in enumerate(results):
+    print(frameNum, len(result.boxes))
+    for box in result.boxes:
+        #print(box.cls, box.id, box.xyxy)
+        if box.id is not None: # None are objects with low confidence
             num = int(box.id)
             xyxy = box.xyxy[0].tolist()
             if num in currentObjects:
                 currentObjects[num].timeInterval.last = frameNum
-                features = currentObjects[num].features
-                features[0].getPositions().addPositionXY(xyxy[0],xyxy[1])
-                features[1].getPositions().addPositionXY(xyxy[2],xyxy[3])
+                currentObjects[num].userTypes.append(moving.coco2Types[int(box.cls)])
+                currentObjects[num].features[0].tmpPositions[frameNum] = moving.Point(xyxy[0],xyxy[1])
+                currentObjects[num].features[1].tmpPositions[frameNum] = moving.Point(xyxy[2],xyxy[3])
+                #features[0].getPositions().addPositionXY(xyxy[0],xyxy[1])
+                #features[1].getPositions().addPositionXY(xyxy[2],xyxy[3])
             else:
-                currentObjects[num] = moving.MovingObject(num, moving.TimeInterval(frameNum,frameNum), userType = moving.coco2Types[int(box.cls)])
-                currentObjects[num].features = [moving.MovingObject(featureNum, moving.TimeInterval(frameNum, frameNum), moving.Trajectory([[xyxy[0]],[xyxy[1]]])),
-                                                moving.MovingObject(featureNum+1, moving.TimeInterval(frameNum, frameNum), moving.Trajectory([[xyxy[2]],[xyxy[3]]]))]
+                inter = moving.TimeInterval(frameNum,frameNum)
+                currentObjects[num] = moving.MovingObject(num, inter)
+                currentObjects[num].userTypes = [moving.coco2Types[int(box.cls)]]
+                currentObjects[num].features = [moving.MovingObject(featureNum), moving.MovingObject(featureNum+1)]
                 currentObjects[num].featureNumbers = [featureNum, featureNum+1]
+                currentObjects[num].features[0].tmpPositions = {frameNum: moving.Point(xyxy[0],xyxy[1])}
+                currentObjects[num].features[1].tmpPositions = {frameNum: moving.Point(xyxy[2],xyxy[3])}
                 featureNum += 2
-            print(box.cls, box.xyxy)
+    if args.display:
         cvutils.cvImshow(windowName, result.plot()) # original image in orig_img key
         key = cv2.waitKey()
         if cvutils.quitKey(key):
            break
 
+# interpolate before saving
+for num, obj in currentObjects.items():
+    obj.setUserType(utils.mostCommon(obj.userTypes))
+    obj.features[0].timeInterval = copy(obj.getTimeInterval())
+    obj.features[1].timeInterval = copy(obj.getTimeInterval())
+    if obj.length() != len(obj.features[0].tmpPositions): # interpolate
+        obj.features[0].positions = moving.Trajectory.fromPointDict(obj.features[0].tmpPositions)
+        obj.features[1].positions = moving.Trajectory.fromPointDict(obj.features[1].tmpPositions)
+    else:
+        obj.features[0].positions = moving.Trajectory.fromPointList(list(obj.features[0].tmpPositions.values()))
+        obj.features[1].positions = moving.Trajectory.fromPointList(list(obj.features[1].tmpPositions.values()))
+
 storage.saveTrajectoriesToSqlite('test.sqlite', list(currentObjects.values()), 'object')
 # todo save bbox and mask to study localization / representation
+# apply quality checks deviation and acceleration bounds?
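The core of this changeset replaces incremental trajectory building (the removed `addPositionXY` calls) with per-frame corner positions accumulated in `tmpPositions` dicts keyed by frame number; at the end, `moving.Trajectory.fromPointDict` is used when the tracker missed frames (the `# interpolate` branch suggests it fills the gaps) and `fromPointList` when it did not. The standalone sketch below illustrates the gap-filling idea with plain tuples and linear interpolation; it is an illustration of the pattern, not the Traffic Intelligence implementation.

```python
# Standalone sketch of the accumulate-then-interpolate pattern used above:
# positions are collected in a dict keyed by frame number, and gaps left by
# missed detections are filled by linear interpolation before saving.
# This mimics what moving.Trajectory.fromPointDict presumably does; it is
# not the Traffic Intelligence code.

def interpolate_positions(tmp_positions):
    """tmp_positions: dict mapping frame number to an (x, y) tuple,
    possibly with missing frames; returns one (x, y) per frame."""
    frames = sorted(tmp_positions)
    filled = []
    for f0, f1 in zip(frames[:-1], frames[1:]):
        x0, y0 = tmp_positions[f0]
        x1, y1 = tmp_positions[f1]
        n = f1 - f0
        for i in range(n):  # linearly interpolate over the gap, if any
            alpha = i / n
            filled.append((x0 + alpha * (x1 - x0), y0 + alpha * (y1 - y0)))
    filled.append(tmp_positions[frames[-1]])
    return filled

if __name__ == '__main__':
    # frames 2 and 3 were missed by the tracker
    tmp = {0: (0., 0.), 1: (1., 1.), 4: (4., 4.)}
    print(interpolate_positions(tmp))
    # [(0.0, 0.0), (1.0, 1.0), (2.0, 2.0), (3.0, 3.0), (4.0, 4.0)]
```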
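To verify what was written to `test.sqlite`, loading the objects back with the library's storage module should be enough; a minimal sketch, assuming the usual `storage.loadTrajectoriesFromSqlite(filename, trajectoryType)` signature (treat it as an assumption if your version differs).

```python
# Load the saved objects back and print basic attributes as a sanity check.
from trafficintelligence import storage

objects = storage.loadTrajectoriesFromSqlite('test.sqlite', 'object')
for obj in objects:
    # each object was saved with two features (top-left and bottom-right box corners)
    print(obj.getNum(), obj.getUserType(), obj.length())
```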
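The new `# check if one can go to specific frame` comment and the commented-out `-f`/`firstFrameNum` option point at the per-frame loop from the linked ultralytics page on persisting tracks. A minimal sketch of that loop: OpenCV can seek with `CAP_PROP_POS_FRAMES` and `model.track(..., persist=True)` keeps track ids across calls. The video path, model path, and `firstFrameNum` value below are placeholders, not values from this changeset.

```python
# Frame-by-frame tracking loop that starts at an arbitrary frame,
# following the persisting-tracks pattern from the ultralytics docs.
import cv2
from ultralytics import YOLO

model = YOLO('yolov8x.pt')          # placeholder model path
firstFrameNum = 100                 # hypothetical start frame
cap = cv2.VideoCapture('video.mp4') # placeholder video path
cap.set(cv2.CAP_PROP_POS_FRAMES, firstFrameNum)  # jump to the desired frame
while cap.isOpened():
    success, frame = cap.read()
    if not success:
        break
    results = model.track(frame, persist=True)  # persist=True keeps ids between calls
    for box in results[0].boxes:
        if box.id is not None:  # None are low-confidence detections, as above
            print(int(box.id), box.xyxy[0].tolist())
cap.release()
```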