comparison scripts/classify-objects.py @ 1241:ab4c72b9475c

work in progress
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Mon, 05 Feb 2024 17:06:01 -0500
parents bb14f919d1cb
children 4cd8ace3552f
--- a/scripts/classify-objects.py (1240:bb14f919d1cb)
+++ b/scripts/classify-objects.py (1241:ab4c72b9475c)
@@ -3,10 +3,19 @@
 import sys, argparse
 
 import numpy as np
 import cv2
 from scipy.stats import norm, lognorm
+from pathlib import Path
+
+try:
+    from ultralytics import YOLO
+    ultralyticsAvailable = True
+except ImportError:
+    #print('Ultralytics library could not be loaded (Yolo classification will not be available)') # TODO change to logging module
+    ultralyticsAvailable = False
+
 
 from trafficintelligence import cvutils, moving, ml, storage, utils
 
 # TODO add mode detection live, add choice of kernel and svm type (to be saved in future classifier format)
 
@@ -27,12 +36,19 @@
 
 speedAggregationFunc = utils.aggregationFunction(classifierParams.speedAggregationMethod, classifierParams.speedAggregationCentile)
 if speedAggregationFunc is None:
     sys.exit()
 
-pedBikeCarSVM = ml.SVM_load(classifierParams.pedBikeCarSVMFilename)
-bikeCarSVM = ml.SVM_load(classifierParams.bikeCarSVMFilename)
+if ultralyticsAvailable and Path(classifierParams.dlFilename).is_file(): # use Yolo
+    pedBikeCarSVM = None
+    bikeCarSVM = None
+    yolo = YOLO(classifierParams.dlFilename, task='detect')
+    useYolo = True
+else:
+    useYolo = False
+    pedBikeCarSVM = ml.SVM_load(classifierParams.pedBikeCarSVMFilename)
+    bikeCarSVM = ml.SVM_load(classifierParams.bikeCarSVMFilename)
 
 # log logistic for ped and bik otherwise ((pedBeta/pedAlfa)*((sMean/pedAlfa)**(pedBeta-1)))/((1+(sMean/pedAlfa)**pedBeta)**2.)
 carNorm = norm(classifierParams.meanVehicleSpeed, classifierParams.stdVehicleSpeed)
 pedNorm = norm(classifierParams.meanPedestrianSpeed, classifierParams.stdPedestrianSpeed)
 # numpy lognorm shape, loc, scale: shape for numpy is scale (std of the normal) and scale for numpy is exp(location) (loc=mean of the normal)
@@ -59,12 +75,10 @@
     plt.show()
     sys.exit()
 
 objects = storage.loadTrajectoriesFromSqlite(databaseFilename, 'object', args.nObjects, withFeatures = True)
 timeInterval = moving.TimeInterval.unionIntervals([obj.getTimeInterval() for obj in objects])
-if args.startFrame0:
-    timeInterval.first = 0
 
 capture = cv2.VideoCapture(videoFilename)
 width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
 height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
 
@@ -79,34 +93,42 @@
 pastObjects = []
 currentObjects = []
 if capture.isOpened():
     ret = True
     frameNum = timeInterval.first
-    if not args.startFrame0:
-        capture.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
+    capture.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
     lastFrameNum = timeInterval.last
 
     while ret and frameNum <= lastFrameNum:
         ret, img = capture.read()
         if ret:
             if frameNum%50 == 0:
                 print('frame number: {}'.format(frameNum))
             #if undistort:
             #    img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
+            if useYolo:
+                results = yolo.predict(img, classes=list(moving.cocoTypeNames.keys()), verbose=False)
+
             for obj in objects[:]:
                 if obj.getFirstInstant() <= frameNum: # if images are skipped
                     obj.initClassifyUserTypeHoGSVM(speedAggregationFunc, pedBikeCarSVM, bikeCarSVM, classifierParams.maxPedestrianSpeed, classifierParams.maxCyclistSpeed, classifierParams.nFramesIgnoreAtEnds, invHomography, intrinsicCameraMatrix, distortionCoefficients)
                     currentObjects.append(obj)
                     objects.remove(obj)
 
             for obj in currentObjects[:]:
-                if obj.getLastInstant() <= frameNum: # if images are skipped
+                if obj.getLastInstant() <= frameNum:
                     obj.classifyUserTypeHoGSVM(minSpeedEquiprobable = classifierParams.minSpeedEquiprobable, speedProbabilities = speedProbabilities, maxPercentUnknown = classifierParams.maxPercentUnknown)
                     pastObjects.append(obj)
                     currentObjects.remove(obj)
                 else:
-                    obj.classifyUserTypeHoGSVMAtInstant(img, frameNum, width, height, classifierParams.percentIncreaseCrop, classifierParams.percentIncreaseCrop, classifierParams.minNPixels, classifierParams.hogRescaleSize, classifierParams.hogNOrientations, classifierParams.hogNPixelsPerCell, classifierParams.hogNCellsPerBlock, classifierParams.hogBlockNorm)
+                    if useYolo:
+                        # if one feature falls in bike, it's a bike
+                        # could one count all hits in various objects, or one takes majority at the instant?
+                        # obj.classifyUserTypeYoloAtInstant(img, frameNum, width, height, classifierParams.percentIncreaseCrop, classifierParams.percentIncreaseCrop, results[0].boxes)
+                        pass
+                    else:
+                        obj.classifyUserTypeHoGSVMAtInstant(img, frameNum, width, height, classifierParams.percentIncreaseCrop, classifierParams.percentIncreaseCrop, classifierParams.minNPixels, classifierParams.hogRescaleSize, classifierParams.hogNOrientations, classifierParams.hogNPixelsPerCell, classifierParams.hogNCellsPerBlock, classifierParams.hogBlockNorm)
                 if args.verbose:
                     print('obj {}@{}: {}'.format(obj.getNum(), frameNum, moving.userTypeNames[obj.userTypes[frameNum]]))
             frameNum += 1
 
     for obj in currentObjects:
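
Note on the lognorm comment in the second hunk above (context code, not part of the changeset): for scipy.stats.lognorm, the shape argument is the standard deviation of the underlying normal and the scale argument is exp of its mean, with loc left at 0. A minimal check with made-up numbers:

from math import exp, log
from scipy.stats import lognorm

# mean and std of the underlying normal, i.e. of log(speed); the values are made up
mu, sigma = log(0.4), 0.3
bikeSpeedDistribution = lognorm(sigma, loc=0., scale=exp(mu))
# with loc=0, the median of the lognormal is exp(mu), i.e. the scale parameter
assert abs(bikeSpeedDistribution.median() - exp(mu)) < 1e-6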
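
On the commented-out classifyUserTypeYoloAtInstant call and the questions in the TODO above it (does one feature falling in a bike box make the object a bike, or should hits be counted and a majority taken?), one possible reading is sketched below: for each tracked object, count the classes of the detections whose boxes contain the object's projected feature positions at this instant, and keep the majority. Only the function name comes from the commented-out call; the signature, the box-matching logic and the cocoToUserType mapping are assumptions, not part of this changeset or of the trafficintelligence API.

from collections import Counter

def classifyUserTypeYoloAtInstant(positions, boxes, cocoToUserType):
    '''Hypothetical sketch, not the trafficintelligence implementation.

    positions: iterable of (x, y) image coordinates of the object's features at this instant
    boxes: ultralytics Boxes object, e.g. results[0].boxes (only its xyxy and cls attributes are used)
    cocoToUserType: assumed dict mapping COCO class ids to user type ids
    Returns the majority user type over all (feature, box) hits, or None if no box contains any feature.'''
    votes = Counter()
    xyxys = boxes.xyxy.tolist()   # one [x1, y1, x2, y2] per detection
    classes = boxes.cls.tolist()  # COCO class id per detection
    for x, y in positions:
        for (x1, y1, x2, y2), c in zip(xyxys, classes):
            if x1 <= x <= x2 and y1 <= y <= y2:
                votes[cocoToUserType.get(int(c))] += 1
    votes.pop(None, None)         # drop hits from detections whose class is not mapped
    if len(votes) > 0:
        return votes.most_common(1)[0][0]
    return None

Taking the majority rather than the first bicycle hit avoids reclassifying an object because a single feature grazes a bicycle box, which is the trade-off the TODO raises.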