view scripts/classify-objects.py @ 947:053484e08947

found a more elegant solution, making a copy of the list to iterate
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Fri, 21 Jul 2017 11:31:42 -0400

#! /usr/bin/env python

import cvutils, moving, ml, storage

import numpy as np
import sys, argparse
#from cv2.ml import SVM_RBF, SVM_C_SVC
import cv2
from scipy.stats import norm, lognorm

# TODO add mode detection live, add choice of kernel and svm type (to be saved in future classifier format)

parser = argparse.ArgumentParser(description='The program classifies the road users of the database based on their speed and appearance (HoG-SVM)')
parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the configuration file', required = True)
parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file (overrides the configuration file)')
parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file (overrides the configuration file)')
parser.add_argument('-n', dest = 'nObjects', help = 'number of objects to classify', type = int, default = None)
parser.add_argument('--start-frame0', dest = 'startFrame0', help = 'start at frame 0 (for videos with indexing problems where specific frames cannot be reached by seeking)', action = 'store_true')
parser.add_argument('--plot-speed-distributions', dest = 'plotSpeedDistribution', help = 'simply plots the distributions used for each user type', action = 'store_true')
parser.add_argument('--max-speed-distribution-plot', dest = 'maxSpeedDistributionPlot', help = 'if plotting the user distributions, the maximum speed to display (km/h)', type = float, default = 50.)

args = parser.parse_args()
params, videoFilename, databaseFilename, invHomography, intrinsicCameraMatrix, distortionCoefficients, undistortedImageMultiplication, undistort, firstFrameNum = storage.processVideoArguments(args)
classifierParams = storage.ClassifierParameters(params.classifierFilename)
classifierParams.convertToFrames(params.videoFrameRate, 3.6) # conversion from km/h to m/frame
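# for example, at 30 frames/s a speed of 54 km/h becomes 54/(3.6*30) = 0.5 m/frame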

if classifierParams.speedAggregationMethod == 'median':
    speedAggregationFunc = np.median
elif classifierParams.speedAggregationMethod == 'mean':
    speedAggregationFunc = np.mean
elif classifierParams.speedAggregationMethod == 'quantile':
    speedAggregationFunc = lambda speeds: np.percentile(speeds, classifierParams.speedAggregationQuantile)
else:
    print('Unknown speed aggregation method: {}. Exiting'.format(classifierParams.speedAggregationMethod))
    sys.exit()

pedBikeCarSVM = ml.SVM()
pedBikeCarSVM.load(classifierParams.pedBikeCarSVMFilename)
bikeCarSVM = ml.SVM()
bikeCarSVM.load(classifierParams.bikeCarSVMFilename)
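# two appearance-based (HoG) classifiers: one separating pedestrians, cyclists and cars, the other
# only cyclists and cars; which one is applied to an object presumably depends on its aggregated
# speed (see the thresholds passed to initClassifyUserTypeHoGSVM below)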

# a log-logistic distribution could be used instead for pedestrian and cyclist speeds, with density ((pedBeta/pedAlfa)*((sMean/pedAlfa)**(pedBeta-1)))/((1+(sMean/pedAlfa)**pedBeta)**2.)
carNorm = norm(classifierParams.meanVehicleSpeed, classifierParams.stdVehicleSpeed)
pedNorm = norm(classifierParams.meanPedestrianSpeed, classifierParams.stdPedestrianSpeed)
# scipy lognorm parameters (shape, loc, scale): shape is the standard deviation of the underlying normal and scale is exp(mean of the underlying normal)
bicLogNorm = lognorm(classifierParams.scaleCyclistSpeed, loc = 0., scale = np.exp(classifierParams.locationCyclistSpeed))
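# sanity check of the parameterization: with mu and sigma the mean and standard deviation of the
# underlying normal, lognorm(sigma, loc = 0., scale = np.exp(mu)).pdf(x) equals
# np.exp(-(np.log(x)-mu)**2/(2*sigma**2))/(x*sigma*np.sqrt(2*np.pi))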
speedProbabilities = {'car': lambda s: carNorm.pdf(s),
                      'pedestrian': lambda s: pedNorm.pdf(s), 
                      'bicycle': lambda s: bicLogNorm.pdf(s)}
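# each entry maps an aggregated speed in m/frame to a probability density, e.g.
# speedProbabilities['car'](0.3) is the density of a car speed of 0.3 m/frame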

if args.plotSpeedDistribution:
    import matplotlib.pyplot as plt
    plt.figure()
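    # the x axis is in km/h while the fitted distributions expect speeds in m/frame, hence each
    # sampled speed is divided by 3.6*videoFrameRate before evaluating the density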
    speeds = np.arange(0.1, args.maxSpeedDistributionPlot, 0.1)
    maxProb = -1.
    for k in speedProbabilities:
        probabilities = [speedProbabilities[k](s/(3.6*params.videoFrameRate)) for s in speeds]
        plt.plot(speeds, probabilities, label = k)
        maxProb = max(maxProb, np.max(probabilities))
    plt.plot([classifierParams.minSpeedEquiprobable*3.6*params.videoFrameRate]*2, [0., maxProb], 'k-')
    plt.text(classifierParams.minSpeedEquiprobable*3.6*params.videoFrameRate, maxProb, 'threshold for equiprobable class')
    plt.xlabel('Speed (km/h)')
    plt.ylabel('Probability')
    plt.legend()
    plt.title('Probability Density Function')
    plt.show()
    sys.exit()

objects = storage.loadTrajectoriesFromSqlite(databaseFilename, 'object', args.nObjects, withFeatures = True)
timeInterval = moving.TimeInterval.unionIntervals([obj.getTimeInterval() for obj in objects])
if args.startFrame0:
    timeInterval.first = 0
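# the processing loop below covers the union of the time intervals of all loaded objects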

capture = cv2.VideoCapture(videoFilename)
width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
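# note: the cv2.cv.CV_CAP_PROP_* constants exist only in OpenCV 2; OpenCV 3 and later expose them as cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_POS_FRAMES, etc.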

#if undistort: # setup undistortion
#     [map1, map2] = cvutils.computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients)
#     height, width = map1.shape
#    newImgSize = (int(round(width*undistortedImageMultiplication)), int(round(height*undistortedImageMultiplication)))
#    newCameraMatrix = cv2.getDefaultNewCameraMatrix(intrinsicCameraMatrix, newImgSize, True)
#else:
#    newCameraMatrix = None

pastObjects = []
currentObjects = []
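# road users move from objects (not yet reached) to currentObjects (classified at each frame)
# to pastObjects (classification finished) as the video is read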
if capture.isOpened():
    ret = True
    frameNum = timeInterval.first
    if not args.startFrame0:
        capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum)
    lastFrameNum = timeInterval.last

    while ret and frameNum <= lastFrameNum:
        ret, img = capture.read()
        if ret:
            if frameNum%50 == 0:
                print('frame number: {}'.format(frameNum))
            #if undistort:
            #    img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)            
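            # objects whose first instant has been reached are moved to currentObjects and
            # initialized for frame-by-frame classification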
            for obj in objects[:]:
                if obj.getFirstInstant() <= frameNum: # if images are skipped
                    obj.initClassifyUserTypeHoGSVM(speedAggregationFunc, pedBikeCarSVM, bikeCarSVM, classifierParams.maxPedestrianSpeed, classifierParams.maxCyclistSpeed, classifierParams.nFramesIgnoreAtEnds, invHomography, intrinsicCameraMatrix, distortionCoefficients)
                    currentObjects.append(obj)
                    objects.remove(obj)

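            # once an object's last instant has passed, its final user type is assigned from the
            # per-frame appearance results and the speed probabilities; otherwise its appearance
            # is classified in the current frame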
            for obj in currentObjects[:]:
                if obj.getLastInstant() <= frameNum:  # if images are skipped
                    obj.classifyUserTypeHoGSVM(minSpeedEquiprobable = classifierParams.minSpeedEquiprobable, speedProbabilities = speedProbabilities, maxPercentUnknown = classifierParams.maxPercentUnknown)
                    pastObjects.append(obj)
                    currentObjects.remove(obj)
                else:
                    obj.classifyUserTypeHoGSVMAtInstant(img, frameNum, width, height, classifierParams.percentIncreaseCrop, classifierParams.percentIncreaseCrop, classifierParams.minNPixels, classifierParams.hogRescaleSize, classifierParams.hogNOrientations, classifierParams.hogNPixelsPerCell, classifierParams.hogNCellsPerBlock, classifierParams.hogBlockNorm)
        frameNum += 1
    
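    # finalize the classification of objects still ongoing when the last processed frame is reached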
    for obj in currentObjects:
        obj.classifyUserTypeHoGSVM(minSpeedEquiprobable = classifierParams.minSpeedEquiprobable, speedProbabilities = speedProbabilities, maxPercentUnknown = classifierParams.maxPercentUnknown)
        pastObjects.append(obj)
    print('Saving user types')
    storage.setRoadUserTypes(databaseFilename, pastObjects)