view scripts/process.py @ 1064:cbc026dacf0b

changed interval string representation
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Sun, 15 Jul 2018 22:52:26 -0400
parents 3c37d8d20e97
children d4d052a05337

#! /usr/bin/env python3

import sys, argparse
from pathlib import Path
from multiprocessing.pool import Pool

#import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

from trafficintelligence import storage, events, prediction, cvutils, utils, moving, processing, ml
from trafficintelligence.metadata import *

parser = argparse.ArgumentParser(description='This program manages the processing of several files based on a description of the sites and video data in an SQLite database following the metadata module.')
# input
parser.add_argument('--db', dest = 'metadataFilename', help = 'name of the metadata file', required = True)
parser.add_argument('--videos', dest = 'videoIds', help = 'indices of the video sequences', nargs = '*')
parser.add_argument('--sites', dest = 'siteIds', help = 'indices of the sites', nargs = '*')

# main function
parser.add_argument('--delete', dest = 'delete', help = 'data to delete', choices = ['feature', 'object', 'classification', 'interaction'])
parser.add_argument('--process', dest = 'process', help = 'data to process', choices = ['feature', 'object', 'classification', 'prototype', 'interaction'])
parser.add_argument('--display', dest = 'display', help = 'data to display (replay over video)', choices = ['feature', 'object', 'classification', 'interaction'])
parser.add_argument('--progress', dest = 'progress', help = 'information about the progress of processing', action = 'store_true')
parser.add_argument('--analyze', dest = 'analyze', help = 'data to analyze (results)', choices = ['feature', 'object', 'classification', 'interaction', 'event'])

# common options
parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the configuration file')
parser.add_argument('-n', dest = 'nObjects', help = 'number of objects/interactions to process', type = int)
parser.add_argument('-t', dest = 'trajectoryType', help = 'type of trajectories', choices = ['feature', 'object'], default = 'feature')
parser.add_argument('--dry', dest = 'dryRun', help = 'dry run of processing', action = 'store_true')
parser.add_argument('--nthreads', dest = 'nProcesses', help = 'number of processes to run in parallel', type = int, default = 1)
parser.add_argument('--subsample', dest = 'positionSubsamplingRate', help = 'rate of position subsampling (1 every n positions)', type = int)

### process options
# motion pattern learning and assignment
parser.add_argument('--prototype-filename', dest = 'outputPrototypeDatabaseFilename', help = 'name of the Sqlite database file to save prototypes', default = 'prototypes.sqlite')
#parser.add_argument('-i', dest = 'inputPrototypeDatabaseFilename', help = 'name of the Sqlite database file for prototypes to start the algorithm with')
parser.add_argument('--nobjects-mp', dest = 'nMPObjects', help = 'maximum number of objects to use to learn motion patterns', type = int)
parser.add_argument('--nfeatures-per-object', dest = 'nLongestFeaturesPerObject', help = 'maximum number of features per object to load', type = int)
parser.add_argument('--epsilon', dest = 'epsilon', help = 'distance for the similarity of trajectory points', type = float)
parser.add_argument('--metric', dest = 'metric', help = 'metric for the similarity of trajectory points', default = 'cityblock') # default is manhattan distance
parser.add_argument('--minsimil', dest = 'minSimilarity', help = 'minimum similarity to put a trajectory in a cluster', type = float)
parser.add_argument('--min-cluster-size', dest = 'minClusterSize', help = 'minimum cluster size', type = int, default = 0)
#parser.add_argument('--learn', dest = 'learn', help = 'learn', action = 'store_true')
parser.add_argument('--optimize', dest = 'optimizeCentroid', help = 'recompute centroid at each assignment', action = 'store_true')
parser.add_argument('--random', dest = 'randomInitialization', help = 'random initialization of clustering algorithm', action = 'store_true')
#parser.add_argument('--similarities-filename', dest = 'similaritiesFilename', help = 'filename of the similarities')
parser.add_argument('--save-similarities', dest = 'saveSimilarities', help = 'save computed similarities (in addition to prototypes)', action = 'store_true')
parser.add_argument('--save-assignments', dest = 'saveAssignments', help = 'saves the assignments of the objects to the prototypes', action = 'store_true')
parser.add_argument('--assign', dest = 'assign', help = 'assigns the objects to the prototypes and saves the assignments', action = 'store_true')

# safety analysis
parser.add_argument('--prediction-method', dest = 'predictionMethod', help = 'prediction method (constant velocity (cvd: vector computation (approximate); cve: equation solving; cv: discrete time (approximate)), normal adaptation (na), point set prediction (ps), motion patterns (mp))', choices = ['cvd', 'cve', 'cv', 'na', 'ps', 'mp'])
parser.add_argument('--pet', dest = 'computePET', help = 'computes PET', action = 'store_true')
# override other tracking config, erase sqlite?


# analysis options
parser.add_argument('--output', dest = 'output', help = 'kind of output to produce (interval means)', choices = ['figure', 'interval', 'event'])
parser.add_argument('--min-user-duration', dest = 'minUserDuration', help = 'minimum duration a user must be observed to be included in the analysis (s)', type = float, default = 0.1)
parser.add_argument('--interval-duration', dest = 'intervalDuration', help = 'length of time interval to aggregate data (min)', type = int, default = 15)
parser.add_argument('--aggregation', dest = 'aggMethods', help = 'aggregation method per user/interaction and per interval', choices = ['mean', 'median', 'centile'], nargs = '*', default = ['median'])
parser.add_argument('--aggregation-centile', dest = 'aggCentiles', help = 'centile(s) to compute from the observations', nargs = '*', type = int)
parser.add_argument('--event-filename', dest = 'eventFilename', help = 'filename of the event data')
dpi = 150
# unit of analysis: site - camera-view

# need way of selecting sites as similar as possible to sql alchemy syntax
# override tracking.cfg from db
# manage cfg files, overwrite them (or a subset of parameters)
# delete sqlite files
# info of metadata

args = parser.parse_args()
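
# Example invocations (a sketch; the file and database names are hypothetical):
#   python3 process.py --db metadata.sqlite --sites 1 2 --process feature
#   python3 process.py --db metadata.sqlite --videos 3-7 --process interaction --prediction-method cve
#   python3 process.py --db metadata.sqlite --sites 1 --analyze object --output event --event-filename speeds.csv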

#################################
# Data preparation
#################################
session = connectDatabase(args.metadataFilename)
parentPath = Path(args.metadataFilename).parent # files are relative to metadata location
videoSequences = []
sites = []
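# ids may be given individually or as dash-separated ranges, e.g. --videos 2 5-7
# selects sequences 2 and 5 through 7 (ranges are parsed by moving.TimeInterval.parse)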
if args.videoIds is not None:
    for videoId in args.videoIds:
        if '-' in videoId:
            videoSequences.extend([session.query(VideoSequence).get(i) for i in moving.TimeInterval.parse(videoId)])
        else:
            videoSequences.append(session.query(VideoSequence).get(int(videoId)))
    sites = set([vs.cameraView.site for vs in videoSequences])
elif args.siteIds is not None:
    for siteId in args.siteIds:
        if '-' in siteId:
            sites.extend([session.query(Site).get(i) for i in moving.TimeInterval.parse(siteId)])
        else:
            sites.append(session.query(Site).get(int(siteId)))
    for site in sites:
        videoSequences.extend(getSiteVideoSequences(site))
else:
    print('No video/site to process')
    sys.exit()
if args.nProcesses > 1:
    pool = Pool(args.nProcesses)

#################################
# Report progress in the processing
#################################
if args.progress:
    print('Providing information on data progress')

#################################
# Delete
#################################
if args.delete is not None:
    if args.delete == 'feature':
        response = input('Are you sure you want to delete the tracking results (SQLite files) of all these sites (y/n)?')
        if response == 'y':
            for vs in videoSequences:
                p = parentPath.absolute()/vs.getDatabaseFilename()
                if p.exists(): # the tracking results may not exist for every sequence
                    p.unlink()
    elif args.delete in ['object', 'interaction']:
        #parser.add_argument('-t', dest = 'dataType', help = 'type of the data to remove', required = True, choices = ['object','interaction', 'bb', 'pois', 'prototype'])
        for vs in videoSequences:
            storage.deleteFromSqlite(str(parentPath/vs.getDatabaseFilename()), args.delete)

#################################
# Process
#################################
if args.process in ['feature', 'object']: # tracking
    for vs in videoSequences:
        if not (parentPath/vs.getDatabaseFilename()).exists() or args.process == 'object':
            if args.configFilename is None:
                configFilename = str(parentPath/vs.cameraView.getTrackingConfigurationFilename())
            else:
                configFilename = args.configFilename
            # undistort only if the camera type (intrinsic matrix and distortion coefficients) is known
            if vs.cameraView.cameraType is None:
                trackingArgs = (configFilename, args.process == 'object', str(parentPath.absolute()/vs.getVideoSequenceFilename()), str(parentPath.absolute()/vs.getDatabaseFilename()), str(parentPath.absolute()/vs.cameraView.getHomographyFilename()), str(parentPath.absolute()/vs.cameraView.getMaskFilename()), False, None, None, args.dryRun)
            else:
                trackingArgs = (configFilename, args.process == 'object', str(parentPath.absolute()/vs.getVideoSequenceFilename()), str(parentPath.absolute()/vs.getDatabaseFilename()), str(parentPath.absolute()/vs.cameraView.getHomographyFilename()), str(parentPath.absolute()/vs.cameraView.getMaskFilename()), True, vs.cameraView.cameraType.intrinsicCameraMatrix, vs.cameraView.cameraType.distortionCoefficients, args.dryRun)
            if args.nProcesses == 1:
                cvutils.tracking(*trackingArgs)
            else:
                pool.apply_async(cvutils.tracking, args = trackingArgs)
        else:
            print('SQLite already exists: {}'.format(parentPath/vs.getDatabaseFilename()))
    if args.nProcesses > 1:
        pool.close()
        pool.join()

elif args.process == 'prototype': # motion pattern learning
    # learn by site by default -> group videos by camera view TODO
    # by default, load all objects, learn and then assign (BUT not save the assignments)
    for site in sites:
        print('Learning motion patterns for site {} ({})'.format(site.idx, site.name))
        objects = {}
        object2VideoSequences = {}
        for cv in site.cameraViews:
            for vs in cv.videoSequences:
                print('Loading '+vs.getDatabaseFilename())
                objects[vs.idx] = storage.loadTrajectoriesFromSqlite(str(parentPath/vs.getDatabaseFilename()), args.trajectoryType, args.nObjects, timeStep = args.positionSubsamplingRate, nLongestFeaturesPerObject = args.nLongestFeaturesPerObject)
                if args.trajectoryType == 'object' and args.nLongestFeaturesPerObject is not None:
                    # learn on the objects' longest features rather than on the object trajectories themselves
                    objectsWithFeatures = objects[vs.idx]
                    objects[vs.idx] = [f for o in objectsWithFeatures for f in o.getFeatures()]
                    prototypeType = 'feature'
                else:
                    prototypeType = args.trajectoryType
                for obj in objects[vs.idx]:
                    object2VideoSequences[obj] = vs
        lcss = utils.LCSS(metric = args.metric, epsilon = args.epsilon)
        similarityFunc = lambda x,y : lcss.computeNormalized(x, y)
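        # similarity of two trajectories: normalized longest common subsequence, where two
        # points match if they are within epsilon of each other under the chosen metric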
        trainingObjects = [o for tmpobjects in objects.values() for o in tmpobjects]
        if args.nMPObjects is not None and args.nMPObjects < len(trainingObjects):
            # regularly subsample the training set down to approximately nMPObjects trajectories
            m = int(np.floor(float(len(trainingObjects))/args.nMPObjects))
            trainingObjects = trainingObjects[::m]
        similarities = -np.ones((len(trainingObjects), len(trainingObjects)))
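        # -1 marks pairs whose similarity has not been computed yet (filled in lazily during clustering)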
        prototypeIndices, labels = processing.learnAssignMotionPatterns(True, True, trainingObjects, similarities, args.minSimilarity, similarityFunc, args.minClusterSize, args.optimizeCentroid, args.randomInitialization, True, [])
        outputPrototypeDatabaseFilename = args.outputPrototypeDatabaseFilename # defaults to prototypes.sqlite
        clusterSizes = ml.computeClusterSizes(labels, prototypeIndices, -1)
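        # each saved prototype records its source database, trajectory number, trajectory type and cluster size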
        storage.savePrototypesToSqlite(str(parentPath/site.getPath()/outputPrototypeDatabaseFilename), [moving.Prototype(object2VideoSequences[trainingObjects[i]].getDatabaseFilename(False), trainingObjects[i].getNum(), prototypeType, clusterSizes[i]) for i in prototypeIndices])


elif args.process == 'interaction':
    # safety analysis TODO make function in safety analysis script
    if args.predictionMethod == 'cvd':
        predictionParameters = prediction.CVDirectPredictionParameters()
    elif args.predictionMethod == 'cve':
        predictionParameters = prediction.CVExactPredictionParameters()
    else:
        print('Prediction method {} is not available in this script'.format(args.predictionMethod))
        sys.exit()
    for vs in videoSequences:
        print('Processing '+vs.getDatabaseFilename())
        objects = storage.loadTrajectoriesFromSqlite(str(parentPath/vs.getDatabaseFilename()), 'object')#, args.nObjects, withFeatures = (params.useFeaturesForPrediction or predictionMethod == 'ps' or predictionMethod == 'mp'))
        interactions = events.createInteractions(objects)
        #if args.nProcesses == 1:
        #print(str(parentPath/vs.cameraView.getTrackingConfigurationFilename()))
        params = storage.ProcessParameters(str(parentPath/vs.cameraView.getTrackingConfigurationFilename()))
        #print(len(interactions), args.computePET, predictionParameters, params.collisionDistance, params.predictionTimeHorizon, params.crossingZones)
        processed = events.computeIndicators(interactions, True, args.computePET, predictionParameters, params.collisionDistance, params.predictionTimeHorizon, params.crossingZones, False, None)
        storage.saveIndicatorsToSqlite(str(parentPath/vs.getDatabaseFilename()), processed)
    # else:
    #     pool = Pool(processes = args.nProcesses)
    #     nInteractionPerProcess = int(np.ceil(len(interactions)/float(args.nProcesses)))
    #     jobs = [pool.apply_async(events.computeIndicators, args = (interactions[i*nInteractionPerProcess:(i+1)*nInteractionPerProcess], not args.noMotionPrediction, args.computePET, predictionParameters, params.collisionDistance, params.predictionTimeHorizon, params.crossingZones, False, None)) for i in range(args.nProcesses)]
    #     processed = []
    #     for job in jobs:
    #         processed += job.get()
    #     pool.close()

#################################
# Analyze
#################################
if args.analyze == 'object':
    # user speeds, accelerations
    # aggregation per site
    data = [] # list of observation per site-user with time
    headers = ['site', 'date', 'time', 'user_type']
    aggFunctions, tmpheaders = utils.aggregationMethods(args.aggMethods, args.aggCentiles)
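    # aggFunctions maps each method name to a function; for 'centile', the function
    # returns one value per requested centile, hence the array handling below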
    headers.extend(tmpheaders)
    for vs in videoSequences:
        d = vs.startTime.date()
        t1 = vs.startTime.time()
        minUserDuration = args.minUserDuration*vs.cameraView.cameraType.frameRate # convert the duration threshold from seconds to frames
        print('Extracting speed from '+vs.getDatabaseFilename())
        objects = storage.loadTrajectoriesFromSqlite(str(parentPath/vs.getDatabaseFilename()), 'object', args.nObjects)
        for o in objects:
            if o.length() > minUserDuration:
                row = [vs.cameraView.site.name, d, utils.framesToTime(o.getFirstInstant(), vs.cameraView.cameraType.frameRate, t1), o.getUserType()]
                tmp = o.getSpeeds()
                for method,func in aggFunctions.items():
                    aggSpeeds = vs.cameraView.cameraType.frameRate*3.6*func(tmp) # convert speeds from m/frame to km/h
                    if method == 'centile':
                        row += aggSpeeds.tolist()
                    else:
                        row.append(aggSpeeds)
                data.append(row)
    data = pd.DataFrame(data, columns = headers)
    if args.output == 'figure':
        for name in headers[4:]:
            plt.ioff()
            plt.figure()
            plt.boxplot([data.loc[data['site']==site.name, name] for site in sites], labels = [site.name for site in sites])
            plt.ylabel(name+' Speeds (km/h)')
            plt.savefig(name.lower()+'-speeds.png', dpi=dpi)
            plt.close()
    elif args.output == 'event':
        data.to_csv(args.eventFilename, index = False)

if args.analyze == 'interaction': # redo as for object, export in dataframe all interaction data
    indicatorIds = [2, 5, 7, 10] # indices into events.Interaction.indicatorNames
    conversionFactors = {2: 1., 5: 30.*3.6, 7: 1./30, 10: 1./30} # conversions to usual units, assuming 30 fps video
    maxIndicatorValue = {2: float('inf'), 5: float('inf'), 7: 10., 10: 10.} # discard implausibly large indicator values
    indicators = {}
    interactions = {}
    for vs in videoSequences:
        if not vs.cameraView.siteIdx in interactions:
            interactions[vs.cameraView.siteIdx] = []
            indicators[vs.cameraView.siteIdx] = {}
            for i in indicatorIds:
                indicators[vs.cameraView.siteIdx][i] = []
        interactions[vs.cameraView.siteIdx] += storage.loadInteractionsFromSqlite(str(parentPath/vs.getDatabaseFilename()))
        print(vs.getDatabaseFilename(), len(interactions[vs.cameraView.siteIdx]))
        for inter in interactions[vs.cameraView.siteIdx]:
            for i in indicatorIds:
                indic = inter.getIndicator(events.Interaction.indicatorNames[i])
                if indic is not None:
                    v = indic.getMostSevereValue()*conversionFactors[i]
                    if v < maxIndicatorValue[i]:
                        indicators[vs.cameraView.siteIdx][i].append(v)

    for i in indicatorIds:
        tmp = [indicators[siteId][i] for siteId in indicators]
        plt.ioff()
        plt.figure()
        plt.boxplot(tmp, labels = [session.query(Site).get(siteId).name for siteId in indicators])
        plt.ylabel(events.Interaction.indicatorNames[i]+' ('+events.Interaction.indicatorUnits[i]+')')
        plt.savefig(events.Interaction.indicatorNames[i]+'.png', dpi=dpi)
        plt.close()

if args.analyze == 'event': # aggregate event data by 15 min interval (args.intervalDuration), count events with thresholds
    data = pd.read_csv(args.eventFilename, parse_dates = [2])
    #data = pd.read_csv('./speeds.csv', converters = {'time': lambda s: datetime.datetime.strptime(s, "%H:%M:%S").time()}, nrows = 5000)
    # create time for end of each 15 min, then group by, using the agg method for each data column
    headers = ['site', 'date', 'intervalend15', 'duration', 'count']
    aggFunctions, tmpheaders = utils.aggregationMethods(args.aggMethods, args.aggCentiles)
    dataColumns = list(data.columns[4:])
    for h in dataColumns:
        for h2 in tmpheaders:
            headers.append(h+'-'+h2)
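    # e.g. with the default 15 min intervals, an observation at 10:07:23 falls in the interval ending at 10:15:00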
    data['intervalend15'] = data.time.apply(lambda t: (pd.Timestamp(year = t.year, month = t.month, day = t.day, hour = t.hour, minute = (t.minute // args.intervalDuration)*args.intervalDuration)+pd.Timedelta(minutes = args.intervalDuration)).time())
    outputData = []
    for name, group in data.groupby(['site', 'date', 'intervalend15']):
        # get duration as intervalend15-min(time), apply agg methods to each centile
        row = []
        row.extend(name)
        row.append((name[2].minute-group.time.min().minute) % 60)
        row.append(len(group))
        for h in dataColumns:
            for method,func in aggFunctions.items():
                aggregated = func(group[h])
                if method == 'centile':
                    row.extend(aggregated)
                else:
                    row.append(aggregated)
        outputData.append(row)
    pd.DataFrame(outputData, columns = headers).to_csv(utils.removeExtension(args.eventFilename)+'-aggregated.csv', index = False)