view scripts/learn-motion-patterns.py @ 1035:933588568bec

major update to learn motion pattern, see program description
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Wed, 20 Jun 2018 16:48:20 -0400
parents 8ffb3ae9f3d2
children 5621e4ad2428

#! /usr/bin/env python3

import argparse

import numpy as np
import matplotlib.pyplot as plt

from trafficintelligence import ml, utils, storage, moving

parser = argparse.ArgumentParser(description='''The program clusters trajectories, each cluster being represented by one of its trajectories (a prototype). It can work with prototypes and trajectories from the same dataset (database) or from different ones, but performs only learning or only assignment in a given run to avoid issues (the minimum cluster size argument is not used for now, as it may change the prototypes when assigning other trajectories)''') #, epilog = ''
#parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the configuration file')
parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file', required = True)
parser.add_argument('-o', dest = 'outputPrototypeDatabaseFilename', help = 'name of the Sqlite database file to save prototypes')
parser.add_argument('-i', dest = 'inputPrototypeDatabaseFilename', help = 'name of the Sqlite database file for prototypes to start the algorithm with')
parser.add_argument('-t', dest = 'trajectoryType', help = 'type of trajectories to learn from or assign', choices = ['objectfeature', 'feature', 'object'], default = 'objectfeature')
parser.add_argument('--max-nobjectfeatures', dest = 'maxNObjectFeatures', help = 'maximum number of features per object to load', type = int, default = 1)
parser.add_argument('-n', dest = 'nTrajectories', help = 'number of object or feature trajectories to load', type = int, default = None)
parser.add_argument('-e', dest = 'epsilon', help = 'distance for the similarity of trajectory points', type = float, required = True)
parser.add_argument('--metric', dest = 'metric', help = 'metric for the similarity of trajectory points', default = 'cityblock') # default is manhattan distance
parser.add_argument('-s', dest = 'minSimilarity', help = 'minimum similarity to put a trajectory in a cluster', type = float, required = True)
parser.add_argument('-c', dest = 'minClusterSize', help = 'minimum cluster size', type = int, default = 0)
parser.add_argument('--learn', dest = 'learn', help = 'learn', action = 'store_true')
parser.add_argument('--optimize', dest = 'optimizeCentroid', help = 'recompute centroid at each assignment', action = 'store_true')
parser.add_argument('--random', dest = 'randomInitialization', help = 'random initialization of clustering algorithm', action = 'store_true')
parser.add_argument('--subsample', dest = 'positionSubsamplingRate', help = 'rate of position subsampling (1 every n positions)', type = int)
parser.add_argument('--display', dest = 'display', help = 'display trajectories', action = 'store_true')
parser.add_argument('--save-similarities', dest = 'saveSimilarities', help = 'save computed similarities (in addition to prototypes)', action = 'store_true')
parser.add_argument('--save-assignments', dest = 'saveAssignments', help = 'saves the assignments of the objects to the prototypes', action = 'store_true')
parser.add_argument('--assign', dest = 'assign', help = 'assigns the objects to the prototypes and saves the assignments', action = 'store_true')

args = parser.parse_args()

# use cases
# 1. learn proto from one file, save in same or another
# 2. load proto, load objects (from same or other db), update proto matchings, save proto
# TODO 3. on same dataset, learn and assign trajectories (could be done with min cluster size)
# TODO? 4. when assigning, allow min cluster size only to avoid assigning to small clusters (but prototypes are not removed even if in small clusters, can be done after assignment with nmatchings)

# TODO add possibility to cluster with velocities
# TODO add possibility to load all trajectories and use minclustersize
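
# example invocations (a sketch: the database filenames and the epsilon/similarity values are made up)
#  learn prototypes from feature trajectories and save them to a separate database:
#    ./learn-motion-patterns.py -d tracking.sqlite -t feature -e 2. -s 0.5 --learn -o prototypes.sqlite
#  assign objects (through their features) to previously learned prototypes and save the assignments:
#    ./learn-motion-patterns.py -d tracking.sqlite -i prototypes.sqlite -t objectfeature -e 2. -s 0.5 --assign --save-assignments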

# load trajectories to cluster or assign
if args.trajectoryType == 'objectfeature':
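    # cluster objects through their features: load at most maxNObjectFeatures feature trajectories per object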
    trajectoryType = 'feature'
    objectFeatureNumbers = storage.loadObjectFeatureFrameNumbers(args.databaseFilename, objectNumbers = args.nTrajectories)
    featureNumbers = []
    for numbers in objectFeatureNumbers.values():
        featureNumbers += numbers[:min(len(numbers), args.maxNObjectFeatures)]
    objects = storage.loadTrajectoriesFromSqlite(args.databaseFilename, 'feature', objectNumbers = featureNumbers, timeStep = args.positionSubsamplingRate)
else:
    trajectoryType = args.trajectoryType
    objects = storage.loadTrajectoriesFromSqlite(args.databaseFilename, trajectoryType, objectNumbers = args.nTrajectories, timeStep = args.positionSubsamplingRate)

trajectories = [o.getPositions().asArray().T for o in objects]
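# convert the trajectories to arrays of point coordinates for the similarity computations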

# load initial prototypes, if any    
if args.inputPrototypeDatabaseFilename is not None:
    initialPrototypes = storage.loadPrototypesFromSqlite(args.inputPrototypeDatabaseFilename, True)
    trajectories = [p.getMovingObject().getPositions().asArray().T for p in initialPrototypes]+trajectories
    if len(initialPrototypes) > 0:
        initialPrototypeIndices = list(range(len(initialPrototypes)))
    else:
        initialPrototypeIndices = None
else:
    initialPrototypes = []
    initialPrototypeIndices = None
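
# the initial prototypes are prepended to the trajectories, so their indices range from 0 to len(initialPrototypes)-1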

lcss = utils.LCSS(metric = args.metric, epsilon = args.epsilon)
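# with LCSS, two trajectory points are considered similar if their distance in the chosen metric is below epsilon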
nTrajectories = len(trajectories)

similarities = -np.ones((nTrajectories, nTrajectories))
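# -1 entries in similarities mean 'not computed yet'; they are filled in lazily and can be reused across calls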
similarityFunc = lcss.computeNormalized
# prototypeCluster (the call below) can be called again without reinitializing similarities
if args.learn:
    prototypeIndices = ml.prototypeCluster(trajectories, similarities, args.minSimilarity, similarityFunc, args.optimizeCentroid, args.randomInitialization, initialPrototypeIndices)
else:
    prototypeIndices = initialPrototypeIndices

if args.assign: # TODO don't touch initial prototypes if not from same db as trajectories
    #if not args.learn and args.minClusterSize >= 1: # allow only 
    #   print('Warning: you did not learn the prototypes and you are using minimum cluster size of {}, which may lead to removing prototypes and assigning them to others'.format(args.minClusterSize))
    # if args.minClusterSize >= 1:
    #     if initialPrototypeIndices is None:
    #         prototypeIndices, labels = ml.assignToPrototypeClusters(trajectories, prototypeIndices, similarities, args.minSimilarity, similarityFunc, args.minClusterSize)
    #     else:
    #         print('Not assigning with non-zero minimum cluster size and initial prototypes (would remove initial prototypes based on other trajectories)')
    # else:
    #     prototypeIndices, labels = ml.assignToPrototypeClusters(trajectories, prototypeIndices, similarities, args.minSimilarity, similarityFunc)
    assignedPrototypeIndices, labels = ml.assignToPrototypeClusters(trajectories, prototypeIndices, similarities, args.minSimilarity, similarityFunc)
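    # the returned prototype indices are kept in a separate variable and not used below, so the initial prototypes are not modified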

if args.learn and not args.assign:
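    # build the Prototype objects to save: reuse the initial prototypes that were selected, and create new ones referring to objects of the current database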
    prototypes = []
    for i in prototypeIndices:
        if i<len(initialPrototypes):
            prototypes.append(initialPrototypes[i])
        else:
            prototypes.append(moving.Prototype(args.databaseFilename, objects[i-len(initialPrototypes)].getNum(), trajectoryType))

    if args.outputPrototypeDatabaseFilename is None:
        outputPrototypeDatabaseFilename = args.databaseFilename
    else:
        outputPrototypeDatabaseFilename = args.outputPrototypeDatabaseFilename
        if args.inputPrototypeDatabaseFilename == args.outputPrototypeDatabaseFilename:
            storage.deleteFromSqlite(args.outputPrototypeDatabaseFilename, 'prototype')
    storage.savePrototypesToSqlite(outputPrototypeDatabaseFilename, prototypes)
    if args.display:
        plt.figure()
        for p in prototypes:
            p.getMovingObject().plot()
        plt.axis('equal')
        plt.show()

if not args.learn and args.assign: # no modification to prototypes, can work with initialPrototypes
    clusterSizes = ml.computeClusterSizes(labels, prototypeIndices, -1)
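    # the cluster size includes the prototype itself (prepended to the trajectories), hence the -1 when counting matchings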
    for i in prototypeIndices:
        nMatchings = clusterSizes[i]-1
        if initialPrototypes[i].nMatchings is None:
            initialPrototypes[i].nMatchings = nMatchings
        else:
            initialPrototypes[i].nMatchings += nMatchings
    if args.outputPrototypeDatabaseFilename is None:
        outputPrototypeDatabaseFilename = args.databaseFilename
    else:
        outputPrototypeDatabaseFilename = args.outputPrototypeDatabaseFilename
    storage.setPrototypeMatchingsInSqlite(outputPrototypeDatabaseFilename, initialPrototypes)
    if args.saveAssignments:
        if args.trajectoryType == 'objectfeature': # the object is assigned through its loaded (longest) features, taking the most common label among them
            objectNumbers = []
            objectLabels = []
            for objNum, objFeatureNumbers in objectFeatureNumbers.items():
                objLabels = []
                for i, o in enumerate(objects):
                    if o.getNum() in objFeatureNumbers:
                        objLabels.append(labels[i+len(initialPrototypes)])
                objectLabels.append(utils.mostCommon(objLabels))
                objectNumbers.append(objNum)
            storage.savePrototypeAssignmentsToSqlite(args.databaseFilename, objectNumbers, 'object', objectLabels, initialPrototypes)
        else:
            storage.savePrototypeAssignmentsToSqlite(args.databaseFilename, [obj.getNum() for obj in objects], trajectoryType, labels[len(initialPrototypes):], initialPrototypes)
    if args.display:
        plt.figure()
        for i,o in enumerate(objects):
            if labels[i+len(initialPrototypes)] < 0:
                o.plot('kx-')
            else:
                o.plot(utils.colors[labels[i+len(initialPrototypes)]])
        for i,p in enumerate(initialPrototypes):
            p.getMovingObject().plot(utils.colors[i]+'o')
        plt.axis('equal')
        plt.show()

if (args.learn or args.assign) and args.saveSimilarities:
    np.savetxt(utils.removeExtension(args.databaseFilename)+'-prototype-similarities.txt.gz', similarities, '%.4f')