#! /usr/bin/env python

import argparse

#import matplotlib.pyplot as plt
import numpy as np

import ml, utils, storage, moving

parser = argparse.ArgumentParser(description='The program learns prototypes for the motion patterns') #, epilog = ''
#parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the configuration file')
parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file', required = True)
parser.add_argument('-o', dest = 'outputPrototypeDatabaseFilename', help = 'name of the Sqlite database file to save prototypes')
parser.add_argument('-i', dest = 'inputPrototypeDatabaseFilename', help = 'name of the Sqlite database file for prototypes to start the algorithm with')
parser.add_argument('-t', dest = 'trajectoryType', help = 'type of trajectories to learn from', choices = ['objectfeatures', 'feature', 'object'], default = 'objectfeatures')
parser.add_argument('--max-nobjectfeatures', dest = 'maxNObjectFeatures', help = 'maximum number of features per object to load', type = int, default = 1)
parser.add_argument('-n', dest = 'nTrajectories', help = 'number of the object or feature trajectories to load', type = int, default = None)
parser.add_argument('-e', dest = 'epsilon', help = 'distance for the similarity of trajectory points', type = float, required = True)
parser.add_argument('--metric', dest = 'metric', help = 'metric for the similarity of trajectory points', default = 'cityblock') # default is manhattan distance
parser.add_argument('-s', dest = 'minSimilarity', help = 'minimum similarity to put a trajectory in a cluster', type = float, required = True)
parser.add_argument('-c', dest = 'minClusterSize', help = 'minimum cluster size', type = int, default = None)
parser.add_argument('--optimize', dest = 'optimizeCentroid', help = 'recompute centroid at each assignment', action = 'store_true')
parser.add_argument('--random', dest = 'randomInitialization', help = 'random initialization of clustering algorithm', action = 'store_true')
parser.add_argument('--subsample', dest = 'positionSubsamplingRate', help = 'rate of position subsampling (1 every n positions)', type = int)
parser.add_argument('--display', dest = 'display', help = 'display trajectories', action = 'store_true')
parser.add_argument('--save-similarities', dest = 'saveSimilarities', help = 'save computed similarities (in addition to prototypes)', action = 'store_true')
parser.add_argument('--save-matches', dest = 'saveMatches', help = 'save the assignments of the objects (not available for features) to the prototypes', action = 'store_true')
#parser.add_argument('--assign', dest = 'assign', help = 'saves the assignments of the objects (not for features) to the prototypes', action = 'store_true') # default is train, but one could want only to assign the objects to the loaded prototypes without learning

args = parser.parse_args()

# use cases
# 1. learn proto from one file, save in same or another (with traj)
# 2. load proto, load objects, update proto, save proto
# 3. assign objects from one db to proto
# 4. load objects from several files, save in another -> see metadata: site with view and times
# 5. keep prototypes, with positions/velocities, in separate db (keep link to original data through filename, type and index)
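# example invocation (hypothetical file names and parameter values, illustrating the arguments above):
# ./learn-motion-patterns.py -d trajectories.sqlite -o prototypes.sqlite -t feature -e 2. -s 0.8 -c 10 --display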

# TODO add possibility to cluster with velocities
# TODO add possibility to start with saved prototypes so that one can incrementally learn from several databases
# save the objects that match the prototypes
# write an assignment function for objects

trajectoryType = args.trajectoryType
prototypeType = args.trajectoryType
if args.trajectoryType == 'objectfeatures':
    trajectoryType = 'object'
    prototypeType = 'feature'
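
# in 'objectfeatures' mode, the trajectories actually loaded are the features of the
# objects (at most maxNObjectFeatures per object), so the learned prototypes are
# stored as feature trajectories referring to the original database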

if args.trajectoryType == 'objectfeatures':
    objectFeatureNumbers = storage.loadObjectFeatureFrameNumbers(args.databaseFilename, objectNumbers = args.nTrajectories)
    featureNumbers = []
    for numbers in objectFeatureNumbers.values():
        featureNumbers += numbers[:min(len(numbers), args.maxNObjectFeatures)]
    objects = storage.loadTrajectoriesFromSqlite(args.databaseFilename, 'feature', objectNumbers = featureNumbers, timeStep = args.positionSubsamplingRate)
else:
    objects = storage.loadTrajectoriesFromSqlite(args.databaseFilename, trajectoryType, withFeatures = False, objectNumbers = args.nTrajectories, timeStep = args.positionSubsamplingRate)

trajectories = [o.getPositions().asArray().T for o in objects]
if args.inputPrototypeDatabaseFilename is not None:
    initialPrototypes = storage.loadPrototypesFromSqlite(args.inputPrototypeDatabaseFilename, True)
    trajectories = [p.getMovingObject().getPositions().asArray().T for p in initialPrototypes]+trajectories
    initialPrototypeIndices = range(len(initialPrototypes))
else:
    initialPrototypes = []
    initialPrototypeIndices = None
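
# initial prototypes, if provided, are prepended to the trajectory list, so any
# index below len(initialPrototypes) refers to a loaded prototype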

# LCSS similarity between trajectories: two positions match if their distance under
# the chosen metric is below epsilon; similarities are normalized by trajectory
# length, hence comparable to the -s minSimilarity threshold
lcss = utils.LCSS(metric = args.metric, epsilon = args.epsilon)
nTrajectories = len(trajectories)

similarities = -np.ones((nTrajectories, nTrajectories)) # -1 marks similarities not computed yet
# the next line can be called again without reinitializing similarities
# assignment is done only if working on the same database; otherwise the matchings
# are not comparable and the matching has to be done on all the data at once
prototypeIndices, labels = ml.prototypeCluster(trajectories, similarities, args.minSimilarity, lambda x,y : lcss.computeNormalized(x, y), args.minClusterSize, args.optimizeCentroid, args.randomInitialization, args.inputPrototypeDatabaseFilename is not None, initialPrototypeIndices)

clusterSizes = ml.computeClusterSizes(labels, prototypeIndices, -1)
print(clusterSizes) # number of trajectories assigned to each prototype (-1: not assigned)

prototypes = []
for i in prototypeIndices:
    if i < len(initialPrototypes):
        initialPrototypes[i].nMatchings = 0
        prototypes.append(initialPrototypes[i])
    else:
        if args.inputPrototypeDatabaseFilename is None:
            nMatchings = clusterSizes[i]
        else: # matchings are not comparable to those of the initial prototypes
            nMatchings = 0
        # the index in objects is offset by the number of prepended initial prototypes
        prototypes.append(moving.Prototype(args.databaseFilename, objects[i-len(initialPrototypes)].getNum(), prototypeType, nMatchings))

if args.outputPrototypeDatabaseFilename is None:
    outputPrototypeDatabaseFilename = args.databaseFilename
else:
    outputPrototypeDatabaseFilename = args.outputPrototypeDatabaseFilename
storage.savePrototypesToSqlite(outputPrototypeDatabaseFilename, prototypes)
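
# the saved prototypes can be reloaded later, e.g. passed through -i to learn
# incrementally from additional databases (cf storage.loadPrototypesFromSqlite above)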

if args.saveSimilarities:
    np.savetxt(utils.removeExtension(args.databaseFilename)+'-prototype-similarities.txt.gz', similarities, '%.4f')

# labels contain the trajectory index of the assigned prototype; remap each to the
# position of that prototype in the prototypes list saved above
labelsToProtoIndices = {protoId: i for i, protoId in enumerate(prototypeIndices)}
if args.saveMatches: # or args.assign
    # save in the database that originally contained the data
    # should previous assignments be removed first?
    storage.savePrototypeAssignmentsToSqlite(args.databaseFilename, objects, [labelsToProtoIndices[l] for l in labels], prototypes)

if args.display:
    # display the clusters: non-prototype trajectories in their cluster color
    # ('kx', black crosses, if not assigned), prototypes with circle markers
    from matplotlib.pyplot import figure, show, axis
    figure()
    nInitialPrototypes = len(initialPrototypes)
    for i, o in enumerate(objects):
        # indices in labels and prototypeIndices are offset by the prepended initial prototypes
        if i+nInitialPrototypes not in prototypeIndices:
            if labels[i+nInitialPrototypes] < 0:
                o.plot('kx')
            else:
                o.plot(utils.colors[labels[i+nInitialPrototypes]])
    for i in prototypeIndices:
        if i >= nInitialPrototypes: # initial prototypes have no trajectory in objects
            objects[i-nInitialPrototypes].plot(utils.colors[i]+'o')
    axis('equal')
    show()