Mercurial Hosting > traffic-intelligence
view scripts/learn-motion-patterns.py @ 910:b58a1061a717
loading is faster for longest object features
author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
---|---|
date | Wed, 28 Jun 2017 15:36:25 -0400 |
parents | b297525b2cbf |
children | 89cc05867c4c |
line wrap: on
line source
#! /usr/bin/env python
'''Learns motion-pattern prototypes from trajectories stored in an SQLite database.

Trajectories (objects, features, or the longest features of each object) are
loaded from the database, pairwise similarities are computed with a normalized
LCSS measure, and prototype trajectories are selected by clustering. The
resulting prototypes (and optionally the similarity matrix) are saved back;
with --display, clusters are plotted with matplotlib.'''

import sys, argparse
#import matplotlib.pyplot as plt
import numpy as np

import ml, utils, storage

parser = argparse.ArgumentParser(description='The program learns prototypes for the motion patterns') #, epilog = ''
#parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the configuration file')
parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file', required = True)
parser.add_argument('-t', dest = 'trajectoryType', help = 'type of trajectories to learn from', choices = ['objectfeatures', 'feature', 'object'], default = 'objectfeatures')
parser.add_argument('--max-nobjectfeatures', dest = 'maxNObjectFeatures', help = 'maximum number of features per object to load', type = int, default = 3)
parser.add_argument('-n', dest = 'nTrajectories', help = 'number of the object or feature trajectories to load', type = int, default = None)
parser.add_argument('-e', dest = 'epsilon', help = 'distance for the similarity of trajectory points', type = float, required = True)
parser.add_argument('--metric', dest = 'metric', help = 'metric for the similarity of trajectory points', default = 'cityblock') # default is manhattan distance
parser.add_argument('-s', dest = 'minSimilarity', help = 'minimum similarity to put a trajectory in a cluster', type = float, required = True)
parser.add_argument('-c', dest = 'minClusterSize', help = 'minimum cluster size', type = int, default = None)
parser.add_argument('--optimize', dest = 'optimizeCentroid', help = 'recompute centroid at each assignment', action = 'store_true')
parser.add_argument('--random', dest = 'randomInitialization', help = 'random initialization of clustering algorithm', action = 'store_true')
parser.add_argument('--subsample', dest = 'positionSubsamplingRate', help = 'rate of position subsampling (1 every n positions)', type = int)
parser.add_argument('--display', dest = 'display', help = 'display trajectories', action = 'store_true')
parser.add_argument('--save-similarities', dest = 'saveSimilarities', help = 'save computed similarities (in addition to prototypes)', action = 'store_true')
#parser.add_argument('--save-matches', dest = 'saveMatches', help = 'save the matched prototype information', action = 'store_true')
args = parser.parse_args()

# use cases
# 1. learn proto from one file, save in same or another (with traj)
# 2. load proto, load objects, update proto, save proto
# 3. assign objects from one db to proto
# 4. load objects from several files, save in another

# TODO add possibility to cluster with velocities
# TODO add possibility to start with saved prototypes so that one can incrementally learn from several databases
# save prototypes with database name, add option to keep trajectory along: if saved in same db, no need
# load proto must load the movingobject
# save the objects that match the prototypes
# write an assignment function for objects

# when learning from object features, the actual trajectories loaded (and the
# prototypes learned and saved) are feature trajectories
trajectoryType = args.trajectoryType
prototypeType = args.trajectoryType
if args.trajectoryType == 'objectfeatures':
    trajectoryType = 'object'
    prototypeType = 'feature'

if args.trajectoryType == 'objectfeatures':
    # keep only the args.maxNObjectFeatures longest-tracked features per object
    # (loadObjectFeatureFrameNumbers presumably returns them sorted by duration
    # — TODO confirm in storage module)
    objectFeatureNumbers = storage.loadObjectFeatureFrameNumbers(args.databaseFilename, objectNumbers = args.nTrajectories)
    featureNumbers = []
    for numbers in objectFeatureNumbers.values():
        featureNumbers += numbers[:min(len(numbers), args.maxNObjectFeatures)]
    objects = storage.loadTrajectoriesFromSqlite(args.databaseFilename, 'feature', objectNumbers = featureNumbers, timeStep = args.positionSubsamplingRate)
else:
    # NOTE(review): in this branch args.trajectoryType != 'objectfeatures',
    # so withFeatures is always False here; expression kept for clarity
    objects = storage.loadTrajectoriesFromSqlite(args.databaseFilename, trajectoryType, withFeatures = (args.trajectoryType == 'objectfeatures'), objectNumbers = args.nTrajectories, timeStep = args.positionSubsamplingRate)

# each trajectory as a 2D array of positions (transposed for LCSS computation)
trajectories = [o.getPositions().asArray().T for o in objects]

lcss = utils.LCSS(metric = args.metric, epsilon = args.epsilon)
nTrajectories = len(trajectories)

# similarity matrix initialized to -1 ("not computed yet"): prototypeCluster
# fills it lazily, so it can be called again without reinitializing similarities
similarities = -np.ones((nTrajectories, nTrajectories))
prototypeIndices, labels = ml.prototypeCluster(trajectories, similarities, args.minSimilarity, lambda x,y : lcss.computeNormalized(x, y), args.minClusterSize, args.optimizeCentroid, args.randomInitialization, True, None) # this line can be called again without reinitializing similarities

clusterSizes = ml.computeClusterSizes(labels, prototypeIndices, -1)
print(clusterSizes)

storage.savePrototypesToSqlite(args.databaseFilename, [objects[i].getNum() for i in prototypeIndices], prototypeType, [clusterSizes[i] for i in prototypeIndices]) # if saving filenames, add for example [objects[i].dbFilename for i in prototypeIndices]
if args.saveSimilarities:
    np.savetxt(utils.removeExtension(args.databaseFilename)+'-prototype-similarities.txt.gz', similarities, '%.4f')

# if args.saveMatches:
#     out = storage.openCheck(utils.removeExtension(args.databaseFilename)+'prototypes-matches.csv', 'w')
#     for o in objects:
#         out.write('')

if args.display:
    from matplotlib.pyplot import figure, show, axis
    figure()
    # plot cluster members in their cluster color, unassigned (label < 0) in black
    for i,o in enumerate(objects):
        if i not in prototypeIndices:
            if labels[i] < 0:
                o.plot('kx')
            else:
                o.plot(utils.colors[labels[i]])
    # plot prototypes with circle markers on top
    for i in prototypeIndices:
        objects[i].plot(utils.colors[i]+'o')
    axis('equal')
    show()