changeset 1046:f2ba9858e6c6

motion pattern learning seems to work
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Thu, 05 Jul 2018 23:12:11 -0400
parents 25db2383e7ae
children 0b62e37991ab
files scripts/learn-motion-patterns.py scripts/process.py
diffstat 2 files changed, 13 insertions(+), 13 deletions(-) [+]
line wrap: on
line diff
--- a/scripts/learn-motion-patterns.py	Thu Jul 05 17:45:18 2018 -0400
+++ b/scripts/learn-motion-patterns.py	Thu Jul 05 23:12:11 2018 -0400
@@ -14,7 +14,7 @@
 parser.add_argument('-i', dest = 'inputPrototypeDatabaseFilename', help = 'name of the Sqlite database file for prototypes to start the algorithm with')
 parser.add_argument('-t', dest = 'trajectoryType', help = 'type of trajectories to process', choices = ['feature', 'object'], default = 'feature')
 parser.add_argument('--nfeatures-per-object', dest = 'nLongestFeaturesPerObject', help = 'maximum number of features per object to load', type = int)
-parser.add_argument('-n', dest = 'nTrajectories', help = 'number of the object or feature trajectories to load', type = int, default = None)
+parser.add_argument('-n', dest = 'nObjects', help = 'number of object or feature trajectories to load', type = int, default = None)
 parser.add_argument('-e', dest = 'epsilon', help = 'distance for the similarity of trajectory points', type = float, required = True)
 parser.add_argument('--metric', dest = 'metric', help = 'metric for the similarity of trajectory points', default = 'cityblock') # default is manhattan distance
 parser.add_argument('-s', dest = 'minSimilarity', help = 'minimum similarity to put a trajectory in a cluster', type = float, required = True)
@@ -44,7 +44,7 @@
     print('Cannot learn and assign simultaneously')
     sys.exit(0)
 
-objects = storage.loadTrajectoriesFromSqlite(args.databaseFilename, args.trajectoryType, args.nTrajectories, timeStep = args.positionSubsamplingRate, nLongestFeaturesPerObject = args.nLongestFeaturesPerObject)
+objects = storage.loadTrajectoriesFromSqlite(args.databaseFilename, args.trajectoryType, args.nObjects, timeStep = args.positionSubsamplingRate, nLongestFeaturesPerObject = args.nLongestFeaturesPerObject)
 if args.trajectoryType == 'object' and args.nLongestFeaturesPerObject is not None:
     objectsWithFeatures = objects
     objects = [f for o in objectsWithFeatures for f in o.getFeatures()]
--- a/scripts/process.py	Thu Jul 05 17:45:18 2018 -0400
+++ b/scripts/process.py	Thu Jul 05 23:12:11 2018 -0400
@@ -7,17 +7,17 @@
 #import matplotlib
 #atplotlib.use('Agg')
 import matplotlib.pyplot as plt
-from numpy import percentile
+from numpy import percentile, ones
 from pandas import DataFrame
 
-from trafficintelligence import storage, events, prediction, cvutils, utils, moving
+from trafficintelligence import storage, events, prediction, cvutils, utils, moving, processing, ml
 from trafficintelligence.metadata import *
 
 parser = argparse.ArgumentParser(description='This program manages the processing of several files based on a description of the sites and video data in an SQLite database following the metadata module.')
 # input
 parser.add_argument('--db', dest = 'metadataFilename', help = 'name of the metadata file', required = True)
 parser.add_argument('--videos', dest = 'videoIds', help = 'indices of the video sequences', nargs = '*', type = int)
-parser.add_argument('--sites', dest = 'siteIds', help = 'indices of the video sequences', nargs = '*', type = int)
+parser.add_argument('--sites', dest = 'siteIds', help = 'indices of the sites', nargs = '*')
 
 # main function
 parser.add_argument('--delete', dest = 'delete', help = 'data to delete', choices = ['feature', 'object', 'classification', 'interaction'])
@@ -32,17 +32,16 @@
 parser.add_argument('--dry', dest = 'dryRun', help = 'dry run of processing', action = 'store_true')
 parser.add_argument('--nthreads', dest = 'nProcesses', help = 'number of processes to run in parallel', type = int, default = 1)
 parser.add_argument('--subsample', dest = 'positionSubsamplingRate', help = 'rate of position subsampling (1 every n positions)', type = int)
-parser.add_argument('--display', dest = 'display', help = 'display trajectories', action = 'store_true')
 
 ### process options
 # motion pattern learning and assignment
-parser.add_argument('--prototype-filename', dest = 'outputPrototypeDatabaseFilename', help = 'name of the Sqlite database file to save prototypes')
+parser.add_argument('--prototype-filename', dest = 'outputPrototypeDatabaseFilename', help = 'name of the Sqlite database file to save prototypes', default = 'prototypes.sqlite')
 #parser.add_argument('-i', dest = 'inputPrototypeDatabaseFilename', help = 'name of the Sqlite database file for prototypes to start the algorithm with')
 parser.add_argument('--nfeatures-per-object', dest = 'nLongestFeaturesPerObject', help = 'maximum number of features per object to load', type = int)
-parser.add_argument('--epsilon', dest = 'epsilon', help = 'distance for the similarity of trajectory points', type = float, required = True)
+parser.add_argument('--epsilon', dest = 'epsilon', help = 'distance for the similarity of trajectory points', type = float)
 parser.add_argument('--metric', dest = 'metric', help = 'metric for the similarity of trajectory points', default = 'cityblock') # default is manhattan distance
-parser.add_argument('--minsimil', dest = 'minSimilarity', help = 'minimum similarity to put a trajectory in a cluster', type = float, required = True)
-parser.add_argument('-min-cluster-size', dest = 'minClusterSize', help = 'minimum cluster size', type = int, default = 0)
+parser.add_argument('--minsimil', dest = 'minSimilarity', help = 'minimum similarity to put a trajectory in a cluster', type = float)
+parser.add_argument('--min-cluster-size', dest = 'minClusterSize', help = 'minimum cluster size', type = int, default = 0)
 #parser.add_argument('--learn', dest = 'learn', help = 'learn', action = 'store_true')
 parser.add_argument('--optimize', dest = 'optimizeCentroid', help = 'recompute centroid at each assignment', action = 'store_true')
 parser.add_argument('--random', dest = 'randomInitialization', help = 'random initialization of clustering algorithm', action = 'store_true')
@@ -155,7 +154,7 @@
         for cv in site.cameraViews:
             for vs in cv.videoSequences:
                 print('Loading '+vs.getDatabaseFilename())
-                objects[vs.idx] = storage.loadTrajectoriesFromSqlite(str(parentPath/vs.getDatabaseFilename()), args.trajectoryType, args.nTrajectories, timeStep = args.positionSubsamplingRate, nLongestFeaturesPerObject = args.nLongestFeaturesPerObject)
+                objects[vs.idx] = storage.loadTrajectoriesFromSqlite(str(parentPath/vs.getDatabaseFilename()), args.trajectoryType, args.nObjects, timeStep = args.positionSubsamplingRate, nLongestFeaturesPerObject = args.nLongestFeaturesPerObject)
                 if args.trajectoryType == 'object' and args.nLongestFeaturesPerObject is not None:
                     objectsWithFeatures = objects[vs.idx]
                     objects[vs.idx] = [f for o in objectsWithFeatures for f in o.getFeatures()]
@@ -167,14 +166,15 @@
         lcss = utils.LCSS(metric = args.metric, epsilon = args.epsilon)
         similarityFunc = lambda x,y : lcss.computeNormalized(x, y)
         allobjects = [o for tmpobjects in objects.values() for o in tmpobjects]
-        prototypeIndices, labels = processing.learnAssignMotionPatterns(True, True, allobjects, similarities, args.minsimil, similarityFunc, args.minClusterSize, args.optimizeCentroid, args.randomInitialization, True, [])
+        similarities = -ones((len(allobjects), len(allobjects)))
+        prototypeIndices, labels = processing.learnAssignMotionPatterns(True, True, allobjects, similarities, args.minSimilarity, similarityFunc, args.minClusterSize, args.optimizeCentroid, args.randomInitialization, True, [])
         if args.outputPrototypeDatabaseFilename is None:
             outputPrototypeDatabaseFilename = args.databaseFilename
         else:
             outputPrototypeDatabaseFilename = args.outputPrototypeDatabaseFilename
         # TODO maintain mapping from object prototype to db filename + compute nmatchings before
         clusterSizes = ml.computeClusterSizes(labels, prototypeIndices, -1)
-        storage.savePrototypesToSqlite(outputPrototypeDatabaseFilename, [moving.Prototype(object2VideoSequences[allobjects[i]].getDatabaseFilename(False), allobjects[i].getNum(), prototypeType) for i in prototypeIndices])
+        storage.savePrototypesToSqlite(str(parentPath/site.getPath()/outputPrototypeDatabaseFilename), [moving.Prototype(object2VideoSequences[allobjects[i]].getDatabaseFilename(False), allobjects[i].getNum(), prototypeType, clusterSizes[i]) for i in prototypeIndices])
 
 
 elif args.process == 'interaction':