diff scripts/learn-motion-patterns.py @ 1043:b735895c8815

work in progress on the motion pattern learning process (learn-motion-patterns)
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Wed, 04 Jul 2018 17:39:39 -0400
parents 5621e4ad2428
children 75a6ad604cc5
--- a/scripts/learn-motion-patterns.py	Wed Jul 04 16:21:09 2018 -0400
+++ b/scripts/learn-motion-patterns.py	Wed Jul 04 17:39:39 2018 -0400
@@ -12,7 +12,7 @@
 parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file', required = True)
 parser.add_argument('-o', dest = 'outputPrototypeDatabaseFilename', help = 'name of the Sqlite database file to save prototypes')
 parser.add_argument('-i', dest = 'inputPrototypeDatabaseFilename', help = 'name of the Sqlite database file for prototypes to start the algorithm with')
-parser.add_argument('-t', dest = 'trajectoryType', help = 'type of trajectories to learn from', choices = ['objectfeature', 'feature', 'object'], default = 'objectfeatures')
+parser.add_argument('-t', dest = 'trajectoryType', help = 'type of trajectories to learn from', choices = ['objectfeature', 'feature', 'object'], default = 'objectfeature')
 parser.add_argument('--max-nobjectfeatures', dest = 'maxNObjectFeatures', help = 'maximum number of features per object to load', type = int, default = 1)
 parser.add_argument('-n', dest = 'nTrajectories', help = 'number of the object or feature trajectories to load', type = int, default = None)
 parser.add_argument('-e', dest = 'epsilon', help = 'distance for the similarity of trajectory points', type = float, required = True)
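
The hunk above fixes the default for -t: 'objectfeatures' is not one of the declared choices, and argparse only checks choices for values supplied on the command line, never for the default, so the typo went unnoticed whenever -t was omitted. A minimal standalone sketch of that behaviour (not part of the script):

import argparse

# Standalone sketch: argparse validates `choices` only for values given on the
# command line, never for `default`, so the old default 'objectfeatures'
# slipped through silently whenever -t was omitted.
parser = argparse.ArgumentParser()
parser.add_argument('-t', dest = 'trajectoryType',
                    choices = ['objectfeature', 'feature', 'object'],
                    default = 'objectfeatures')  # typo, yet no error at parse time

args = parser.parse_args([])   # -t not given on the command line
print(args.trajectoryType)     # prints 'objectfeatures', a value outside choices
# parser.parse_args(['-t', 'objectfeatures']) would exit with "invalid choice"
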
@@ -24,6 +24,7 @@
 parser.add_argument('--random', dest = 'randomInitialization', help = 'random initialization of clustering algorithm', action = 'store_true')
 parser.add_argument('--subsample', dest = 'positionSubsamplingRate', help = 'rate of position subsampling (1 every n positions)', type = int)
 parser.add_argument('--display', dest = 'display', help = 'display trajectories', action = 'store_true')
+parser.add_argument('--similarities-filename', dest = 'similaritiesFilename', help = 'filename of the similarities')
 parser.add_argument('--save-similarities', dest = 'saveSimilarities', help = 'save computed similarities (in addition to prototypes)', action = 'store_true')
 parser.add_argument('--save-assignments', dest = 'saveAssignments', help = 'saves the assignments of the objects to the prototypes', action = 'store_true')
 parser.add_argument('--assign', dest = 'assign', help = 'assigns the objects to the prototypes and saves the assignments', action = 'store_true')
@@ -40,17 +41,7 @@
 # TODO add possibility to load all trajectories and use minclustersize
 
 # load trajectories to cluster or assign
-if args.trajectoryType == 'objectfeature':
-    trajectoryType = 'feature'
-    objectFeatureNumbers = storage.loadObjectFeatureFrameNumbers(args.databaseFilename, objectNumbers = args.nTrajectories)
-    featureNumbers = []
-    for numbers in objectFeatureNumbers.values():
-        featureNumbers += numbers[:min(len(numbers), args.maxNObjectFeatures)]
-    objects = storage.loadTrajectoriesFromSqlite(args.databaseFilename, 'feature', objectNumbers = featureNumbers, timeStep = args.positionSubsamplingRate)
-else:
-    trajectoryType = args.trajectoryType
-    objects = storage.loadTrajectoriesFromSqlite(args.databaseFilename, trajectoryType, objectNumbers = args.nTrajectories, timeStep = args.positionSubsamplingRate)
-
+objects = storage.loadTrajectoriesFromSqlite(args.databaseFilename, args.trajectoryType, args.nTrajectories, timeStep = args.positionSubsamplingRate)
 trajectories = [o.getPositions().asArray().T for o in objects]
 
 # load initial prototypes, if any    
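
The hunk above drops the 'objectfeature'-specific loading (which capped the number of features kept per object) and passes args.trajectoryType straight to storage.loadTrajectoriesFromSqlite. For reference, the per-object cap in the removed lines reduces to plain slicing, since Python slices already clamp the stop index; a standalone sketch with hypothetical feature numbers:

# Standalone sketch of the per-object cap in the removed lines:
# numbers[:min(len(numbers), k)] gives the same result as numbers[:k],
# because slicing already clamps the stop index to the list length.
objectFeatureNumbers = {1: [10, 11, 12], 2: [20]}  # hypothetical object -> feature numbers
maxNObjectFeatures = 2
featureNumbers = []
for numbers in objectFeatureNumbers.values():
    featureNumbers += numbers[:maxNObjectFeatures]  # same result as the min(...) form
print(featureNumbers)  # [10, 11, 20]
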
@@ -66,9 +57,10 @@
     initialPrototypeIndices = None
 
 lcss = utils.LCSS(metric = args.metric, epsilon = args.epsilon)
-nTrajectories = len(trajectories)
-
-similarities = -np.ones((nTrajectories, nTrajectories))
+if args.similaritiesFilename is not None:
+    similarities = np.loadtxt(args.similaritiesFilename)
+if args.similaritiesFilename is None or similarities.shape[0] != len(trajectories) or similarities.shape[1] != len(trajectories):
+    similarities = -np.ones((len(trajectories), len(trajectories)))
 similarityFunc = lambda x,y : lcss.computeNormalized(x, y)
 # the next line can be called again without reinitializing similarities
 if args.learn:
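
With the hunk above, the pairwise similarity matrix becomes reusable across runs: if --similarities-filename names an existing matrix of the right shape it is read with np.loadtxt, otherwise the matrix is (re)initialized to -1 entries, presumably marking pairs not yet computed. A minimal sketch of that load-or-initialize pattern, with a hypothetical helper name and filename:

import numpy as np

def loadOrInitSimilarities(filename, n):
    # Hypothetical helper mirroring the load-or-initialize pattern above:
    # reuse a previously saved n x n similarity matrix if its shape matches,
    # otherwise start from -1 entries (not yet computed).
    if filename is not None:
        try:
            similarities = np.loadtxt(filename)
            if similarities.shape == (n, n):
                return similarities
        except OSError:
            pass
    return -np.ones((n, n))

similarities = loadOrInitSimilarities('similarities.txt.gz', 5)  # hypothetical filename
print(similarities.shape)  # (5, 5)
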
@@ -90,6 +82,10 @@
 
 if args.learn and not args.assign:
     prototypes = []
+    if args.trajectoryType == 'objectfeature':
+        trajectoryType = 'feature'
+    else:
+        trajectoryType = args.trajectoryType
     for i in prototypeIndices:
         if i<len(initialPrototypes):
             prototypes.append(initialPrototypes[i])
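
The branch added above remaps 'objectfeature' to 'feature' only where the learned prototypes are built and saved, presumably because prototypes learned from per-object features still reference 'feature' trajectories in the database. A hypothetical one-liner equivalent to that branch:

# Hypothetical helper equivalent to the branch added above: the stored
# trajectory type is remapped only when learning from per-object features.
def effectiveTrajectoryType(trajectoryType):
    return 'feature' if trajectoryType == 'objectfeature' else trajectoryType

print(effectiveTrajectoryType('objectfeature'))  # 'feature'
print(effectiveTrajectoryType('object'))         # 'object'
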
@@ -136,7 +132,7 @@
                 objectNumbers.append(objNum)
             storage.savePrototypeAssignmentsToSqlite(args.databaseFilename, objectNumbers, 'object', objectLabels, initialPrototypes)
         else:
-            storage.savePrototypeAssignmentsToSqlite(args.databaseFilename, [obj.getNum() for obj in objects], trajectoryType, labels[len(initialPrototypes):], initialPrototypes)
+            storage.savePrototypeAssignmentsToSqlite(args.databaseFilename, [obj.getNum() for obj in objects], args.trajectoryType, labels[len(initialPrototypes):], initialPrototypes)
     if args.display:
         plt.figure()
         for i,o in enumerate(objects):
@@ -150,4 +146,7 @@
         plt.show()
 
 if (args.learn or args.assign) and args.saveSimilarities:
-    np.savetxt(utils.removeExtension(args.databaseFilename)+'-prototype-similarities.txt.gz', similarities, '%.4f')
+    if args.similaritiesFilename is not None:
+        np.savetxt(args.similaritiesFilename, similarities, '%.4f')
+    else:
+        np.savetxt(utils.removeExtension(args.databaseFilename)+'-prototype-similarities.txt.gz', similarities, '%.4f')
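
The last hunk writes the computed similarities back to --similarities-filename when it is given (falling back to the previous default path next to the database), so the same file can feed the np.loadtxt call in the earlier hunk on a later run. A small round-trip sketch with a hypothetical filename:

import numpy as np

# Round-trip sketch: the '%.4f' text format written by np.savetxt (gzip-compressed
# thanks to the .gz suffix) is read back directly by np.loadtxt, which is what
# lets a later run reuse the saved similarity matrix.
similarities = -np.ones((3, 3))
similarities[0, 1] = similarities[1, 0] = 0.8123
np.savetxt('similarities.txt.gz', similarities, '%.4f')
reloaded = np.loadtxt('similarities.txt.gz')
print(reloaded.shape, reloaded[0, 1])  # (3, 3) 0.8123
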