Mercurial Hosting > traffic-intelligence
changeset 1045:25db2383e7ae
work in progress on process.py
| field | value |
|---|---|
| author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
| date | Thu, 05 Jul 2018 17:45:18 -0400 |
| parents | 75a6ad604cc5 |
| children | f2ba9858e6c6 |
| files | scripts/process.py trafficintelligence/processing.py |
| diffstat | 2 files changed, 38 insertions(+), 13 deletions(-) [+] |
line wrap: on
line diff
--- a/scripts/process.py Thu Jul 05 17:06:40 2018 -0400 +++ b/scripts/process.py Thu Jul 05 17:45:18 2018 -0400 @@ -10,7 +10,7 @@ from numpy import percentile from pandas import DataFrame -from trafficintelligence import storage, events, prediction, cvutils, utils +from trafficintelligence import storage, events, prediction, cvutils, utils, moving from trafficintelligence.metadata import * parser = argparse.ArgumentParser(description='This program manages the processing of several files based on a description of the sites and video data in an SQLite database following the metadata module.') @@ -38,12 +38,12 @@ # motion pattern learning and assignment parser.add_argument('--prototype-filename', dest = 'outputPrototypeDatabaseFilename', help = 'name of the Sqlite database file to save prototypes') #parser.add_argument('-i', dest = 'inputPrototypeDatabaseFilename', help = 'name of the Sqlite database file for prototypes to start the algorithm with') -parser.add_argument('--max-nobjectfeatures', dest = 'maxNObjectFeatures', help = 'maximum number of features per object to load', type = int, default = 1) -parser.add_argument('--maxdist', dest = 'epsilon', help = 'distance for the similarity of trajectory points', type = float, required = True) +parser.add_argument('--nfeatures-per-object', dest = 'nLongestFeaturesPerObject', help = 'maximum number of features per object to load', type = int) +parser.add_argument('--epsilon', dest = 'epsilon', help = 'distance for the similarity of trajectory points', type = float, required = True) parser.add_argument('--metric', dest = 'metric', help = 'metric for the similarity of trajectory points', default = 'cityblock') # default is manhattan distance -parser.add_argument('-minsimil', dest = 'minSimilarity', help = 'minimum similarity to put a trajectory in a cluster', type = float, required = True) +parser.add_argument('--minsimil', dest = 'minSimilarity', help = 'minimum similarity to put a trajectory in a cluster', type = float, 
required = True) parser.add_argument('-min-cluster-size', dest = 'minClusterSize', help = 'minimum cluster size', type = int, default = 0) -parser.add_argument('--learn', dest = 'learn', help = 'learn', action = 'store_true') +#parser.add_argument('--learn', dest = 'learn', help = 'learn', action = 'store_true') parser.add_argument('--optimize', dest = 'optimizeCentroid', help = 'recompute centroid at each assignment', action = 'store_true') parser.add_argument('--random', dest = 'randomInitialization', help = 'random initialization of clustering algorithm', action = 'store_true') #parser.add_argument('--similarities-filename', dest = 'similaritiesFilename', help = 'filename of the similarities') @@ -80,15 +80,18 @@ session = connectDatabase(args.metadataFilename) parentPath = Path(args.metadataFilename).parent # files are relative to metadata location videoSequences = [] +sites = [] if args.videoIds is not None: videoSequences = [session.query(VideoSequence).get(videoId) for videoId in args.videoIds] siteIds = set([vs.cameraView.siteIdx for vs in videoSequences]) elif args.siteIds is not None: siteIds = set(args.siteIds) for siteId in siteIds: - for site in getSite(session, siteId): + tmpsites = getSite(session, siteId) + sites.extend(tmpsites) + for site in tmpsites: for cv in site.cameraViews: - videoSequences += cv.videoSequences + videoSequences.extend(cv.videoSequences) else: print('No video/site to process') @@ -145,11 +148,33 @@ elif args.process == 'prototype': # motion pattern learning # learn by site by default -> group videos by site (or by camera view? 
TODO add cameraviews) - # by default, load all objects, learn and then assign - objects = {siteId: [] for siteId in siteIds} - for vs in videoSequences: - print('Loading '+vs.getDatabaseFilename()) - objects[vs.cameraView.siteIdx] += storage.loadTrajectoriesFromSqlite(str(parentPath/vs.getDatabaseFilename()), args.trajectoryType, args.nTrajectories, timeStep = args.positionSubsamplingRate) + # by default, load all objects, learn and then assign (BUT not save the assignments) + for site in sites: + objects = {} + object2VideoSequences = {} + for cv in site.cameraViews: + for vs in cv.videoSequences: + print('Loading '+vs.getDatabaseFilename()) + objects[vs.idx] = storage.loadTrajectoriesFromSqlite(str(parentPath/vs.getDatabaseFilename()), args.trajectoryType, args.nTrajectories, timeStep = args.positionSubsamplingRate, nLongestFeaturesPerObject = args.nLongestFeaturesPerObject) + if args.trajectoryType == 'object' and args.nLongestFeaturesPerObject is not None: + objectsWithFeatures = objects[vs.idx] + objects[vs.idx] = [f for o in objectsWithFeatures for f in o.getFeatures()] + prototypeType = 'feature' + else: + prototypeType = args.trajectoryType + for obj in objects[vs.idx]: + object2VideoSequences[obj] = vs + lcss = utils.LCSS(metric = args.metric, epsilon = args.epsilon) + similarityFunc = lambda x,y : lcss.computeNormalized(x, y) + allobjects = [o for tmpobjects in objects.values() for o in tmpobjects] + prototypeIndices, labels = processing.learnAssignMotionPatterns(True, True, allobjects, similarities, args.minsimil, similarityFunc, args.minClusterSize, args.optimizeCentroid, args.randomInitialization, True, []) + if args.outputPrototypeDatabaseFilename is None: + outputPrototypeDatabaseFilename = args.databaseFilename + else: + outputPrototypeDatabaseFilename = args.outputPrototypeDatabaseFilename + # TODO maintain mapping from object prototype to db filename + compute nmatchings before + clusterSizes = ml.computeClusterSizes(labels, prototypeIndices, -1) 
+ storage.savePrototypesToSqlite(outputPrototypeDatabaseFilename, [moving.Prototype(object2VideoSequences[allobjects[i]].getDatabaseFilename(False), allobjects[i].getNum(), prototypeType) for i in prototypeIndices]) elif args.process == 'interaction':
--- a/trafficintelligence/processing.py Thu Jul 05 17:06:40 2018 -0400 +++ b/trafficintelligence/processing.py Thu Jul 05 17:45:18 2018 -0400 @@ -41,7 +41,7 @@ if minClusterSize > 0 and removePrototypesAfterAssignment: # use prototypeIndices anyway prototypeIndices = assignedPrototypeIndices else: - labels = None + labels = None return prototypeIndices, labels