Mercurial Hosting > traffic-intelligence
diff scripts/process.py @ 1050:9d4a06f49cb8
work in progress
author: Nicolas Saunier <nicolas.saunier@polymtl.ca>
date: Fri, 06 Jul 2018 18:12:15 -0400
parents: c9c03c97ed9f
children: 16575ca4537d
line wrap: on
line diff
--- a/scripts/process.py Fri Jul 06 15:58:45 2018 -0400 +++ b/scripts/process.py Fri Jul 06 18:12:15 2018 -0400 @@ -61,10 +61,10 @@ parser.add_argument('--output', dest = 'output', help = 'kind of output to produce (interval means)', choices = ['figure', 'interval', 'event']) parser.add_argument('--min-user-duration', dest = 'minUserDuration', help = 'mininum duration we have to see the user to take into account in the analysis (s)', type = float, default = 0.1) parser.add_argument('--interval-duration', dest = 'intervalDuration', help = 'length of time interval to aggregate data (min)', type = float, default = 15.) -parser.add_argument('--aggregation', dest = 'aggMethod', help = 'aggregation method per user/event and per interval', choices = ['mean', 'median', 'centile'], nargs = '*', default = ['median']) +parser.add_argument('--aggregation', dest = 'aggMethod', help = 'aggregation method per user/interaction and per interval', choices = ['mean', 'median', 'centile'], nargs = '*', default = ['median']) parser.add_argument('--aggregation-centile', dest = 'aggCentiles', help = 'centile(s) to compute from the observations', nargs = '*', type = int) dpi = 150 -# unit of analysis: site or video sequence? +# unit of analysis: site - camera-view # need way of selecting sites as similar as possible to sql alchemy syntax # override tracking.cfg from db @@ -90,8 +90,7 @@ tmpsites = getSite(session, siteId) sites.extend(tmpsites) for site in tmpsites: - for cv in site.cameraViews: - videoSequences.extend(cv.videoSequences) + videoSequences.extend(getSiteVideoSequences(site)) else: print('No video/site to process') @@ -147,7 +146,7 @@ pool.join() elif args.process == 'prototype': # motion pattern learning - # learn by site by default -> group videos by site (or by camera view? 
TODO add cameraviews) + # learn by site by default -> group videos by camera view TODO # by default, load all objects, learn and then assign (BUT not save the assignments) for site in sites: print('Learning motion patterns for site {} ({})'.format(site.idx, site.name)) @@ -177,7 +176,6 @@ outputPrototypeDatabaseFilename = args.databaseFilename else: outputPrototypeDatabaseFilename = args.outputPrototypeDatabaseFilename - # TODO maintain mapping from object prototype to db filename + compute nmatchings before clusterSizes = ml.computeClusterSizes(labels, prototypeIndices, -1) storage.savePrototypesToSqlite(str(parentPath/site.getPath()/outputPrototypeDatabaseFilename), [moving.Prototype(object2VideoSequences[trainingObjects[i]].getDatabaseFilename(False), trainingObjects[i].getNum(), prototypeType, clusterSizes[i]) for i in prototypeIndices]) @@ -232,7 +230,7 @@ objects = storage.loadTrajectoriesFromSqlite(str(parentPath/vs.getDatabaseFilename()), 'object', args.nObjects) for o in objects: if o.length() > minUserDuration: - row = [vs.cameraView.siteIdx, d, utils.framesToTime(o.getFirstInstant(), vs.cameraView.cameraType.frameRate, t1), o.getUserType()] + row = [vs.cameraView.site.name, d, utils.framesToTime(o.getFirstInstant(), vs.cameraView.cameraType.frameRate, t1), o.getUserType()] tmp = o.getSpeeds() for method,func in aggFunctions.items(): aggSpeeds = vs.cameraView.cameraType.frameRate*3.6*func(tmp) @@ -252,7 +250,8 @@ plt.close() elif args.output == 'event': data.to_csv('speeds.csv', index = False) -if args.analyze == 'interaction': + +if args.analyze == 'interaction': # redo as for object, export in dataframe all interaction data indicatorIds = [2,5,7,10] conversionFactors = {2: 1., 5: 30.*3.6, 7:1./30, 10:1./30} maxIndicatorValue = {2: float('inf'), 5: float('inf'), 7:10., 10:10.}