Mercurial Hosting > traffic-intelligence
comparison scripts/learn-motion-patterns.py @ 953:989917b1ed85
assign and learn work
author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
---|---|
date | Tue, 25 Jul 2017 17:36:07 -0400 |
parents | a9b2beef0db4 |
children | cc89267b5ff9 |
comparison
equal
deleted
inserted
replaced
952:a9b2beef0db4 | 953:989917b1ed85 |
---|---|
16 parser.add_argument('--max-nobjectfeatures', dest = 'maxNObjectFeatures', help = 'maximum number of features per object to load', type = int, default = 1) | 16 parser.add_argument('--max-nobjectfeatures', dest = 'maxNObjectFeatures', help = 'maximum number of features per object to load', type = int, default = 1) |
17 parser.add_argument('-n', dest = 'nTrajectories', help = 'number of the object or feature trajectories to load', type = int, default = None) | 17 parser.add_argument('-n', dest = 'nTrajectories', help = 'number of the object or feature trajectories to load', type = int, default = None) |
18 parser.add_argument('-e', dest = 'epsilon', help = 'distance for the similarity of trajectory points', type = float, required = True) | 18 parser.add_argument('-e', dest = 'epsilon', help = 'distance for the similarity of trajectory points', type = float, required = True) |
19 parser.add_argument('--metric', dest = 'metric', help = 'metric for the similarity of trajectory points', default = 'cityblock') # default is manhattan distance | 19 parser.add_argument('--metric', dest = 'metric', help = 'metric for the similarity of trajectory points', default = 'cityblock') # default is manhattan distance |
20 parser.add_argument('-s', dest = 'minSimilarity', help = 'minimum similarity to put a trajectory in a cluster', type = float, required = True) | 20 parser.add_argument('-s', dest = 'minSimilarity', help = 'minimum similarity to put a trajectory in a cluster', type = float, required = True) |
21 parser.add_argument('-c', dest = 'minClusterSize', help = 'minimum cluster size', type = int, default = None) | 21 parser.add_argument('-c', dest = 'minClusterSize', help = 'minimum cluster size', type = int, default = 0) |
22 parser.add_argument('--learn', dest = 'learn', help = 'learn', action = 'store_true') | 22 parser.add_argument('--learn', dest = 'learn', help = 'learn', action = 'store_true') |
23 parser.add_argument('--optimize', dest = 'optimizeCentroid', help = 'recompute centroid at each assignment', action = 'store_true') | 23 parser.add_argument('--optimize', dest = 'optimizeCentroid', help = 'recompute centroid at each assignment', action = 'store_true') |
24 parser.add_argument('--random', dest = 'randomInitialization', help = 'random initialization of clustering algorithm', action = 'store_true') | 24 parser.add_argument('--random', dest = 'randomInitialization', help = 'random initialization of clustering algorithm', action = 'store_true') |
25 parser.add_argument('--subsample', dest = 'positionSubsamplingRate', help = 'rate of position subsampling (1 every n positions)', type = int) | 25 parser.add_argument('--subsample', dest = 'positionSubsamplingRate', help = 'rate of position subsampling (1 every n positions)', type = int) |
26 parser.add_argument('--display', dest = 'display', help = 'display trajectories', action = 'store_true') | 26 parser.add_argument('--display', dest = 'display', help = 'display trajectories', action = 'store_true') |
75 | 75 |
76 similarities = -np.ones((nTrajectories, nTrajectories)) | 76 similarities = -np.ones((nTrajectories, nTrajectories)) |
77 similarityFunc = lambda x,y : lcss.computeNormalized(x, y) | 77 similarityFunc = lambda x,y : lcss.computeNormalized(x, y) |
78 # the next line can be called again without reinitializing similarities | 78 # the next line can be called again without reinitializing similarities |
79 if args.learn: | 79 if args.learn: |
80 prototypeIndices = ml.prototypeCluster(trajectories, similarities, args.minSimilarity, similarityFunc, args.minClusterSize, args.optimizeCentroid, args.randomInitialization, initialPrototypeIndices) | 80 prototypeIndices = ml.prototypeCluster(trajectories, similarities, args.minSimilarity, similarityFunc, args.optimizeCentroid, args.randomInitialization, initialPrototypeIndices) |
81 # assignment is done if explicitly passed as argument or if working on the same database (starting prototypes from scratch and assigning the trajectories) | 81 # assignment is done if explicitly passed as argument or if working on the same database (starting prototypes from scratch and assigning the trajectories) |
82 # (otherwise the matchings will not compare and one has to do matchings on a large scale at once) | 82 # (otherwise the matchings will not compare and one has to do matchings on a large scale at once) |
83 else: | |
84 prototypeIndices = initialPrototypeIndices | |
83 | 85 |
84 if args.assign: | 86 if args.assign: |
85 prototypeIndices, labels = ml.assignToPrototypeClusters(trajectories, prototypeIndices, similarities, args.minSimilarity, similarityFunc, args.minClusterSize) | 87 prototypeIndices, labels = ml.assignToPrototypeClusters(trajectories, prototypeIndices, similarities, args.minSimilarity, similarityFunc, args.minClusterSize) |
86 clusterSizes = ml.computeClusterSizes(labels, prototypeIndices, -1) | 88 clusterSizes = ml.computeClusterSizes(labels, prototypeIndices, -1) |
87 print(clusterSizes) | 89 print(clusterSizes) |
88 | 90 |
89 if args.learn or args.assign: | 91 if args.learn or args.assign: |
90 prototypes = [] | 92 prototypes = [] |
91 for i in prototypeIndices: | 93 for i in prototypeIndices: |
92 if args.assign: | 94 if args.assign: |
93 nMatchings = clusterSizes[i] | 95 nMatchings = clusterSizes[i]-1 |
94 else: | 96 else: |
95 nMatchings = 0 | 97 nMatchings = 0 |
96 if i<len(initialPrototypes): | 98 if i<len(initialPrototypes): |
97 initialPrototypes[i].nMatchings += nMatchings | 99 initialPrototypes[i].nMatchings += nMatchings |
98 prototypes.append(initialPrototypes[i]) | 100 prototypes.append(initialPrototypes[i]) |