Mercurial Hosting > traffic-intelligence
changeset 928:063d1267585d
work in progress
author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
---|---|
date | Wed, 12 Jul 2017 01:24:31 -0400 |
parents | c030f735c594 |
children | be28a3538dc9 |
files | python/cvutils.py python/moving.py python/prediction.py scripts/classify-objects.py scripts/safety-analysis.py |
diffstat | 5 files changed, 36 insertions(+), 25 deletions(-) [+] |
line wrap: on
line diff
--- a/python/cvutils.py	Tue Jul 11 17:56:23 2017 -0400
+++ b/python/cvutils.py	Wed Jul 12 01:24:31 2017 -0400
@@ -257,16 +257,16 @@
             print('Video capture for {} failed'.format(videoFilename))
             return None
 
-    def imageBoxSize(obj, frameNum, homography, width, height, px = 0.2, py = 0.2):
+    def imageBoxSize(obj, frameNum, width, height, px = 0.2, py = 0.2):
         'Computes the bounding box size of object at frameNum'
         x = []
         y = []
         if obj.hasFeatures():
             for f in obj.getFeatures():
                 if f.existsAtInstant(frameNum):
-                    projectedPosition = f.getPositionAtInstant(frameNum).project(homography)
-                    x.append(projectedPosition.x)
-                    y.append(projectedPosition.y)
+                    projectedPosition = f.projectPositions[:, frameNum-f.getFirstInstant()]
+                    x.append(projectedPosition[0])
+                    y.append(projectedPosition[1])
         xmin = min(x)
         xmax = max(x)
         ymin = min(y)
@@ -280,9 +280,9 @@
             xCropMax = int(min(width - 1, .5 * (xmin + xmax + a)))
         return yCropMin, yCropMax, xCropMin, xCropMax
 
-    def imageBox(img, obj, frameNum, homography, width, height, px = 0.2, py = 0.2, minNPixels = 800):
+    def imageBox(img, obj, frameNum, width, height, px = 0.2, py = 0.2, minNPixels = 800):
         'Computes the bounding box of object at frameNum'
-        yCropMin, yCropMax, xCropMin, xCropMax = imageBoxSize(obj, frameNum, homography, width, height, px, py)
+        yCropMin, yCropMax, xCropMin, xCropMax = imageBoxSize(obj, frameNum, width, height, px, py)
         if yCropMax != yCropMin and xCropMax != xCropMin and (yCropMax - yCropMin) * (xCropMax - xCropMin) > minNPixels:
             return img[yCropMin : yCropMax, xCropMin : xCropMax]
         else:
@@ -518,19 +518,23 @@
             out.write('{0} '.format(cvmat[i,j]))
         out.write('\n')
 
-def projectArray(homography, points):
+def projectArray(homography, points, intrinsicCameraMatrix = None, distortionCoefficients = None):
     '''Returns the coordinates of the projected points through homography
     (format: array 2xN points)'''
-    if points.shape[0] != 2:
+    if points.shape[0] not in [2, 3]:
         raise Exception('points of dimension {0} {1}'.format(points.shape[0], points.shape[1]))
 
-    if (homography is not None) and homography.size>0:
-        #alternatively, on could use cv2.convertpointstohomogeneous and other conversion to/from homogeneous coordinates
-        augmentedPoints = append(points,[[1]*points.shape[1]], 0)
+    augmentedPoints = append(points,[[1]*points.shape[1]], 0)
+    if homography is not None and homography.size>0:
         prod = dot(homography, augmentedPoints)
-        return prod[0:2]/prod[2]
+        projected = prod/prod[2]
+        projected[3,:] = 0
     else:
-        return points
+        projected = augmentedPoints
+
+    if intrinsicCameraMatrix is not None and distortionCoefficients is not None:
+        projected = cv2.projectPoints(projected, None, None, intrinsicCameraMatrix, distortionCoefficients)
+    return projected
 
 def project(homography, p):
     '''Returns the coordinates of the projection of the point p with coordinates p[0], p[1]
--- a/python/moving.py	Tue Jul 11 17:56:23 2017 -0400
+++ b/python/moving.py	Wed Jul 12 01:24:31 2017 -0400
@@ -551,7 +551,7 @@
     speedOrientation is the other encoding of velocity, (speed, orientation)
     speedOrientation and control are NormAngle'''
     predictedSpeedTheta = speedOrientation+control
-    if maxSpeed:
+    if maxSpeed is not None:
         predictedSpeedTheta.norm = min(predictedSpeedTheta.norm, maxSpeed)
     predictedPosition = position+predictedSpeedTheta.getPoint()
     return predictedPosition, predictedSpeedTheta
@@ -1561,7 +1561,7 @@
             self.setUserType(utils.argmaxDict(userTypeProbabilities))
         return userTypeProbabilities
 
-    def initClassifyUserTypeHoGSVM(self, aggregationFunc, pedBikeCarSVM, bikeCarSVM = None, pedBikeSpeedTreshold = float('Inf'), bikeCarSpeedThreshold = float('Inf'), nInstantsIgnoredAtEnds = 0):
+    def initClassifyUserTypeHoGSVM(self, aggregationFunc, pedBikeCarSVM, bikeCarSVM = None, pedBikeSpeedTreshold = float('Inf'), bikeCarSpeedThreshold = float('Inf'), nInstantsIgnoredAtEnds = 0, homography = None, intrinsicCameraMatrix = None, distortionCoefficients = None):
         '''Initializes the data structures for classification
 
         TODO? compute speed for longest feature?'''
@@ -1575,7 +1575,10 @@
                 def predict(self, hog):
                     return userType2Num['car']
             self.appearanceClassifier = CarClassifier()
-
+        # project feature positions
+        if self.hasFeatures():
+            for f in self.getFeatures():
+                f.projectedPositions = cvutils.projectArray(homography, f.getPositions().asArray(), intrinsicCameraMatrix, distortionCoefficients)
         self.userTypes = {}
 
     def classifyUserTypeHoGSVMAtInstant(self, img, instant, homography, width, height, px, py, minNPixels, rescaleSize, orientations, pixelsPerCell, cellsPerBlock, blockNorm):
--- a/python/prediction.py	Tue Jul 11 17:56:23 2017 -0400
+++ b/python/prediction.py	Wed Jul 12 01:24:31 2017 -0400
@@ -51,7 +51,7 @@
     def getControl(self):
         return self.control
 
-def findNearestParams(initialPosition,prototypeTrajectory):
+def findNearestParams(initialPosition, prototypeTrajectory):
    ''' nearest parameters are the index of minDistance and the orientation '''
    distances=[]
    for position in prototypeTrajectory.positions:
@@ -71,7 +71,7 @@
     (applying a constant ratio equal to
     the ratio of the user instantaneous speed and the trajectory closest speed)'''
 
-    def __init__(self, initialPosition, initialVelocity, prototypeTrajectory, constantSpeed = True, probability = 1.):
+    def __init__(self, initialPosition, initialVelocity, prototypeTrajectory, constantSpeed = False, probability = 1.):
         self.prototypeTrajectory = prototypeTrajectory
         self.constantSpeed = constantSpeed
         self.probability = probability
--- a/scripts/classify-objects.py	Tue Jul 11 17:56:23 2017 -0400
+++ b/scripts/classify-objects.py	Wed Jul 12 01:24:31 2017 -0400
@@ -75,8 +75,12 @@
     height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
     if undistort: # setup undistortion
-        [map1, map2] = cvutils.computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients)
-        height, width = map1.shape
+#        [map1, map2] = cvutils.computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients)
+#        height, width = map1.shape
+        newImgSize = (int(round(width*undistortedImageMultiplication)), int(round(height*undistortedImageMultiplication)))
+        newCameraMatrix = cv2.getDefaultNewCameraMatrix(intrinsicCameraMatrix, newImgSize, True)
+    else:
+        newCameraMatrix = None
 
     pastObjects = []
     currentObjects = []
@@ -92,11 +96,11 @@
         if ret:
             if frameNum%50 == 0:
                 print('frame number: {}'.format(frameNum))
-            if undistort:
-                img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
+            #if undistort:
+            #    img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
             for obj in objects:
                 if obj.getFirstInstant() <= frameNum: # if images are skipped
-                    obj.initClassifyUserTypeHoGSVM(speedAggregationFunc, pedBikeCarSVM, bikeCarSVM, classifierParams.maxPedestrianSpeed, classifierParams.maxCyclistSpeed, classifierParams.nFramesIgnoreAtEnds)
+                    obj.initClassifyUserTypeHoGSVM(speedAggregationFunc, pedBikeCarSVM, bikeCarSVM, classifierParams.maxPedestrianSpeed, classifierParams.maxCyclistSpeed, classifierParams.nFramesIgnoreAtEnds, invHomography, newCameraMatrix, distortionCoefficients)
                     currentObjects.append(obj)
                     objects.remove(obj)
@@ -106,7 +110,7 @@
                     pastObjects.append(obj)
                     currentObjects.remove(obj)
                 else:
-                    obj.classifyUserTypeHoGSVMAtInstant(img, frameNum, invHomography, width, height, classifierParams.percentIncreaseCrop, classifierParams.percentIncreaseCrop, classifierParams.minNPixels, classifierParams.hogRescaleSize, classifierParams.hogNOrientations, classifierParams.hogNPixelsPerCell, classifierParams.hogNCellsPerBlock, classifierParams.hogBlockNorm)
+                    obj.classifyUserTypeHoGSVMAtInstant(img, frameNum, width, height, classifierParams.percentIncreaseCrop, classifierParams.percentIncreaseCrop, classifierParams.minNPixels, classifierParams.hogRescaleSize, classifierParams.hogNOrientations, classifierParams.hogNPixelsPerCell, classifierParams.hogNCellsPerBlock, classifierParams.hogBlockNorm)
             frameNum += 1
 
     for obj in currentObjects:
--- a/scripts/safety-analysis.py	Tue Jul 11 17:56:23 2017 -0400
+++ b/scripts/safety-analysis.py	Wed Jul 12 01:24:31 2017 -0400
@@ -71,7 +71,7 @@
     for inter in interactions:
         inter.computePET(params.collisionDistance)
 
-storage.saveIndicators(params.databaseFilename, interactions)
+storage.saveIndicatorsToSqlite(params.databaseFilename, interactions)
 
 if args.displayCollisionPoints:
     plt.figure()