changeset 993:e8eabef7857c

update to OpenCV3 for python
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Wed, 16 May 2018 21:06:52 -0400
parents 2cd1ce245024
children 8118c6b77d7c
files python/cvutils.py python/ml.py python/moving.py scripts/classify-objects.py scripts/train-object-classification.py
diffstat 5 files changed, 85 insertions(+), 95 deletions(-)
--- a/python/cvutils.py	Fri Apr 13 16:48:02 2018 -0400
+++ b/python/cvutils.py	Wed May 16 21:06:52 2018 -0400
@@ -94,15 +94,6 @@
         H, mask = cv2.findHomography(srcPoints, dstPoints, method, ransacReprojThreshold)
         return H
 
-    def arrayToCvMat(a, t = cv2.CV_64FC1):
-        '''Converts a numpy array to an OpenCV CvMat, with default type CV_64FC1.'''
-        print('Deprecated, use new interface')
-        cvmat = cv2.cv.CreateMat(a.shape[0], a.shape[1], t)
-        for i in range(cvmat.rows):
-            for j in range(cvmat.cols):
-                cvmat[i,j] = a[i,j]
-        return cvmat
-
     def cvPlot(img, positions, color, lastCoordinate = None, **kwargs):
         if lastCoordinate is None:
             last = positions.length()-1
@@ -152,7 +143,7 @@
             nFramesShown = 0
             if firstFrameNums is not None:
                 for i in xrange(len(captures)):
-                    captures[i].set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNums[i])
+                    captures[i].set(cv2.CAP_PROP_POS_FRAMES, firstFrameNums[i])
             while ret and not quitKey(key):
                 rets = []
                 images = []
@@ -174,45 +165,45 @@
                     nFramesShown += step
                     if step > 1:
                         for i in xrange(len(captures)):
-                            captures[i].set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNums[i]+nFramesShown)
+                            captures[i].set(cv2.CAP_PROP_POS_FRAMES, firstFrameNums[i]+nFramesShown)
             cv2.destroyAllWindows()
         else:
             print('Video captures for {} failed'.format(filenames))
 
     def infoVideo(filename):
         '''Provides all available info on video '''
-        cvPropertyNames = {cv2.cv.CV_CAP_PROP_FORMAT: "format",
-                           cv2.cv.CV_CAP_PROP_FOURCC: "codec (fourcc)",
-                           cv2.cv.CV_CAP_PROP_FPS: "fps",
-                           cv2.cv.CV_CAP_PROP_FRAME_COUNT: "number of frames",
-                           cv2.cv.CV_CAP_PROP_FRAME_HEIGHT: "heigh",
-                           cv2.cv.CV_CAP_PROP_FRAME_WIDTH: "width",
-                           cv2.cv.CV_CAP_PROP_RECTIFICATION: "rectification",
-                           cv2.cv.CV_CAP_PROP_SATURATION: "saturation"}
+        cvPropertyNames = {cv2.CAP_PROP_FORMAT: "format",
+                           cv2.CAP_PROP_FOURCC: "codec (fourcc)",
+                           cv2.CAP_PROP_FPS: "fps",
+                           cv2.CAP_PROP_FRAME_COUNT: "number of frames",
+                           cv2.CAP_PROP_FRAME_HEIGHT: "height",
+                           cv2.CAP_PROP_FRAME_WIDTH: "width",
+                           cv2.CAP_PROP_RECTIFICATION: "rectification",
+                           cv2.CAP_PROP_SATURATION: "saturation"}
         capture = cv2.VideoCapture(filename)
         videoProperties = {}
         if capture.isOpened():
-            for cvprop in [#cv2.cv.CV_CAP_PROP_BRIGHTNESS
-                    #cv2.cv.CV_CAP_PROP_CONTRAST
-                    #cv2.cv.CV_CAP_PROP_CONVERT_RGB
-                    #cv2.cv.CV_CAP_PROP_EXPOSURE
-                    cv2.cv.CV_CAP_PROP_FORMAT,
-                    cv2.cv.CV_CAP_PROP_FOURCC,
-                    cv2.cv.CV_CAP_PROP_FPS,
-                    cv2.cv.CV_CAP_PROP_FRAME_COUNT,
-                    cv2.cv.CV_CAP_PROP_FRAME_HEIGHT,
-                    cv2.cv.CV_CAP_PROP_FRAME_WIDTH,
-                    #cv2.cv.CV_CAP_PROP_GAIN,
-                    #cv2.cv.CV_CAP_PROP_HUE
-                    #cv2.cv.CV_CAP_PROP_MODE
-                    #cv2.cv.CV_CAP_PROP_POS_AVI_RATIO
-                    #cv2.cv.CV_CAP_PROP_POS_FRAMES
-                    #cv2.cv.CV_CAP_PROP_POS_MSEC
-                    #cv2.cv.CV_CAP_PROP_RECTIFICATION,
-                    #cv2.cv.CV_CAP_PROP_SATURATION
+            for cvprop in [#cv2.CAP_PROP_BRIGHTNESS
+                    #cv2.CAP_PROP_CONTRAST
+                    #cv2.CAP_PROP_CONVERT_RGB
+                    #cv2.CAP_PROP_EXPOSURE
+                    cv2.CAP_PROP_FORMAT,
+                    cv2.CAP_PROP_FOURCC,
+                    cv2.CAP_PROP_FPS,
+                    cv2.CAP_PROP_FRAME_COUNT,
+                    cv2.CAP_PROP_FRAME_HEIGHT,
+                    cv2.CAP_PROP_FRAME_WIDTH,
+                    #cv2.CAP_PROP_GAIN,
+                    #cv2.CAP_PROP_HUE
+                    #cv2.CAP_PROP_MODE
+                    #cv2.CAP_PROP_POS_AVI_RATIO
+                    #cv2.CAP_PROP_POS_FRAMES
+                    #cv2.CAP_PROP_POS_MSEC
+                    #cv2.CAP_PROP_RECTIFICATION,
+                    #cv2.CAP_PROP_SATURATION
             ]:
                 prop = capture.get(cvprop)
-                if cvprop == cv2.cv.CV_CAP_PROP_FOURCC and prop > 0:
+                if cvprop == cv2.CAP_PROP_FOURCC and prop > 0:
                     prop = int2FOURCC(int(prop))
                 videoProperties[cvPropertyNames[cvprop]] = prop
         else:
@@ -224,12 +215,12 @@
         images = []
         capture = cv2.VideoCapture(videoFilename)
         if capture.isOpened():
-            rawCount = capture.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
+            rawCount = capture.get(cv2.CAP_PROP_FRAME_COUNT)
             if rawCount < 0:
                 rawCount = lastFrameNum+1
             nDigits = int(floor(log10(rawCount)))+1
             ret = False
-            capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNum)
+            capture.set(cv2.CAP_PROP_POS_FRAMES, firstFrameNum)
             frameNum = firstFrameNum
             while frameNum<lastFrameNum and frameNum<rawCount:
                 ret, img = capture.read()
@@ -245,7 +236,7 @@
                         images.append(img)
                     frameNum +=step
                     if step > 1:
-                        capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum)
+                        capture.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
             capture.release()
         else:
             print('Video capture for {} failed'.format(videoFilename))
@@ -254,7 +245,7 @@
     def getFPS(videoFilename):
         capture = cv2.VideoCapture(videoFilename)
         if capture.isOpened():
-            fps = capture.get(cv2.cv.CV_CAP_PROP_FPS)
+            fps = capture.get(cv2.CAP_PROP_FPS)
             capture.release()
             return fps
         else:
@@ -300,8 +291,8 @@
             colorType = 'default'
 
         capture = cv2.VideoCapture(videoFilename)
-        width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
-        height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
+        width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
+        height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
 
         windowName = 'frame'
         if rescale == 1.:
@@ -313,7 +304,7 @@
             key = -1
             ret = True
             frameNum = firstFrameNum
-            capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNum)
+            capture.set(cv2.CAP_PROP_POS_FRAMES, firstFrameNum)
             if lastFrameNumArg is None:
                 lastFrameNum = float("inf")
             else:
@@ -374,7 +365,7 @@
                         cv2.imwrite('image-{{:0{}}}.png'.format(nZerosFilename).format(frameNum), img)
                     frameNum += nFramesStep
                     if nFramesStep > 1:
-                        capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum)
+                        capture.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
             cv2.destroyAllWindows()
         else:
             print('Cannot load file ' + videoFilename)
@@ -496,11 +487,11 @@
         try:
             flags = 0
             if fixK2:
-                flags += cv2.cv.CV_CALIB_FIX_K2
+                flags += cv2.CALIB_FIX_K2
             if fixK3:
-                flags += cv2.cv.CV_CALIB_FIX_K3
+                flags += cv2.CALIB_FIX_K3
             if zeroTangent:
-                flags += cv2.cv.CV_CALIB_ZERO_TANGENT_DIST
+                flags += cv2.CALIB_ZERO_TANGENT_DIST
             ret, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None, flags = flags)
         except NameError:
             return None
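
Note on the cvutils.py changes: OpenCV 3 removes the cv2.cv submodule, so the capture and calibration constants lose their CV_ prefix and move directly under cv2 (cv2.cv.CV_CAP_PROP_FPS becomes cv2.CAP_PROP_FPS, cv2.cv.CV_CALIB_FIX_K2 becomes cv2.CALIB_FIX_K2). A minimal sketch of the renamed capture API, independent of this repository and using a placeholder filename:

import cv2

capture = cv2.VideoCapture('video.avi')  # placeholder filename
if capture.isOpened():
    fps = capture.get(cv2.CAP_PROP_FPS)
    nFrames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    capture.set(cv2.CAP_PROP_POS_FRAMES, 100)  # seek to frame 100 before reading
    ret, img = capture.read()
    capture.release()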
--- a/python/ml.py	Fri Apr 13 16:48:02 2018 -0400
+++ b/python/ml.py	Wed May 16 21:06:52 2018 -0400
@@ -25,50 +25,52 @@
 #####################
 
 def computeConfusionMatrix(model, samples, responses):
-    'computes the confusion matrix of the classifier (model)'
+    '''computes the confusion matrix of the classifier (model)
+
+    samples should be n samples by m variables'''
     classifications = {}
-    for x,y in zip(samples, responses):
-        predicted = model.predict(x)
+    predictions = model.predict(samples)
+    for predicted, y in zip(predictions, responses):
         classifications[(y, predicted)] = classifications.get((y, predicted), 0)+1
     return classifications
 
-class StatModel(object):
-    '''Abstract class for loading/saving model'''    
-    def load(self, filename):
-        if path.exists(filename):
-            self.model.load(filename)
-        else:
-            print('Provided filename {} does not exist: model not loaded!'.format(filename))
-
-    def save(self, filename):
-        self.model.save(filename)
-
 if opencvAvailable:
-    class SVM(StatModel):
+    class SVM(object):
         '''wrapper for the OpenCV Support Vector Machine (SVM) algorithm'''
-        def __init__(self, svmType = cv2.SVM_C_SVC, kernelType = cv2.SVM_RBF, degree = 0, gamma = 1, coef0 = 0, Cvalue = 1, nu = 0, p = 0):
-            self.model = cv2.SVM()
-            self.params = dict(svm_type = svmType, kernel_type = kernelType, degree = degree, gamma = gamma, coef0 = coef0, Cvalue = Cvalue, nu = nu, p = p)
-            # OpenCV3
-            # self.model = cv2.SVM()
-            # self.model.setType(svmType)
-            # self.model.setKernel(kernelType)
-            # self.model.setDegree(degree)
-            # self.model.setGamma(gamma)
-            # self.model.setCoef0(coef0)
-            # self.model.setC(Cvalue)
-            # self.model.setNu(nu)
-            # self.model.setP(p)
+        def __init__(self, svmType = cv2.ml.SVM_C_SVC, kernelType = cv2.ml.SVM_RBF, degree = 0, gamma = 1, coef0 = 0, Cvalue = 1, nu = 0, p = 0):
+            self.model = cv2.ml.SVM_create()
+            self.model.setType(svmType)
+            self.model.setKernel(kernelType)
+            self.model.setDegree(degree)
+            self.model.setGamma(gamma)
+            self.model.setCoef0(coef0)
+            self.model.setC(Cvalue)
+            self.model.setNu(nu)
+            self.model.setP(p)
 
-        def train(self, samples, responses, computePerformance = False):
-            self.model.train(samples, responses, params = self.params)
+        def save(self, filename):
+            self.model.save(filename)
+            
+        def train(self, samples, layout, responses, computePerformance = False):
+            self.model.train(samples, layout, responses)
             if computePerformance:
                 return computeConfusionMatrix(self, samples, responses)
 
         def predict(self, hog):
-            return self.model.predict(hog)
+            retval, predictions = self.model.predict(hog)
+            if hog.shape[0] == 1:
+                return predictions[0][0]
+            else:
+                return np.asarray(predictions, dtype = np.int).ravel().tolist()
 
-
+    def SVM_load(filename):
+        if path.exists(filename):
+            svm = SVM()
+            svm.model = cv2.ml.SVM_load(filename)
+            return svm
+        else:
+            print('Provided filename {} does not exist: model not loaded!'.format(filename))
+        
 #####################
 # Clustering
 #####################
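
Note on the ml.py changes: the OpenCV 3 machine learning module lives under cv2.ml; models are built with SVM_create() and configured through setters instead of a params dict, train() takes an explicit sample layout, and predict() returns a (retval, results) pair. A minimal, self-contained sketch of that workflow on hypothetical random data (not project data):

import numpy as np
import cv2

samples = np.random.rand(20, 4).astype(np.float32)        # one row per sample
responses = np.random.randint(0, 2, 20).astype(np.int32)  # integer class labels

svm = cv2.ml.SVM_create()
svm.setType(cv2.ml.SVM_C_SVC)
svm.setKernel(cv2.ml.SVM_RBF)
svm.setGamma(1.)
svm.setC(1.)
svm.train(samples, cv2.ml.ROW_SAMPLE, responses)

retval, results = svm.predict(samples[:5])  # results is a 5x1 float array
print(results.ravel())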
--- a/python/moving.py	Fri Apr 13 16:48:02 2018 -0400
+++ b/python/moving.py	Wed May 16 21:06:52 2018 -0400
@@ -1641,7 +1641,7 @@
         croppedImg = cvutils.imageBox(img, self, instant, width, height, px, py, minNPixels)
         if croppedImg is not None and len(croppedImg) > 0:
             hog = cvutils.HOG(croppedImg, rescaleSize, orientations, pixelsPerCell, cellsPerBlock, blockNorm)
-            self.userTypes[instant] = int(self.appearanceClassifier.predict(hog))
+            self.userTypes[instant] = int(self.appearanceClassifier.predict(hog.reshape(1,hog.size)))
         else:
             self.userTypes[instant] = userType2Num['unknown']
 
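Note on the moving.py change: cv2.ml classifiers expect a 2-D float32 matrix with one row per sample, so the 1-D HOG descriptor is reshaped into a single-row matrix before prediction. A small sketch with a hypothetical descriptor length:

import numpy as np

hog = np.random.rand(96).astype(np.float32)  # hypothetical 96-dimensional descriptor
row = hog.reshape(1, hog.size)               # shape (1, 96): one sample, 96 variables
# userType = appearanceClassifier.predict(row)  # appearanceClassifier is the ml.SVM wrapper above
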
--- a/scripts/classify-objects.py	Fri Apr 13 16:48:02 2018 -0400
+++ b/scripts/classify-objects.py	Wed May 16 21:06:52 2018 -0400
@@ -35,10 +35,8 @@
     print('Unknown speed aggregation method: {}. Exiting'.format(classifierParams.speedAggregationMethod))
     sys.exit()
 
-pedBikeCarSVM = ml.SVM()
-pedBikeCarSVM.load(classifierParams.pedBikeCarSVMFilename)
-bikeCarSVM = ml.SVM()
-bikeCarSVM.load(classifierParams.bikeCarSVMFilename)
+pedBikeCarSVM = ml.SVM_load(classifierParams.pedBikeCarSVMFilename)
+bikeCarSVM = ml.SVM_load(classifierParams.bikeCarSVMFilename)
 
 # log logistic for ped and bik otherwise ((pedBeta/pedAlfa)*((sMean/pedAlfa)**(pedBeta-1)))/((1+(sMean/pedAlfa)**pedBeta)**2.)
 carNorm = norm(classifierParams.meanVehicleSpeed, classifierParams.stdVehicleSpeed)
@@ -72,8 +70,8 @@
     timeInterval.first = 0
 
 capture = cv2.VideoCapture(videoFilename)
-width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
-height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
+width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
+height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
 
 #if undistort: # setup undistortion
 #     [map1, map2] = cvutils.computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients)
@@ -89,7 +87,7 @@
     ret = True
     frameNum = timeInterval.first
     if not args.startFrame0:
-        capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum)
+        capture.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
     lastFrameNum = timeInterval.last
 
     while ret and frameNum <= lastFrameNum:
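
Note on the classify-objects.py changes: cv2.ml.SVM_load builds a new model object rather than filling an existing one, so the script now obtains its classifiers through the ml.SVM_load helper added above instead of constructing ml.SVM() and calling load(). A hedged sketch with a placeholder path:

import ml  # the project module modified in this changeset

pedBikeCarSVM = ml.SVM_load('modelPBV.xml')  # placeholder path; returns None if the file is missing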
--- a/scripts/train-object-classification.py	Fri Apr 13 16:48:02 2018 -0400
+++ b/scripts/train-object-classification.py	Wed May 16 21:06:52 2018 -0400
@@ -2,8 +2,7 @@
 
 import numpy as np
 import argparse
-from cv2 import SVM_RBF, SVM_C_SVC
-#from cv2.ml import SVM_RBF, SVM_C_SVC, ROW_SAMPLE # row_sample for layout in cv2.ml.SVM_load
+from cv2.ml import SVM_RBF, SVM_C_SVC, ROW_SAMPLE # row_sample for layout in cv2.ml.SVM_load
 
 import cvutils, moving, ml, storage
 
@@ -50,28 +49,28 @@
 # Training the Support Vector Machine
 print "Training Pedestrian-Cyclist-Vehicle Model"
 model = ml.SVM(args.svmType, args.kernelType, args.degree, args.gamma, args.coef0, args.cvalue, args.nu, args.svmP)
-classifications = model.train(np.concatenate(trainingSamplesPBV.values()), np.concatenate(trainingLabelsPBV.values()), True)
+classifications = model.train(np.concatenate(trainingSamplesPBV.values()), ROW_SAMPLE, np.concatenate(trainingLabelsPBV.values()), True)
 if args.computeConfusionMatrix:
     print(classifications)
 model.save(args.directoryName + "/modelPBV.xml")
 
 print "Training Cyclist-Vehicle Model"
 model = ml.SVM(args.svmType, args.kernelType, args.degree, args.gamma, args.coef0, args.cvalue, args.nu, args.svmP)
-classifications = model.train(np.concatenate(trainingSamplesBV.values()), np.concatenate(trainingLabelsBV.values()), True)
+classifications = model.train(np.concatenate(trainingSamplesBV.values()), ROW_SAMPLE, np.concatenate(trainingLabelsBV.values()), True)
 if args.computeConfusionMatrix:
     print(classifications)
 model.save(args.directoryName + "/modelBV.xml")
 
 print "Training Pedestrian-Cyclist Model"
 model = ml.SVM(args.svmType, args.kernelType, args.degree, args.gamma, args.coef0, args.cvalue, args.nu, args.svmP)
-classifications = model.train(np.concatenate(trainingSamplesPB.values()), np.concatenate(trainingLabelsPB.values()), True)
+classifications = model.train(np.concatenate(trainingSamplesPB.values()), ROW_SAMPLE, np.concatenate(trainingLabelsPB.values()), True)
 if args.computeConfusionMatrix:
     print(classifications)
 model.save(args.directoryName + "/modelPB.xml")
 
 print "Training Pedestrian-Vehicle Model"
 model = ml.SVM(args.svmType, args.kernelType, args.degree, args.gamma, args.coef0, args.cvalue, args.nu, args.svmP)
-classifications = model.train(np.concatenate(trainingSamplesPV.values()), np.concatenate(trainingLabelsPV.values()), True)
+classifications = model.train(np.concatenate(trainingSamplesPV.values()), ROW_SAMPLE, np.concatenate(trainingLabelsPV.values()), True)
 if args.computeConfusionMatrix:
     print(classifications)
 model.save(args.directoryName + "/modelPV.xml")
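
Note on the train-object-classification.py changes: the wrapper's train() now requires the sample layout as its second argument, matching the OpenCV 3 signature. A minimal sketch of one training call on hypothetical data:

import numpy as np
from cv2.ml import ROW_SAMPLE
import ml

samples = np.random.rand(30, 8).astype(np.float32)     # hypothetical feature matrix
labels = np.random.randint(1, 4, 30).astype(np.int32)  # hypothetical class labels
model = ml.SVM()                                       # defaults: C_SVC with RBF kernel
classifications = model.train(samples, ROW_SAMPLE, labels, True)  # True -> return confusion matrix
print(classifications)
model.save('model.xml')                                # placeholder output path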