Mercurial Hosting > traffic-intelligence
changeset 636:3058e00887bc
removed all issues caused by tests against None: use `is`/`is not` instead of `==`/`!=`
author | Nicolas Saunier <nicolas.saunier@polymtl.ca> |
---|---|
date | Tue, 24 Mar 2015 18:11:28 +0100 |
parents | 6ae68383071e |
children | c9a0b72979fd |
files | python/cvutils.py python/events.py python/indicators.py python/ml.py python/moving.py python/pavement.py python/prediction.py python/storage.py scripts/compute-clearmot.py scripts/compute-homography.py scripts/create-bounding-boxes.py scripts/display-trajectories.py scripts/play-video.py scripts/polytracktopdtv.py scripts/undistort-video.py |
diffstat | 15 files changed, 108 insertions(+), 102 deletions(-) [+] |
line wrap: on
line diff
--- a/python/cvutils.py Tue Mar 24 14:17:12 2015 +0100 +++ b/python/cvutils.py Tue Mar 24 18:11:28 2015 +0100 @@ -95,7 +95,7 @@ def cvPlot(img, positions, color, lastCoordinate = None): last = lastCoordinate+1 - if lastCoordinate != None and lastCoordinate >=0: + if lastCoordinate is not None and lastCoordinate >=0: last = min(positions.length()-1, lastCoordinate) for i in range(0, last-1): cv2.line(img, positions[i].asint().astuple(), positions[i+1].asint().astuple(), color) @@ -140,7 +140,7 @@ if printFrames: print('frame {0}'.format(frameNum)) frameNum+=1 - if text != None: + if text is not None: cv2.putText(img, text, (10,50), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvRed) cvImshow(windowName, img, rescale) key = cv2.waitKey(wait) @@ -234,7 +234,7 @@ ret = True frameNum = firstFrameNum capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNum) - if lastFrameNumArg == None: + if lastFrameNumArg is None: from sys import maxint lastFrameNum = maxint else: @@ -250,7 +250,7 @@ for obj in objects: if obj.existsAtInstant(frameNum): if not hasattr(obj, 'projectedPositions'): - if homography != None: + if homography is not None: obj.projectedPositions = obj.positions.project(homography) else: obj.projectedPositions = obj.positions @@ -422,7 +422,7 @@ if points.shape[0] != 2: raise Exception('points of dimension {0} {1}'.format(points.shape[0], points.shape[1])) - if (homography!=None) and homography.size>0: + if (homography is not None) and homography.size>0: augmentedPoints = append(points,[[1]*points.shape[1]], 0) prod = dot(homography, augmentedPoints) return prod[0:2]/prod[2]
--- a/python/events.py Tue Mar 24 14:17:12 2015 +0100 +++ b/python/events.py Tue Mar 24 18:11:28 2015 +0100 @@ -91,13 +91,13 @@ def __init__(self, num = None, timeInterval = None, roaduserNum1 = None, roaduserNum2 = None, roadUser1 = None, roadUser2 = None, categoryNum = None): moving.STObject.__init__(self, num, timeInterval) - if timeInterval == None and roadUser1 != None and roadUser2 != None: + if timeInterval is None and roadUser1 is not None and roadUser2 is not None: self.timeInterval = roadUser1.commonTimeInterval(roadUser2) self.roadUser1 = roadUser1 self.roadUser2 = roadUser2 - if roaduserNum1 != None and roaduserNum2 != None: + if roaduserNum1 is not None and roaduserNum2 is not None: self.roadUserNumbers = set([roaduserNum1, roaduserNum2]) - elif roadUser1 != None and roadUser2 != None: + elif roadUser1 is not None and roadUser2 is not None: self.roadUserNumbers = set(roadUser1.getNum(), roadUser2.getNum()) else: self.roadUserNumbers = None @@ -116,9 +116,9 @@ self.roadUser2 = objects[nums[1]] i = 0 - while i < len(objects) and self.roadUser2 == None: + while i < len(objects) and self.roadUser2 is None: if objects[i].getNum() in nums: - if self.roadUser1 == None: + if self.roadUser1 is None: self.roadUser1 = objects[i] else: self.roadUser2 = objects[i] @@ -140,7 +140,7 @@ self.roadUser2.plotOnWorldImage(nPixelsPerUnitDistance, options, withOrigin, timeStep, **kwargs) def play(self, videoFilename, homography = None, undistort = False, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1.): - if self.roadUser1 != None and self.roadUser2 != None: + if self.roadUser1 is not None and self.roadUser2 is not None: cvutils.displayTrajectories(videoFilename, [self.roadUser1, self.roadUser2], homography = homography, firstFrameNum = self.getFirstInstant(), lastFrameNumArg = self.getLastInstant(), undistort = undistort, intrinsicCameraMatrix = intrinsicCameraMatrix, distortionCoefficients = distortionCoefficients, 
undistortedImageMultiplication = undistortedImageMultiplication) else: print('Please set the interaction road user attributes roadUser1 and roadUser1 through the method setRoadUsers') @@ -192,7 +192,7 @@ route1= getRoute(self.roadUser1,prototypes,objects,noiseEntryNums,noiseExitNums,useDestination) route2= getRoute(self.roadUser2,prototypes,objects,noiseEntryNums,noiseExitNums,useDestination) - if timeInterval != None: + if timeInterval is not None: commonTimeInterval = timeInterval else: commonTimeInterval = self.timeInterval @@ -221,13 +221,13 @@ def createInteractions(objects, _others = None): '''Create all interactions of two co-existing road users''' - if _others != None: + if _others is not None: others = _others interactions = [] num = 0 for i in xrange(len(objects)): - if _others == None: + if _others is None: others = objects[:i] for j in xrange(len(others)): commonTimeInterval = objects[i].commonTimeInterval(others[j])
--- a/python/indicators.py Tue Mar 24 14:17:12 2015 +0100 +++ b/python/indicators.py Tue Mar 24 18:11:28 2015 +0100 @@ -98,7 +98,7 @@ def l1Distance(x, y): # lambda x,y:abs(x-y) - if x == None or y == None: + if x is None or y is None: return float('inf') else: return abs(x-y) @@ -112,7 +112,7 @@ self.minLength = minLength def checkIndicator(self, indicator): - return indicator != None and len(indicator) >= self.minLength + return indicator is not None and len(indicator) >= self.minLength def compute(self, indicator1, indicator2, computeSubSequence = False): if self.checkIndicator(indicator1) and self.checkIndicator(indicator2):
--- a/python/ml.py Tue Mar 24 14:17:12 2015 +0100 +++ b/python/ml.py Tue Mar 24 18:11:28 2015 +0100 @@ -71,7 +71,7 @@ localdata = copy(data) # shallow copy to avoid modifying data if shuffleData: shuffle(localdata) - if initialCentroids == None: + if initialCentroids is None: centroids = [Centroid(localdata[0])] else: centroids = deepcopy(initialCentroids)
--- a/python/moving.py Tue Mar 24 14:17:12 2015 +0100 +++ b/python/moving.py Tue Mar 24 18:11:28 2015 +0100 @@ -405,7 +405,7 @@ for spline_p in range(len(splines[spline])-1): #Get closest point on spline closestPoint = ppldb2p(p.x,p.y,splines[spline][spline_p][0],splines[spline][spline_p][1],splines[spline][spline_p+1][0],splines[spline][spline_p+1][1]) - if closestPoint == None: + if closestPoint is None: print('Error: Spline {0}, segment {1} has identical bounds and therefore is not a vector. Projection cannot continue.'.format(spline, spline_p)) return None # check if the @@ -570,7 +570,7 @@ return None else: inter = intersection(p1, p2, p3, p4) - if (inter != None + if (inter is not None and utils.inBetween(p1.x, p2.x, inter.x) and utils.inBetween(p3.x, p4.x, inter.x) and utils.inBetween(p1.y, p2.y, inter.y) @@ -582,7 +582,7 @@ def segmentLineIntersection(p1, p2, p3, p4): '''Indicates if the line going through p1 and p2 intersects inside p3, p4''' inter = intersection(p1, p2, p3, p4) - if inter != None and utils.inBetween(p3.x, p4.x, inter.x) and utils.inBetween(p3.y, p4.y, inter.y): + if inter is not None and utils.inBetween(p3.x, p4.x, inter.x) and utils.inBetween(p3.y, p4.y, inter.y): return inter else: return None @@ -594,7 +594,7 @@ The class is iterable''' def __init__(self, positions=None): - if positions != None: + if positions is not None: self.positions = positions else: self.positions = [[],[]] @@ -681,7 +681,7 @@ @staticmethod def _plot(positions, options = '', withOrigin = False, lastCoordinate = None, timeStep = 1, **kwargs): from matplotlib.pylab import plot - if lastCoordinate == None: + if lastCoordinate is None: plot(positions[0][::timeStep], positions[1][::timeStep], options, **kwargs) elif 0 <= lastCoordinate <= len(positions[0]): plot(positions[0][:lastCoordinate:timeStep], positions[1][:lastCoordinate:timeStep], options, **kwargs) @@ -818,7 +818,7 @@ q1=self.__getitem__(i) q2=self.__getitem__(i+1) p = utils.segmentIntersection(q1, q2, p1, 
p2) - if p != None: + if p is not None: if q1.x != q2.x: ratio = (p.x-q1.x)/(q2.x-q1.x) elif q1.y != q2.y: @@ -840,7 +840,7 @@ q1=self.__getitem__(i) q2=self.__getitem__(i+1) p = utils.segmentLineIntersection(p1, p2, q1, q2) - if p != None: + if p is not None: if q1.x != q2.x: ratio = (p.x-q1.x)/(q2.x-q1.x) elif q1.y != q2.y: @@ -897,13 +897,13 @@ lateral coordiante is stored as second coordinate''' def __init__(self, S = None, Y = None, lanes = None): - if S == None or Y == None or len(S) != len(Y): + if S is None or Y is None or len(S) != len(Y): self.positions = [[],[]] - if S != None and Y != None and len(S) != len(Y): + if S is not None and Y is not None and len(S) != len(Y): print("S and Y coordinates of different lengths\nInitializing to empty lists") else: self.positions = [S,Y] - if lanes == None or len(lanes) != self.length(): + if lanes is None or len(lanes) != self.length(): self.lanes = [] else: self.lanes = lanes @@ -948,13 +948,13 @@ def getIntersections(self, S1, lane = None): '''Returns a list of the indices at which the trajectory goes past the curvilinear coordinate S1 - (in provided lane if lane != None) + (in provided lane if lane is not None) the list is empty if there is no crossing''' indices = [] for i in xrange(self.length()-1): q1=self.__getitem__(i) q2=self.__getitem__(i+1) - if q1[0] <= S1 < q2[0] and (lane == None or (self.lanes[i] == lane and self.lanes[i+1] == lane)): + if q1[0] <= S1 < q2[0] and (lane is None or (self.lanes[i] == lane and self.lanes[i+1] == lane)): indices.append(i+(S1-q1[0])/(q2[0]-q1[0])) return indices @@ -1000,7 +1000,7 @@ print('The two objects\' time intervals do not overlap: obj1 {} and obj2 {}'.format(obj1.getTimeInterval(), obj2.getTimeInterval())) return None else: - if num == None: + if num is None: newNum = obj1.getNum() else: newNum = num @@ -1135,7 +1135,7 @@ @staticmethod def distances(obj1, obj2, instant1, _instant2 = None): from scipy.spatial.distance import cdist - if _instant2 == None: + if 
_instant2 is None: instant2 = instant1 else: instant2 = _instant2 @@ -1229,7 +1229,7 @@ result = getSYfromXY(self.getPositionAt(i), alignments) # Error handling - if(result == None): + if(result is None): print('Warning: trajectory {} at point {} {} has alignment errors (spline snapping)\nCurvilinear trajectory could not be computed'.format(self.getNum(), i, self.getPositionAt(i))) else: [align, alignPoint, snappedPoint, subsegmentDistance, S, Y] = result @@ -1246,7 +1246,7 @@ result = getSYfromXY(self.getPositionAt(i),[alignments[smoothed_lanes[i]]]) # Error handling - if(result == None): + if(result is None): ## This can be triggered by tracking errors when the trajectory jumps around passed another alignment. print(' Warning: trajectory {} at point {} {} has alignment errors during trajectory smoothing and will not be corrected.'.format(self.getNum(), i, self.getPositionAt(i))) else: @@ -1333,7 +1333,7 @@ croppedImg, yCropMin, yCropMax, xCropMin, xCropMax = imageBox(img, self, instant, homography, width, height, px, py, pixelThreshold) if len(croppedImg) > 0: # != [] hog = array([cvutils.HOG(croppedImg)], dtype = np.float32) - if self.aggregatedSpeed < pedBikeSpeedTreshold or bikeCarSVM == None: + if self.aggregatedSpeed < pedBikeSpeedTreshold or bikeCarSVM is None: self.userTypes[instant] = int(pedBikeCarSVM.predict(hog)) elif self.aggregatedSpeed < bikeCarSpeedTreshold: self.userTypes[instant] = int(bikeCarSVM.predict(hog)) @@ -1358,7 +1358,7 @@ if t not in self.userTypes: self.classifyUserTypeHoGSVMAtInstant(images[t], pedBikeCarSVM, t, homography, width, height, bikeCarSVM, pedBikeSpeedTreshold, bikeCarSpeedThreshold, px, py, pixelThreshold) # compute P(Speed|Class) - if speedProbabilities == None: # equiprobable information from speed + if speedProbabilities is None: # equiprobable information from speed userTypeProbabilities = {userType2Num['car']: 1., userType2Num['pedestrian']: 1., userType2Num['bicycle']: 1.} else: userTypeProbabilities = 
{userType2Num[userTypename]: speedProbabilities[userTypename](self.aggregatedSpeed) for userTypename in speedProbabilities} @@ -1380,7 +1380,7 @@ skip frames at beginning/end?''' print('not implemented/tested yet') if not hasattr(self, projectedPositions): - if homography != None: + if homography is not None: self.projectedPositions = obj.positions.project(homography) else: self.projectedPositions = obj.positions @@ -1424,7 +1424,7 @@ def computeCentroidTrajectory(self, homography = None): self.positions = self.topLeftPositions.add(self.bottomRightPositions).multiply(0.5) - if homography != None: + if homography is not None: self.positions = self.positions.project(homography) def matches(self, obj, instant, matchingDistance):
--- a/python/pavement.py Tue Mar 24 14:17:12 2015 +0100 +++ b/python/pavement.py Tue Mar 24 18:11:28 2015 +0100 @@ -164,14 +164,14 @@ else: tmin = None if weatherDatatype == 'ec': - if data['pluie_tot'][i] != None and not np.isnan(data['pluie_tot'][i]): + if data['pluie_tot'][i] is not None and not np.isnan(data['pluie_tot'][i]): pluie_tot += data['pluie_tot'][i] - if data['neige_tot'][i] != None and not np.isnan(data['neige_tot'][i]): + if data['neige_tot'][i] is not None and not np.isnan(data['neige_tot'][i]): neige_tot += data['neige_tot'][i] - if tmax != None: + if tmax is not None: if tmax < 0: nbre_jours_T_negatif += 1 - if tmax != None and tmin != None: + if tmax is not None and tmin is not None: if tmax > 0 and tmin < 0: nbre_jours_gel_degel += 1 for l in range(len(seuils_T)): @@ -179,7 +179,7 @@ deltas_T[l] += 1 if not np.isnan(data['tmoy'][i]): tmoys.append(data['tmoy'][i]) - if tmax != None: + if tmax is not None: if tmax < 0: compteur += 1 elif tmax >= 0 and compteur >= nbre_jours_gel_consecutifs: @@ -296,7 +296,7 @@ measure variation, initial measure, time duration, lane position2, weather indicators ...''' variationData = [] - if lanePositions == None: + if lanePositions is None: nonZeroIndices = ~np.isnan(self.data[dataLabel]) days = self.data[nonZeroIndices]['jours'] dates = self.data[nonZeroIndices]['date_mesure']
--- a/python/prediction.py Tue Mar 24 14:17:12 2015 +0100 +++ b/python/prediction.py Tue Mar 24 18:11:28 2015 +0100 @@ -276,7 +276,7 @@ #if (et1.predictPosition(t1)-et2.predictPosition(t2)).norm2() < collisionDistanceThreshold: # cz = (et1.predictPosition(t1)+et2.predictPosition(t2)).multiply(0.5) cz = moving.segmentIntersection(et1.predictPosition(t1), et1.predictPosition(t1+1), et2.predictPosition(t2), et2.predictPosition(t2+1)) - if cz != None: + if cz is not None: deltaV= (et1.predictPosition(t1)- et1.predictPosition(t1+1) - et2.predictPosition(t2)+ et2.predictPosition(t2+1)).norm2() crossingZones.append(SafetyPoint(cz, et1.probability*et2.probability, abs(t1-t2)-(float(collisionDistanceThreshold)/deltaV))) t2 += 1 @@ -500,7 +500,7 @@ v2 = obj2.getVelocityAtInstant(currentInstant) intersection = moving.intersection(p1, p1+v1, p2, p2+v2) - if intersection != None: + if intersection is not None: dp1 = intersection-p1 dp2 = intersection-p2 dot1 = moving.Point.dot(dp1, v1) @@ -532,11 +532,11 @@ # p2 = secondUser.getPositionAtInstant(currentInstant) # v2 = secondUser.getVelocityAtInstant(currentInstant) # indices, intersections = firstUser.getPositions().getLineIntersections(p2, p2+v2) - # if indices != None: + # if indices is not None: # pass # else: # one has to predict !!! - if debug and intersection!= None: + if debug and intersection is not None: from matplotlib.pyplot import plot, figure, axis, title figure() plot([p1.x, intersection.x], [p1.y, intersection.y], 'r') @@ -567,7 +567,7 @@ v2 = obj2.getVelocityAtInstant(currentInstant) intersection = moving.intersection(p1, p1+v1, p2, p2+v2) - if intersection != None: + if intersection is not None: ttc = moving.Point.timeToCollision(p1, p2, v1, v2, collisionDistanceThreshold) if ttc: return [SafetyPoint(intersection, 1., ttc)], [] # (p1+v1.multiply(ttc)+p2+v2.multiply(ttc)).multiply(0.5)
--- a/python/storage.py Tue Mar 24 14:17:12 2015 +0100 +++ b/python/storage.py Tue Mar 24 18:11:28 2015 +0100 @@ -345,18 +345,18 @@ for row in cursor: if row[0] != objId: objId = row[0] - if obj != None and obj.length() == obj.positions.length(): + if obj is not None and obj.length() == obj.positions.length(): objects.append(obj) - elif obj != None: + elif obj is not None: print('Object {} is missing {} positions'.format(obj.getNum(), int(obj.length())-obj.positions.length())) obj = moving.MovingObject(row[0], timeInterval = moving.TimeInterval(row[1], row[1]), positions = moving.Trajectory([[row[2]],[row[3]]])) else: obj.timeInterval.last = row[1] obj.positions.addPositionXY(row[2],row[3]) - if obj != None and obj.length() == obj.positions.length(): + if obj is not None and obj.length() == obj.positions.length(): objects.append(obj) - elif obj != None: + elif obj is not None: print('Object {} is missing {} positions'.format(obj.getNum(), int(obj.length())-obj.positions.length())) return objects @@ -498,7 +498,7 @@ saveInteraction(cursor, inter) for indicatorName in indicatorNames: indicator = inter.getIndicator(indicatorName) - if indicator != None: + if indicator is not None: saveIndicator(cursor, inter.getNum(), indicator) except sqlite3.OperationalError as error: printDBError(error) @@ -652,7 +652,7 @@ finally: self.sechead = None else: return self.fp.readline() -def loadTrajectoriesFromVissimFile(filename, simulationStepsPerTimeUnit, nObjects = -1, warmUpLastInstant = None): +def loadTrajectoriesFromVissimFile(filename, simulationStepsPerTimeUnit, nObjects = -1, warmUpLastInstant = None, usePandas = False): '''Reads data from VISSIM .fzp trajectory file simulationStepsPerTimeUnit is the number of simulation steps per unit of time used by VISSIM for example, there seems to be 5 simulation steps per simulated second in VISSIM, @@ -663,30 +663,36 @@ objects = {} # dictionary of objects index by their id firstInstants = {} - inputfile = openCheck(filename, 
quitting = True) + if usePandas: + from pandas import read_csv + data = read_csv(filename, delimiter=';', skiprows=16) + print('Work in progress') + return [] + else: + inputfile = openCheck(filename, quitting = True) - # data = pd.read_csv(filename, skiprows=15, delimiter=';') - # skip header: 15 lines + 1 - line = readline(inputfile, '*$') - while len(line) > 0:#for line in inputfile: - data = line.strip().split(';') - objNum = int(data[1]) - instant = int(float(data[0])*simulationStepsPerTimeUnit) - s = float(data[4]) - y = float(data[5]) - lane = data[2]+'_'+data[3] - if objNum not in firstInstants: - firstInstants[objNum] = instant - if warmUpLastInstant == None or firstInstants[objNum] >= warmUpLastInstant: - if nObjects < 0 or len(objects) < nObjects: - objects[objNum] = moving.MovingObject(num = objNum, timeInterval = moving.TimeInterval(instant, instant)) - objects[objNum].curvilinearPositions = moving.CurvilinearTrajectory() - if (warmUpLastInstant == None or firstInstants[objNum] >= warmUpLastInstant) and objNum in objects: - objects[objNum].timeInterval.last = instant - objects[objNum].curvilinearPositions.addPositionSYL(s, y, lane) + # data = pd.read_csv(filename, skiprows=15, delimiter=';') + # skip header: 15 lines + 1 line = readline(inputfile, '*$') + while len(line) > 0:#for line in inputfile: + data = line.strip().split(';') + objNum = int(data[1]) + instant = int(float(data[0])*simulationStepsPerTimeUnit) + s = float(data[4]) + y = float(data[5]) + lane = data[2]+'_'+data[3] + if objNum not in firstInstants: + firstInstants[objNum] = instant + if warmUpLastInstant is None or firstInstants[objNum] >= warmUpLastInstant: + if nObjects < 0 or len(objects) < nObjects: + objects[objNum] = moving.MovingObject(num = objNum, timeInterval = moving.TimeInterval(instant, instant)) + objects[objNum].curvilinearPositions = moving.CurvilinearTrajectory() + if (warmUpLastInstant is None or firstInstants[objNum] >= warmUpLastInstant) and objNum in objects: + 
objects[objNum].timeInterval.last = instant + objects[objNum].curvilinearPositions.addPositionSYL(s, y, lane) + line = readline(inputfile, '*$') - return objects.values() + return objects.values() def loadTrajectoriesFromNgsimFile(filename, nObjects = -1, sequenceNum = -1): '''Reads data from the trajectory data provided by NGSIM project @@ -776,7 +782,7 @@ for i in xrange(int(obj.length())): p1 = positions[i] s = '{},{},{},{}'.format(obj.num,timeInterval[i],p1.x,p1.y) - if curvilinearPositions != None: + if curvilinearPositions is not None: p2 = curvilinearPositions[i] s += ',{},{}'.format(p2[0],p2[1]) f.write(s+'\n') @@ -844,7 +850,7 @@ self.useFeaturesForPrediction = config.getboolean(self.sectionHeader, 'use-features-prediction') def __init__(self, filename = None): - if filename != None: + if filename is not None: self.loadConfigFile(filename) class SceneParameters:
--- a/scripts/compute-clearmot.py Tue Mar 24 14:17:12 2015 +0100 +++ b/scripts/compute-clearmot.py Tue Mar 24 18:11:28 2015 +0100 @@ -20,7 +20,7 @@ parser.add_argument('-l', dest = 'lastInstant', help = 'last instant for measurement', required = True, type = int) args = parser.parse_args() -if args.homographyFilename != None: +if args.homographyFilename is not None: homography = loadtxt(args.homographyFilename) else: homography = None
--- a/scripts/compute-homography.py Tue Mar 24 14:17:12 2015 +0100 +++ b/scripts/compute-homography.py Tue Mar 24 18:11:28 2015 +0100 @@ -75,10 +75,10 @@ homography = np.array([]) -if args.pointCorrespondencesFilename != None: +if args.pointCorrespondencesFilename is not None: worldPts, videoPts = cvutils.loadPointCorrespondences(args.pointCorrespondencesFilename) homography, mask = cv2.findHomography(videoPts, worldPts) # method=0, ransacReprojThreshold=3 -elif args.videoFrameFilename != None and args.worldFilename != None: +elif args.videoFrameFilename is not None and args.worldFilename is not None: worldImg = plt.imread(args.worldFilename) videoImg = plt.imread(args.videoFrameFilename) if args.undistort: @@ -103,7 +103,7 @@ if homography.size>0: np.savetxt('homography.txt',homography) -if args.displayPoints and args.videoFrameFilename != None and args.worldFilename != None and homography.size>0: +if args.displayPoints and args.videoFrameFilename is not None and args.worldFilename is not None and homography.size>0: worldImg = cv2.imread(args.worldFilename) videoImg = cv2.imread(args.videoFrameFilename) if args.undistort:
--- a/scripts/create-bounding-boxes.py Tue Mar 24 14:17:12 2015 +0100 +++ b/scripts/create-bounding-boxes.py Tue Mar 24 18:11:28 2015 +0100 @@ -14,7 +14,7 @@ args = parser.parse_args() homography = None -if args.homography != None: +if args.homography is not None: homography = inv(loadtxt(args.homography)) storage.createBoundingBoxTable(args.databaseFilename, homography)
--- a/scripts/display-trajectories.py Tue Mar 24 14:17:12 2015 +0100 +++ b/scripts/display-trajectories.py Tue Mar 24 18:11:28 2015 +0100 @@ -29,7 +29,7 @@ params = storage.ProcessParameters(args.configFilename) videoFilename = params.videoFilename databaseFilename = params.databaseFilename - if params.homography != None: + if params.homography is not None: homography = inv(params.homography) else: homography = None @@ -46,19 +46,19 @@ undistortedImageMultiplication = None firstFrameNum = 0 -if not args.configFilename and args.videoFilename != None: +if not args.configFilename and args.videoFilename is not None: videoFilename = args.videoFilename -if not args.configFilename and args.databaseFilename != None: +if not args.configFilename and args.databaseFilename is not None: databaseFilename = args.databaseFilename -if not args.configFilename and args.homographyFilename != None: +if not args.configFilename and args.homographyFilename is not None: homography = inv(loadtxt(args.homographyFilename)) -if not args.configFilename and args.intrinsicCameraMatrixFilename != None: +if not args.configFilename and args.intrinsicCameraMatrixFilename is not None: intrinsicCameraMatrix = loadtxt(args.intrinsicCameraMatrixFilename) -if not args.configFilename and args.distortionCoefficients != None: +if not args.configFilename and args.distortionCoefficients is not None: distortionCoefficients = args.distortionCoefficients -if not args.configFilename and args.undistortedImageMultiplication != None: +if not args.configFilename and args.undistortedImageMultiplication is not None: undistortedImageMultiplication = args.undistortedImageMultiplication -if args.firstFrameNum != None: +if args.firstFrameNum is not None: firstFrameNum = args.firstFrameNum
--- a/scripts/play-video.py Tue Mar 24 14:17:12 2015 +0100 +++ b/scripts/play-video.py Tue Mar 24 18:11:28 2015 +0100 @@ -13,11 +13,11 @@ args = parser.parse_args() firstFrameNum = 0 -if args.firstFrameNum != None: +if args.firstFrameNum is not None: firstFrameNum = args.firstFrameNum frameRate = -1 -if args.frameRate != None: +if args.frameRate is not None: frameRate = args.frameRate cvutils.playVideo(args.videoFilename, firstFrameNum, frameRate, rescale = args.rescale)
--- a/scripts/polytracktopdtv.py Tue Mar 24 14:17:12 2015 +0100 +++ b/scripts/polytracktopdtv.py Tue Mar 24 18:11:28 2015 +0100 @@ -35,7 +35,7 @@ cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='objects_type'") data = cursor.fetchone() - if(data == None): + if(data is None): typeDict["0"] = "unknown" typeDict["1"] = "car" typeDict["2"] = "pedestrians" @@ -59,7 +59,7 @@ inc = 1000 #How many frame we fetch in the video at a time - if lastFrameNum != None: + if lastFrameNum is not None: delta = lastFrameNum-firstFrameNum if delta < inc: inc = delta @@ -74,7 +74,7 @@ time += datetime.timedelta(microseconds=deltaTimestamp*1000) currentIdx = currentIdx + inc - if lastFrameNum != None: + if lastFrameNum is not None: delta = lastFrameNum-currentIdx if delta < inc: inc = delta @@ -96,15 +96,15 @@ lastFrameNum is the last frame we want to extract (or None if we want to extract everything) ''' error = False - if sceneFilename != None: + if sceneFilename is not None: scene = utils.SceneParameters.loadConfigFile(os.path.join(workDirname, sceneFilename)) time = scene[sectionName].date inputDb = os.path.join(workDirname, scene[sectionName].databaseFilename) videoFile = os.path.join(workDirname, scene[sectionName].videoFilename) - if databaseFilename != None: + if databaseFilename is not None: inputDb = os.path.join(workDirname, databaseFilename) - if videoFilename != None: + if videoFilename is not None: videoFile = os.path.join(workDirname, videoFilename) # elif videoFolderExist == False: # print('No video path specified') @@ -113,7 +113,7 @@ videoFolderPath = os.path.join(workDirname, "videoframes/") fileName = sectionName - if videoFile != None: + if videoFile is not None: fps = cvutils.getFPS(videoFile) print('Video should run at ' + str(fps) + ' fps') deltaTimestamp = 1000.0/float(fps); @@ -147,7 +147,7 @@ zipFolder(videoFolderPath, inputZipVideoName) print('Zipping files...Done.') #We generate the structure for ZipVideo - if 
cameraCalibrationFilename != None: + if cameraCalibrationFilename is not None: calibrationFile = cameraCalibrationFilename else: calibrationFile = 'calib.json' @@ -189,7 +189,7 @@ #3) We read the bounding box table cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='bounding_boxes'") data = cursor.fetchone() - if data == None: + if data is None: print('No bounding box table. Maybe it was not generated ?') else: cursor.execute("SELECT object_id, frame_number, x_top_left, y_top_left, x_bottom_right, y_bottom_right FROM bounding_boxes")
--- a/scripts/undistort-video.py Tue Mar 24 14:17:12 2015 +0100 +++ b/scripts/undistort-video.py Tue Mar 24 18:11:28 2015 +0100 @@ -33,7 +33,7 @@ ret = True frameNum = args.firstFrameNum capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, args.firstFrameNum) - if args.lastFrameNum == None: + if args.lastFrameNum is None: from sys import maxint lastFrameNum = maxint else: