changeset 868:1fdafa9f6bf4
added colors more friendly for color blind people (thanks Ryan Louie!)
author   | Nicolas Saunier <nicolas.saunier@polymtl.ca>
---------|---------------------------------------------
date     | Wed, 01 Feb 2017 11:58:04 -0500
parents  | 003445db1e30
children | eb2f8ce2b39d
files    | python/cvutils.py scripts/compute-homography.py scripts/display-synced-trajectories.py
diffstat | 3 files changed, 44 insertions(+), 37 deletions(-)
--- a/python/cvutils.py	Thu Dec 08 18:00:53 2016 -0500
+++ b/python/cvutils.py	Wed Feb 01 11:58:04 2017 -0500
@@ -30,25 +30,23 @@
 #import aggdraw # agg on top of PIL (antialiased drawing)
 
-cvRed = (0,0,255)
-cvGreen = (0,255,0)
-cvBlue = (255,0,0)
-cvCyan = (255, 255, 0)
-cvYellow = (0, 255, 255)
-cvMagenta = (255, 0, 255)
-cvWhite = (255, 255, 255)
-cvBlack = (0,0,0)
-cvColors3 = utils.PlottingPropertyValues([cvRed,
-                                          cvGreen,
-                                          cvBlue])
-cvColors = utils.PlottingPropertyValues([cvRed,
-                                         cvGreen,
-                                         cvBlue,
-                                         cvCyan,
-                                         cvYellow,
-                                         cvMagenta,
-                                         cvWhite,
-                                         cvBlack])
+cvRed = {'default': (0,0,255),
+         'colorblind': (0,114,178)}
+cvGreen = {'default': (0,255,0),
+           'colorblind': (0,158,115)}
+cvBlue = {'default': (255,0,0),
+          'colorblind': (213,94,0)}
+cvCyan = {'default': (255, 255, 0),
+          'colorblind': (240,228,66)}
+cvYellow = {'default': (0, 255, 255),
+            'colorblind': (86,180,233)}
+cvMagenta = {'default': (255, 0, 255),
+             'colorblind': (204,121,167)}
+cvWhite = {k: (255, 255, 255) for k in ['default', 'colorblind']}
+cvBlack = {k: (0,0,0) for k in ['default', 'colorblind']}
+
+cvColors3 = {k: utils.PlottingPropertyValues([cvRed[k], cvGreen[k], cvBlue[k]]) for k in ['default', 'colorblind']}
+cvColors = {k: utils.PlottingPropertyValues([cvRed[k], cvGreen[k], cvBlue[k], cvCyan[k], cvYellow[k], cvMagenta[k], cvWhite[k], cvBlack[k]]) for k in ['default', 'colorblind']}
 
 def quitKey(key):
     return chr(key&255)== 'q' or chr(key&255) == 'Q'
@@ -146,8 +144,12 @@
         newCameraMatrix[1,2] = newImgSize[1]/2.
         return cv2.initUndistortRectifyMap(intrinsicCameraMatrix, array(distortionCoefficients), identity(3), newCameraMatrix, newImgSize, cv2.CV_32FC1)
 
-    def playVideo(filenames, windowNames = None, firstFrameNums = None, frameRate = -1, interactive = False, printFrames = True, text = None, rescale = 1., step = 1):
+    def playVideo(filenames, windowNames = None, firstFrameNums = None, frameRate = -1, interactive = False, printFrames = True, text = None, rescale = 1., step = 1, colorBlind = False):
         '''Plays the video(s)'''
+        if colorBlind:
+            colorType = 'colorblind'
+        else:
+            colorType = 'default'
         if len(filenames) == 0:
             print('Empty filename list')
             return
@@ -181,7 +183,7 @@
             print('frame shown {0}'.format(nFramesShown))
         for i in xrange(len(filenames)):
             if text is not None:
-                cv2.putText(images[i], text, (10,50), cv2.FONT_HERSHEY_PLAIN, 1, cvRed)
+                cv2.putText(images[i], text, (10,50), cv2.FONT_HERSHEY_PLAIN, 1, cvRed[colorType])
             cvImshow(windowNames[i], images[i], rescale) # cv2.imshow('frame', img)
         key = cv2.waitKey(wait)
         if saveKey(key):
@@ -302,8 +304,13 @@
 
         return croppedImg, yCropMin, yCropMax, xCropMin, xCropMax
 
-    def displayTrajectories(videoFilename, objects, boundingBoxes = {}, homography = None, firstFrameNum = 0, lastFrameNumArg = None, printFrames = True, rescale = 1., nFramesStep = 1, saveAllImages = False, nZerosFilenameArg = None, undistort = False, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1., annotations = [], gtMatches = {}, toMatches = {}):
+    def displayTrajectories(videoFilename, objects, boundingBoxes = {}, homography = None, firstFrameNum = 0, lastFrameNumArg = None, printFrames = True, rescale = 1., nFramesStep = 1, saveAllImages = False, nZerosFilenameArg = None, undistort = False, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1., annotations = [], gtMatches = {}, toMatches = {}, colorBlind = False):
         '''Displays the objects overlaid frame by frame over the video '''
+        if colorBlind:
+            colorType = 'colorblind'
+        else:
+            colorType = 'default'
+
         capture = cv2.VideoCapture(videoFilename)
         width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
         height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
@@ -348,30 +355,30 @@
                     obj.projectedPositions = obj.positions.project(homography)
                 else:
                     obj.projectedPositions = obj.positions
-            cvPlot(img, obj.projectedPositions, cvColors[obj.getNum()], frameNum-obj.getFirstInstant())
+            cvPlot(img, obj.projectedPositions, cvColors[colorType][obj.getNum()], frameNum-obj.getFirstInstant())
             if frameNum not in boundingBoxes.keys() and obj.hasFeatures():
                 imgcrop, yCropMin, yCropMax, xCropMin, xCropMax = imageBox(img, obj, frameNum, homography, width, height)
-                cv2.rectangle(img, (xCropMin, yCropMin), (xCropMax, yCropMax), cvBlue, 1)
+                cv2.rectangle(img, (xCropMin, yCropMin), (xCropMax, yCropMax), cvBlue[colorType], 1)
             objDescription = '{} '.format(obj.num)
             if moving.userTypeNames[obj.userType] != 'unknown':
                 objDescription += moving.userTypeNames[obj.userType][0].upper()
             if len(annotations) > 0: # if we loaded annotations, but there is no match
                 if frameNum not in toMatches[obj.getNum()]:
                     objDescription += " FA"
-            cv2.putText(img, objDescription, obj.projectedPositions[frameNum-obj.getFirstInstant()].asint().astuple(), cv2.FONT_HERSHEY_PLAIN, 1, cvColors[obj.getNum()])
+            cv2.putText(img, objDescription, obj.projectedPositions[frameNum-obj.getFirstInstant()].asint().astuple(), cv2.FONT_HERSHEY_PLAIN, 1, cvColors[colorType][obj.getNum()])
         # plot object bounding boxes
         if frameNum in boundingBoxes.keys():
             for rect in boundingBoxes[frameNum]:
-                cv2.rectangle(img, rect[0].asint().astuple(), rect[1].asint().astuple(), cvColors[obj.getNum()])
+                cv2.rectangle(img, rect[0].asint().astuple(), rect[1].asint().astuple(), cvColors[colorType][obj.getNum()])
         # plot ground truth
         if len(annotations) > 0:
            for gt in annotations:
                if gt.existsAtInstant(frameNum):
                    if frameNum in gtMatches[gt.getNum()]:
-                        color = cvColors[gtMatches[gt.getNum()][frameNum]] # same color as object
+                        color = cvColors[colorType][gtMatches[gt.getNum()][frameNum]] # same color as object
                    else:
-                        color = cvRed
-                        cv2.putText(img, 'Miss', gt.topLeftPositions[frameNum-gt.getFirstInstant()].asint().astuple(), cv2.FONT_HERSHEY_PLAIN, 1, cvRed)
+                        color = cvRed[colorType]
+                        cv2.putText(img, 'Miss', gt.topLeftPositions[frameNum-gt.getFirstInstant()].asint().astuple(), cv2.FONT_HERSHEY_PLAIN, 1, color)
                    cv2.rectangle(img, gt.topLeftPositions[frameNum-gt.getFirstInstant()].asint().astuple(), gt.bottomRightPositions[frameNum-gt.getFirstInstant()].asint().astuple(), color)
         # saving images and going to next
        if not saveAllImages:
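The cvutils.py patch above replaces each flat BGR constant with a dict keyed by color scheme, so every call site now selects a palette via `cvColors[colorType][...]`; the six new non-greyscale triplets appear to be values from the Okabe and Ito colorblind-safe palette. Below is a minimal self-contained sketch of the same lookup pattern; the `PlottingPropertyValues` class here is a hypothetical stand-in for the one in the project's `utils` module.

```python
import cv2
import numpy as np

# Hypothetical stand-in for utils.PlottingPropertyValues (the real class
# lives in the project's utils module): cycles through a fixed list of
# values when indexed by an arbitrary integer such as an object number.
class PlottingPropertyValues(object):
    def __init__(self, values):
        self.values = values

    def __getitem__(self, i):
        return self.values[i % len(self.values)]

# Same structure as the patched cvutils: one dict per color, keyed by scheme
cvRed = {'default': (0, 0, 255), 'colorblind': (0, 114, 178)}
cvGreen = {'default': (0, 255, 0), 'colorblind': (0, 158, 115)}
cvBlue = {'default': (255, 0, 0), 'colorblind': (213, 94, 0)}
cvColors3 = {k: PlottingPropertyValues([cvRed[k], cvGreen[k], cvBlue[k]])
             for k in ['default', 'colorblind']}

colorType = 'colorblind'  # what the new colorBlind=True flag selects
img = np.zeros((100, 300, 3), np.uint8)
for objNum in range(6):  # colors cycle, so object 3 reuses object 0's color
    cv2.circle(img, (25 + 50 * objNum, 50), 20, cvColors3[colorType][objNum], -1)
cv2.imwrite('palette-demo.png', img)
```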
--- a/scripts/compute-homography.py	Thu Dec 08 18:00:53 2016 -0500
+++ b/scripts/compute-homography.py	Wed Feb 01 11:58:04 2017 -0500
@@ -131,13 +131,13 @@
     projectedVideoPts = cvutils.projectArray(homography, videoPts.T).T
     for i in range(worldPts.shape[0]):
         # world image
-        cv2.circle(worldImg,tuple(np.int32(np.round(worldPts[i]/args.unitsPerPixel))),2,cvutils.cvBlue)
-        cv2.circle(worldImg,tuple(np.int32(np.round(projectedVideoPts[i]/args.unitsPerPixel))),2,cvutils.cvRed)
-        cv2.putText(worldImg, str(i+1), tuple(np.int32(np.round(worldPts[i]/args.unitsPerPixel))+5), cv2.FONT_HERSHEY_PLAIN, 2., cvutils.cvBlue, 2)
+        cv2.circle(worldImg,tuple(np.int32(np.round(worldPts[i]/args.unitsPerPixel))),2,cvutils.cvBlue['default'])
+        cv2.circle(worldImg,tuple(np.int32(np.round(projectedVideoPts[i]/args.unitsPerPixel))),2,cvutils.cvRed['default'])
+        cv2.putText(worldImg, str(i+1), tuple(np.int32(np.round(worldPts[i]/args.unitsPerPixel))+5), cv2.FONT_HERSHEY_PLAIN, 2., cvutils.cvBlue['default'], 2)
         # video image
-        cv2.circle(videoImg,tuple(np.int32(np.round(videoPts[i]))),2,cvutils.cvBlue)
-        cv2.circle(videoImg,tuple(np.int32(np.round(projectedWorldPts[i]))),2,cvutils.cvRed)
-        cv2.putText(videoImg, str(i+1), tuple(np.int32(np.round(videoPts[i])+5)), cv2.FONT_HERSHEY_PLAIN, 2., cvutils.cvBlue, 2)
+        cv2.circle(videoImg,tuple(np.int32(np.round(videoPts[i]))),2,cvutils.cvBlue['default'])
+        cv2.circle(videoImg,tuple(np.int32(np.round(projectedWorldPts[i]))),2,cvutils.cvRed['default'])
+        cv2.putText(videoImg, str(i+1), tuple(np.int32(np.round(videoPts[i])+5)), cv2.FONT_HERSHEY_PLAIN, 2., cvutils.cvBlue['default'], 2)
     cv2.imshow('video frame',videoImg)
     cv2.imshow('world image',worldImg)
     cv2.waitKey()
--- a/scripts/display-synced-trajectories.py	Thu Dec 08 18:00:53 2016 -0500
+++ b/scripts/display-synced-trajectories.py	Wed Feb 01 11:58:04 2017 -0500
@@ -101,10 +101,10 @@
                         obj.projectedPositions[i] = obj.positions.project(homographies[i])
                     else:
                         obj.projectedPositions[i] = obj.positions
-                cvutils.cvPlot(images[i], obj.projectedPositions[i], cvutils.cvColors[obj.getNum()], int(mergedFirstFrameNum+nFramesShown)-obj.getFirstInstant())
+                cvutils.cvPlot(images[i], obj.projectedPositions[i], cvutils.cvColors['default'][obj.getNum()], int(mergedFirstFrameNum+nFramesShown)-obj.getFirstInstant())
                 #if text is not None:
-                #    cv2.putText(images[i], text, (10,50), cv2.FONT_HERSHEY_PLAIN, 1, cvRed)
+                #    cv2.putText(images[i], text, (10,50), cv2.FONT_HERSHEY_PLAIN, 1, cvRed['default'])
                 cvutils.cvImshow(windowNames[i], images[i], rescale) # cv2.imshow('frame', img)
             key = cv2.waitKey(wait)
             #if cvutils.saveKey(key):
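Taken together, the change is opt-in: the two scripts above simply pin the 'default' scheme, and callers get the old colors unless they pass the new flag. A hedged usage sketch follows; the video filename and empty object list are placeholders, not from this changeset, while the keyword arguments follow the patched signatures above.

```python
import cvutils  # from the project's python/ directory

# Placeholder: objects would normally be loaded from the project's
# trajectory storage (e.g. moving.MovingObject instances).
objects = []

# Same calls as before, with the new opt-in flag selecting the
# colorblind-friendly palette instead of the default one
cvutils.displayTrajectories('video.avi', objects, colorBlind = True)
cvutils.playVideo(['video.avi'], colorBlind = True)
```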