view scripts/display-synced-trajectories.py @ 832:02f2809c2f66

work in progress on synced trajectory display
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Wed, 29 Jun 2016 17:57:21 -0400
parents
children 7058a40a4bbc

#! /usr/bin/env python
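'''Work-in-progress script: display several camera views of the same site
synchronously, with the trajectories from the merged database overlaid.'''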

import sys, argparse, os.path
from datetime import datetime, timedelta
from numpy import array
import cv2
import cvutils, utils, storage
from metadata import createDatabase, Site, VideoSequence

parser = argparse.ArgumentParser(description='The program displays several views of the same site synchronously.')
parser.add_argument('-i', dest = 'metadataFilename', help = 'name of the metadata file', required = True)
#parser.add_argument('-n', dest = 'siteId', help = 'site id or site name', required = True)
parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file', required = True)
parser.add_argument('-f', dest = 'startTime', help = 'time to start playing (format %%Y-%%m-%%d %%H:%%M:%%S, eg 2011-06-22 10:00:39)', required = True)
parser.add_argument('-t', dest = 'trajectoryType', help = 'type of trajectories to display', choices = ['feature', 'object'], default = 'object')
parser.add_argument('-r', dest = 'rescale', help = 'rescaling factor for the displayed image', default = 1., type = float)
parser.add_argument('-s', dest = 'step', help = 'display every step-th frame', default = 1, type = int)

args = parser.parse_args()

session = createDatabase(args.metadataFilename)
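# the session gives access to the metadata tables (sites, camera views, video sequences)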

mergedSequence = session.query(VideoSequence).filter(VideoSequence.databaseFilename == args.databaseFilename).first()
if mergedSequence is None:
    print('Video sequence {} was not found in {}. Exiting'.format(args.databaseFilename, args.metadataFilename))
    sys.exit()

dirname = os.path.split(args.metadataFilename)[0]
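# video and database filenames stored in the metadata are relative to the metadata file location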

startTime = datetime.strptime(args.startTime, utils.datetimeFormat)
# TODO issue with framerate
if startTime > mergedSequence.startTime:
    mergedFirstFrameNum = mergedSequence.getFrameNum(startTime) # frame number of startTime in the merged sequence
else:
    mergedFirstFrameNum = 0 # assume display starts at the beginning of the merged sequence

videoSequences = session.query(VideoSequence).filter(VideoSequence.site == mergedSequence.site).filter(VideoSequence.startTime <= startTime).all()
if mergedSequence in videoSequences:
    videoSequences.remove(mergedSequence)
videoSequences = [v for v in videoSequences if v.containsInstant(startTime)]
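# one window per remaining view, each started at the frame corresponding to startTime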
filenames = [dirname+os.path.sep+v.getVideoSequenceFilename() for v in videoSequences]
firstFrameNums = [v.getFrameNum(startTime) for v in videoSequences]
windowNames = [v.cameraView.description for v in videoSequences]

objects = storage.loadTrajectoriesFromSqlite(dirname+os.path.sep+mergedSequence.getDatabaseFilename(), args.trajectoryType)
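# the trajectories are assumed to be timestamped in frame numbers of the merged video sequence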

#def playVideo(filenames, windowNames = None, firstFrameNums = None, frameRate = -1, interactive = False, printFrames = True, text = None, rescale = 1., step = 1):
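# the code below is adapted from the playVideo function whose signature is commented above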
if len(filenames) == 0:
    print('Empty filename list')
    sys.exit()

if windowNames is None:
    windowNames = ['frame{}'.format(i) for i in xrange(len(filenames))]
#wait = 5
#if rescale == 1.:
for windowName in windowNames:
    cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
#if frameRate > 0:
#    wait = int(round(1000./frameRate))
#if interactive:
wait = 0 # wait for a key press after each frame (interactive mode)
step = args.step
rescale = args.rescale
captures = [cv2.VideoCapture(fn) for fn in filenames]
if array([cap.isOpened() for cap in captures]).all():
    key = -1
    ret = True
    nFramesShown = 0
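    # position each capture at the frame corresponding to startTime in its sequence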
    if firstFrameNums is not None:
        for i in xrange(len(captures)):
            captures[i].set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNums[i])
    while ret and not cvutils.quitKey(key):
        rets = []
        images = []
        for cap in captures:
            ret, img = cap.read()
            rets.append(ret)
            images.append(img)
        if array(rets).all():
            #if printFrames:
            print('frame shown {0}'.format(nFramesShown))
            for i in xrange(len(filenames)):
                for obj in objects:
                    if obj.existsAtInstant(mergedFirstFrameNum+nFramesShown):
                        pass # TODO project the object position into view i and draw it on images[i]
                #if text is not None:
                #    cv2.putText(images[i], text, (10,50), cv2.FONT_HERSHEY_PLAIN, 1, cvRed)
                cvutils.cvImshow(windowNames[i], images[i], rescale) # cv2.imshow('frame', img)
            key = cv2.waitKey(wait)
            #if cvutils.saveKey(key):
            #    cv2.imwrite('image-{}.png'.format(frameNum), img)
            nFramesShown += step
            if step > 1:
                for i in xrange(len(captures)):
                    captures[i].set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNums[i]+nFramesShown)
    cv2.destroyAllWindows()
else:
    print('Video captures for {} failed'.format(filenames))