Mercurial Hosting > traffic-intelligence
diff scripts/display-synced-trajectories.py @ 837:e01cabca4c55
minor modifications to merge-features
author:    Nicolas Saunier <nicolas.saunier@polymtl.ca>
date:      Wed, 06 Jul 2016 10:58:18 -0400
parents:   7058a40a4bbc
children:  2918de3d40fc
line wrap: on
line diff
--- a/scripts/display-synced-trajectories.py	Wed Jul 06 10:18:35 2016 -0400
+++ b/scripts/display-synced-trajectories.py	Wed Jul 06 10:58:18 2016 -0400
@@ -2,10 +2,10 @@
 import sys, argparse, os.path
 from datetime import datetime, timedelta
-from numpy import array
+import numpy as np
 import cv2
 import cvutils, utils, storage
-from metadata import createDatabase, Site, VideoSequence
+from metadata import createDatabase, Site, CameraView, VideoSequence
 
 parser = argparse.ArgumentParser(description='The program displays several views of the same site synchronously.')
 parser.add_argument('-i', dest = 'metadataFilename', help = 'name of the metadata file', required = True)
@@ -31,13 +31,14 @@
 if startTime > mergedSequence.startTime:
     mergedFirstFrameNum = (startTime-mergedSequence.startTime).seconds*mergedSequence.cameraView.cameraType.frameRate
 
-cameraViews = session.query(CameraView).filter(CameraView.site == mergedSequence.site).filter(CameraView.virtual == False)
+cameraViews = session.query(CameraView).filter(CameraView.site == mergedSequence.cameraView.site).filter(CameraView.virtual == False).all()
 videoSequences = session.query(VideoSequence).filter(VideoSequence.virtual == False).filter(VideoSequence.startTime <= startTime).all()
 #videoSequences.remove(mergedSequence)
 videoSequences = [v for v in videoSequences if v.cameraView in cameraViews and v.containsInstant(startTime)]
 filenames = [dirname+os.path.sep+v.getVideoSequenceFilename() for v in videoSequences]
 firstFrameNums = [v.getFrameNum(startTime) for v in videoSequences]
 windowNames = [v.cameraView.description for v in videoSequences]
+homographies = [np.inv(np.loadtxt(dirname+os.path.sep+v.cameraView.getHomographyFilename())) for v in videoSequences]
 
 objects = storage.loadTrajectoriesFromSqlite(dirname+os.path.sep+mergedSequence.getDatabaseFilename(), args.trajectoryType)
 
@@ -59,7 +60,7 @@
 step = 1
 rescale = 1.
 captures = [cv2.VideoCapture(fn) for fn in filenames]
-if array([cap.isOpened() for cap in captures]).all():
+if np.array([cap.isOpened() for cap in captures]).all():
     key = -1
     ret = True
     nFramesShown = 0
@@ -73,14 +74,16 @@
             ret, img = cap.read()
             rets.append(ret)
             images.append(img)
-        if array(rets).all():
+        if np.array(rets).all():
             #if printFrames:
             print('frame shown {0}'.format(nFramesShown))
             for i in xrange(len(filenames)):
                 for obj in objects:
+                    if obj.existsAtInstant():
-                #if text is not None:
-                #    cv2.putText(images[i], text, (10,50), cv2.FONT_HERSHEY_PLAIN, 1, cvRed)
+
+                    #if text is not None:
+                    #    cv2.putText(images[i], text, (10,50), cv2.FONT_HERSHEY_PLAIN, 1, cvRed)
                 cvutils.cvImshow(windowNames[i], images[i], rescale) # cv2.imshow('frame', img)
             key = cv2.waitKey(wait)
             #if cvutils.saveKey(key):