changeset 334:1d90e9080cb2

moved python scripts to the scripts directory
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Fri, 14 Jun 2013 10:34:11 -0400
parents c9201f6b143a
children 3950bfe22768
files python/calibration-translation.py python/compute-homography.py python/compute-object-from-features.py python/delete-object-tables.py python/display-trajectories.py python/offset-trajectories.py python/play-video.py python/poly-utils.py python/poly_utils.py python/safety-analysis.py scripts/compute-homography.py scripts/delete-object-tables.py scripts/display-trajectories.py scripts/play-video.py scripts/safety-analysis.py scripts/test-compute-object-position-from-features.py
diffstat 16 files changed, 424 insertions(+), 563 deletions(-)
--- a/python/calibration-translation.py	Fri Jun 14 10:25:00 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,116 +0,0 @@
-#!/usr/bin/env python
-
-import sys
-import os
-
-import matplotlib.mlab as pylab
-import matplotlib.pyplot as plt
-import numpy as np
-
-import cv2
-import utils
-import cvutils
-
-# development for the data collected and stabilized by Paul in Summer 2011
-# todo write help, add options to control the parameters for matching (n points and distance)
-
-options = utils.parseCLIOptions('Program to re-calibrate an initial calibration based on point correspondences by adjusting the points to slightly different viewpoints, where all the points are still visible\n\nUsage: ', ['ref_video=', 'ref_points='], sys.argv, ['mask_img='])
-
-referenceVideoFilename=options['--ref_video']
-wldPts, imgPts = cvutils.loadPointCorrespondences(options['--ref_points'])
-
-def translatePoints(points, t):
-    'points is Nx2, t is [x,y]'
-    translated = points.copy()
-    for i in xrange(2):
-        translated[:,i] += t[i]
-    return translated
-
-filenames = [f for f in utils.listfiles('.','avi')] # directory to examine should be current directory
-
-referenceVideoIndex = filenames.index(referenceVideoFilename)
-indices = set(range(len(filenames)))
-indices.discard(referenceVideoIndex)
-
-images = {}
-captures = {}
-
-captures[referenceVideoFilename] = cv2.VideoCapture(referenceVideoFilename)
-(ret, img) = captures[referenceVideoFilename].read()
-images[referenceVideoFilename] = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
-
-# load a mask image to compute the translation
-if '--mask_img' in options.keys():
-    maskImg = cv2.imread(options['--mask_img'], cv2.CV_LOAD_IMAGE_GRAYSCALE)
-else:
-    maskImg = np.ones(images[referenceVideoFilename].shape, dtype=np.uint8)
-
-referenceFeatures = cv2.goodFeaturesToTrack(images[referenceVideoFilename], 1000, 0.02, 2, useHarrisDetector = True, mask=maskImg)
-displayRef = cv2.cvtColor(images[referenceVideoFilename], cv2.COLOR_GRAY2RGB)
-for j,p in enumerate(imgPts):
-    cv2.circle(displayRef, tuple(p), 3, (255,0,0))
-    cv2.putText(displayRef, str(j+1), tuple(p), cv2.FONT_HERSHEY_PLAIN, 1, (255,0,0))
-cv2.imshow('Reference',displayRef)
-
-# get suitable image references for each video
-for f in filenames: 
-    captures[f] = cv2.VideoCapture(f)
-    frameFilename = utils.removeExtension(f)+'-frame.png' # TODO if frame image already exists, no need to search for it again
-    if not os.path.exists(frameFilename):
-        key = -1
-        while chr(key&255) != 'y':
-            (ret, img) = captures[f].read()
-            cv2.imshow('Image',img)
-            print('Are the reference points visible in the image? (y/n)')
-            key = cv2.waitKey(0)
-
-        images[f] = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
-        cv2.imwrite(frameFilename, img)
-    else:
-        images[f] = cv2.imread(frameFilename, cv2.CV_LOAD_IMAGE_GRAYSCALE)
-    #features[f] = cv2.goodFeaturesToTrack(images[f], 1000, 0.02, 2, useHarrisDetector = True, mask=maskImg) # todo put parameters on the command line ? 
-    # goodFeaturesToTrack(image, maxCorners, qualityLevel, minDistance[, corners[, mask[, blockSize[, useHarrisDetector[, k]]]]])
-    # display features
-    # if False:
-    #     display = img.copy()#cv2.cvtColor(images[f], cv2.COLOR_GRAY2RGB) #.copy()
-    #     for p in features[f]:
-    #         cv2.circle(display, tuple(p[0]), 3, (255,0,0))
-    #     cv2.imshow('Reference',display)
-    #     cv2.waitKey()
-
-plt.close('all')
-
-# validate or input point correspondences and compute homography
-for i in indices:
-    t = cvutils.computeTranslation(images[filenames[referenceVideoIndex]], images[filenames[i]], referenceFeatures, 100, 10)
-    print filenames[i],t
-    key = -1
-    if t is not None: # show translated points and ask if ok
-        displayImg = cv2.cvtColor(images[filenames[i]], cv2.COLOR_GRAY2RGB) #.copy()
-        for p in imgPts:
-            cv2.circle(displayImg, tuple(p+t[0]), 3, (255,0,0))
-        cv2.imshow('Image',displayImg)
-
-        while not(chr(key&255) == 'y' or chr(key&255) == 'n'):
-            print('Are the translated points correctly located? (y/n)')
-            key = cv2.waitKey(0)
-        if chr(key&255) == 'y': # compute homography with the translated points
-            newImgPts = np.array([p+t[0] for p in imgPts])
-    else:
-        print('No translation could be found automatically. You will have to manually input world reference points.')
-
-    if t is None or chr(key&255) != 'y': # if no translation could be computed or it is not satisfactory
-        print('Select the corresponding points in the same order as in the reference image')
-        plt.figure(1)
-        plt.imshow(displayRef)
-        plt.figure(2)
-        plt.imshow(img)
-        plt.show()
-        newImgPts = np.array([list(p) for p in plt.ginput(n=wldPts.shape[0], timeout=-1)], dtype = np.float32)
-
-    homography, mask = cv2.findHomography(newImgPts, wldPts) # method=0, ransacReprojThreshold=3
-    print homography
-    np.savetxt(utils.removeExtension(filenames[i])+'-homography.txt',homography)
-    np.savetxt(utils.removeExtension(filenames[i])+'-point-correspondences.txt', np.append(wldPts.T, newImgPts.T, axis=0))
-
-cv2.destroyAllWindows()
--- a/python/compute-homography.py	Fri Jun 14 10:25:00 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,122 +0,0 @@
-#! /usr/bin/env python
-
-import sys,getopt
-
-import matplotlib.pyplot as plt
-import numpy as np
-import cv2
-
-import cvutils
-import utils
-
-options, args = getopt.getopt(sys.argv[1:], 'hp:i:w:n:u:',['help'])
-options = dict(options)
-
-# TODO process camera intrinsic and extrinsic parameters to obtain image to world homography, taking example from Work/src/python/generate-homography.py script
-# cameraMat = load(videoFilenamePrefix+'-camera.txt');
-# T1 = cameraMat[3:6,:].copy();
-# A = cameraMat[0:3,0:3].copy();
-
-# # pay attention, rotation may be the transpose
-# # R = T1[:,0:3].T;
-# R = T1[:,0:3];
-# rT = dot(R, T1[:,3]/1000);
-# T = zeros((3,4),'f');
-# T[:,0:3] = R[:];
-# T[:,3] = rT;
-
-# AT = dot(A,T);
-
-# nPoints = 4;
-# worldPoints = cvCreateMat(nPoints, 3, CV_64FC1);
-# imagePoints = cvCreateMat(nPoints, 3, CV_64FC1);
-
-# # extract homography from the camera calibration
-# worldPoints = cvCreateMat(4, 3, CV_64FC1);
-# imagePoints = cvCreateMat(4, 3, CV_64FC1);
-
-# worldPoints[0,:] = [[1, 1, 0]];
-# worldPoints[1,:] = [[1, 2, 0]];
-# worldPoints[2,:] = [[2, 1, 0]];
-# worldPoints[3,:] = [[2, 2, 0]];
-
-# wPoints = [[1,1,2,2],
-#            [1,2,1,2],
-#            [0,0,0,0]];
-# iPoints = utils.worldToImage(AT, wPoints);
-
-# for i in range(nPoints):
-#     imagePoints[i,:] = [iPoints[:,i].tolist()];
-
-# H = cvCreateMat(3, 3, CV_64FC1);
-
-# cvFindHomography(imagePoints, worldPoints, H);
-
-if '--help' in options.keys() or '-h' in options.keys() or len(options) == 0:
-    print('Usage: {0} --help|-h [-p point-correspondences.txt] [-i video-frame] [-w world-frame] [-n number-points] [-u units-per-pixel=1]'.format(sys.argv[0]))
-    print('''The input data can be provided either as point correspondences already saved
- in a text file, or input by clicking a certain number of points (>=4)
- in a video frame and in a world image.
-
-The point correspondence file contains at least 4 non-collinear point coordinates
-with the following format:
- - the first two lines are the x and y coordinates in the projected space (usually world space)
- - the last two lines are the x and y coordinates in the origin space (usually image space)
-
-If providing video and world images, with a number of points to input
-and a ratio to convert pixels to world distance units (e.g. meters per pixel),
-the images will be shown in turn and the user should click
- the corresponding points, in the same order, in the world and image spaces.''')
-    sys.exit()
-
-homography = np.array([])
-if '-p' in options.keys():
-    worldPts, videoPts = cvutils.loadPointCorrespondences(options['-p'])
-    homography, mask = cv2.findHomography(videoPts, worldPts) # method=0, ransacReprojThreshold=3
-elif '-i' in options.keys() and '-w' in options.keys():
-    nPoints = 4
-    if '-n' in options.keys():
-        nPoints = int(options['-n'])
-    unitsPerPixel = 1
-    if '-u' in options.keys():
-        unitsPerPixel = float(options['-u'])
-    worldImg = plt.imread(options['-w'])
-    videoImg = plt.imread(options['-i'])
-    print('Click on {0} points in the video frame'.format(nPoints))
-    plt.figure()
-    plt.imshow(videoImg)
-    videoPts = np.array(plt.ginput(nPoints))
-    print('Click on {0} points in the world image'.format(nPoints))
-    plt.figure()
-    plt.imshow(worldImg)
-    worldPts = unitsPerPixel*np.array(plt.ginput(nPoints))
-    plt.close('all')
-    homography, mask = cv2.findHomography(videoPts, worldPts)
-    # save the points in file
-    f = open('point-correspondences.txt', 'a')
-    np.savetxt(f, worldPts.T)
-    np.savetxt(f, videoPts.T)
-    f.close()
-
-if homography.size>0:
-    np.savetxt('homography.txt',homography)
-
-if '-i' in options.keys() and homography.size>0:
-    videoImg = cv2.imread(options['-i'])
-    worldImg = cv2.imread(options['-w'])
-    invHomography = np.linalg.inv(homography)
-    projectedWorldPts = cvutils.projectArray(invHomography, worldPts.T).T
-    if '-u' in options.keys():
-        unitsPerPixel = float(options['-u'])        
-        projectedVideoPts = cvutils.projectArray(invHomography, videoPts.T).T
-    for i in range(worldPts.shape[0]):
-        cv2.circle(videoImg,tuple(np.int32(np.round(videoPts[i]))),2,cvutils.cvRed)
-        cv2.circle(videoImg,tuple(np.int32(np.round(projectedWorldPts[i]))),2,cvutils.cvBlue)
-        if '-u' in options.keys():
-            cv2.circle(worldImg,tuple(np.int32(np.round(worldPts[i]/unitsPerPixel))),2,cvutils.cvRed)
-            cv2.circle(worldImg,tuple(np.int32(np.round(projectedVideoPts[i]/unitsPerPixel))),2,cvutils.cvRed)
-        #print('img: {0} / projected: {1}'.format(videoPts[i], p))
-    cv2.imshow('video frame',videoImg)
-    if '-u' in options.keys():
-        cv2.imshow('world image',worldImg)
-    cv2.waitKey()
--- a/python/compute-object-from-features.py	Fri Jun 14 10:25:00 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,141 +0,0 @@
-#!/usr/bin/env python
-
-import sys
-
-import matplotlib.mlab as pylab
-import matplotlib.pyplot as plt
-import numpy as np
-
-import cv
-import utils
-import cvutils
-import ubc_utils
-import moving
-
-# use something like getopt to manage arguments if necessary
-
-if len(sys.argv) < 3:
-    print('Usage: {0} <video-filename> <n-objects>'.format(sys.argv[0]))
-    sys.exit()
-
-if sys.argv[1].endswith('.avi'):
-    videoFilenamePrefix = utils.removeExtension(sys.argv[1],'.')
-else:
-    videoFilenamePrefix = sys.argv[1]
-
-objectNum = int(sys.argv[2])
-
-objects = ubc_utils.loadTrajectories(videoFilenamePrefix+'-objects.txt', objectNum+1)
-obj = objects[objectNum]
-features = ubc_utils.loadTrajectories(videoFilenamePrefix+'-features.txt', max(obj.featureNumbers)+1)
-h = np.loadtxt(videoFilenamePrefix+'-homography.txt')
-
-invh = cvutils.invertHomography(h)
-
-def computeGroundTrajectory(features, homography, timeInterval = None):
-    '''Computes a trajectory for the set of features as the closest point to the ground:
-    at each instant, keeps the feature with the largest y-coordinate after projecting to image space with the homography'''
-    if not timeInterval:
-        raise Exception('not implemented') # compute from the features
-    
-    yCoordinates = -np.ones((len(features),int(timeInterval.length())))
-    for i,f in enumerate(features):
-        traj = f.getPositions().asArray()
-        imgTraj = cvutils.projectArray(homography, traj)
-        yCoordinates[i,f.getFirstInstant()-timeInterval.first:f.getLastInstant()+1-timeInterval.first] = imgTraj[1,:]
-
-    indices = np.argmax(yCoordinates,0)
-    newTraj = moving.Trajectory()
-    for j,idx in enumerate(indices):
-        newTraj.addPosition(features[idx].getPositionAtInstant(j+timeInterval.first))
-        #newVelocities.addPosition(features[obj.featureNumbers[idx]].getVelocityAtInstant(j+obj.getFirstInstant()))
-
-    return newTraj
-
-def computeMedianTrajectory(features, timeInterval = None):
-    if not timeInterval:
-        raise Exception('not implemented') # compute from the features
-    
-    newTraj = moving.Trajectory()
-    for t in timeInterval:
-        points = []
-        for f in features:
-            if f.existsAtInstant(t):
-                points.append(f.getPositionAtInstant(t).aslist())
-        med = np.median(np.array(points), 0)
-        newTraj.addPositionXY(med[0], med[1])
-
-    return newTraj
-
-# TODO version median: conversion to large matrix will not work, have to do it frame by frame
-
-def kalmanFilter(positions, velocities, processNoiseCov, measurementNoiseCov):
-    kalman=cv.CreateKalman(6, 4)
-    kalman.transition_matrix[0,2]=1
-    kalman.transition_matrix[0,4]=1./2
-    kalman.transition_matrix[1,3]=1
-    kalman.transition_matrix[1,5]=1./2
-    kalman.transition_matrix[2,4]=1
-    kalman.transition_matrix[3,5]=1
-
-    cv.SetIdentity(kalman.measurement_matrix, 1.)
-    cv.SetIdentity(kalman.process_noise_cov, processNoiseCov)
-    cv.SetIdentity(kalman.measurement_noise_cov, measurementNoiseCov)
-    cv.SetIdentity(kalman.error_cov_post, 1.)
-
-    p = positions[0]
-    v = velocities[0]
-    v2 = velocities[2]
-    a = (v2-v).multiply(0.5)
-    kalman.state_post[0,0]=p.x
-    kalman.state_post[1,0]=p.y
-    kalman.state_post[2,0]=v.x
-    kalman.state_post[3,0]=v.y
-    kalman.state_post[4,0]=a.x
-    kalman.state_post[5,0]=a.y
-    
-    filteredPositions = moving.Trajectory()
-    filteredVelocities = moving.Trajectory()
-    measurement = cv.CreateMat(4,1,cv.CV_32FC1)
-    for i in xrange(positions.length()):
-        cv.KalmanPredict(kalman) # no control
-        p = positions[i]
-        v = velocities[i]
-        measurement[0,0] = p.x
-        measurement[1,0] = p.y
-        measurement[2,0] = v.x
-        measurement[3,0] = v.y
-        cv.KalmanCorrect(kalman, measurement)
-        filteredPositions.addPositionXY(kalman.state_post[0,0], kalman.state_post[1,0])
-        filteredVelocities.addPositionXY(kalman.state_post[2,0], kalman.state_post[3,0])
-
-    return (filteredPositions, filteredVelocities)
-
-groundTrajectory = computeGroundTrajectory([features[i] for i in obj.featureNumbers], invh, obj.getTimeInterval())
-(filteredPositions, filteredVelocities) = kalmanFilter(groundTrajectory, obj.getVelocities(), 0.1, 0.1)
-
-#medianTrajectory = computeMedianTrajectory([features[i] for i in obj.featureNumbers], obj.getTimeInterval())
-
-delta = []
-for t in obj.getTimeInterval():
-    p1 = obj.getPositionAtInstant(t)
-    p2 = groundTrajectory[t-obj.getFirstInstant()]
-    delta.append((p1-p2).aslist())
-
-delta = np.median(delta, 0)
-
-translated = moving.Trajectory()
-for t in obj.getTimeInterval():
-    p1 = obj.getPositionAtInstant(t)
-    p1.x -= delta[0]
-    p1.y -= delta[1]
-    translated.addPosition(p1)
-
-plt.clf()
-obj.draw('rx-')
-for fnum in obj.featureNumbers: features[fnum].draw()
-groundTrajectory.draw('bx-')
-filteredPositions.draw('gx-')
-translated.draw('kx-')
-#medianTrajectory.draw('kx-')
-plt.axis('equal')
--- a/python/delete-object-tables.py	Fri Jun 14 10:25:00 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,15 +0,0 @@
-#! /usr/bin/env python
-
-import sys,getopt
-
-import utils
-import storage
-
-options, args = getopt.getopt(sys.argv[1:], 'h',['help'])
-options = dict(options)
-
-if '--help' in options.keys() or '-h' in options.keys() or len(args) == 0:
-    print('Usage: {0} --help|-h <database-filename.sqlite>'.format(sys.argv[0]))
-    sys.exit()
-
-storage.removeObjectsFromSqlite(args[0])
--- a/python/display-trajectories.py	Fri Jun 14 10:25:00 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,46 +0,0 @@
-#! /usr/bin/env python
-
-import sys,getopt
-
-import storage, cvutils, utils
-
-from numpy.linalg.linalg import inv
-from numpy import loadtxt
-
-options, args = getopt.getopt(sys.argv[1:], 'hi:d:t:o:f:',['help']) 
-# alternative long names are a pain to support: 'video-filename=', 'database-filename=', 'type='
-
-options = dict(options)
-
-print options, args
-
-if '--help' in options.keys() or '-h' in options.keys() or len(sys.argv) == 1:
-    print('Usage: '+sys.argv[0]+' --help|-h -i video-filename -d database-filename [-t object_type] [-o image2world_homography] [-f first_frame]\n'
-          +'Or   : '+sys.argv[0]+' [-t object_type] config_file.cfg\n\n'
-          'Order matters between positional and named arguments\n'
-          'object_type can be feature or object')
-    sys.exit()
-
-objectType = 'feature'
-if '-t' in options.keys():
-    objectType = options['-t']
-
-if len(args)>0: # consider there is a configuration file
-    params = utils.TrackingParameters()
-    params.loadConfigFile(args[0])
-    videoFilename = params.videoFilename
-    databaseFilename = params.databaseFilename
-    homography = inv(params.homography)
-    firstFrameNum = params.firstFrameNum
-else:
-    videoFilename = options['-i']
-    databaseFilename = options['-d']
-    homography = None
-    if '-o' in options.keys():
-        homography = inv(loadtxt(options['-o']))            
-    firstFrameNum = 0
-    if '-f' in options.keys():
-        firstFrameNum = int(options['-f'])
-
-objects = storage.loadTrajectoriesFromSqlite(databaseFilename, objectType)
-cvutils.displayTrajectories(videoFilename, objects, homography, firstFrameNum)
--- a/python/offset-trajectories.py	Fri Jun 14 10:25:00 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,23 +0,0 @@
-#! /usr/bin/env python
-import sys
-
-import ubc_utils
-import utils
-
-if len(sys.argv) < 3:
-    print("Usage: %s filename offsetframes")
-    sys.exit()
-
-nFrames = int(sys.argv[2])
-
-def modifyLines(objectNum, lines):
-    result = lines
-    tmp = lines[0].split(" ")
-    firstInstant = int(tmp[1])+nFrames
-    lastInstant = int(tmp[2])+nFrames
-    tmp[1] = str(firstInstant)
-    tmp[2] = str(lastInstant)
-    result[0] = " ".join(tmp)
-    return result
-
-ubc_utils.modifyTrajectoryFile(modifyLines, sys.argv[1], sys.argv[1]+".new")
--- a/python/play-video.py	Fri Jun 14 10:25:00 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,22 +0,0 @@
-#! /usr/bin/env python
-
-import sys,getopt
-import cvutils
-
-options, args = getopt.getopt(sys.argv[1:], 'hi:f:',['help', 'fps=']) 
-options = dict(options)
-print options
-
-if '--help' in options.keys() or '-h' in options.keys() or len(sys.argv) == 1:
-    print('Usage: '+sys.argv[0]+' --help|-h -i video-filename [-f first_frame] [--fps frame_rate]')
-    sys.exit()
-
-firstFrameNum = 0
-if '-f' in options.keys():
-    firstFrameNum = int(options['-f'])
-
-frameRate = -1
-if '--fps' in options.keys():
-    frameRate = int(options['--fps'])
-
-cvutils.playVideo(options['-i'], firstFrameNum, frameRate)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/python/poly-utils.py	Fri Jun 14 10:34:11 2013 -0400
@@ -0,0 +1,49 @@
+#! /usr/bin/env python
+'''Various utilities to load data saved by the POLY new output(s)'''
+import sys
+import utils
+from moving import  TimeInterval
+import numpy as np
+
+__metaclass__ = type
+
+# input variables
+#dirname= 'G:/0-phdstart/Code/trial/indicatorsNew/'
+#extension= '-indicatorsNew.csv'
+#indicatorsNames= {1:'Distance',2:'Cosine',3:'collision Course Angle',4:'Velocity Cosine',5:'Velocity Angle',6:'Speed Differential',7:'Collision Probability',8:'Severity Index',9:'TTC'}
+# min distance case
+dirname= 'G:/0-phdstart/Code/trial/minDistanceIndicator/'
+extension= '-minDistanceInd.csv'
+indicatorsNames= {1:'minDistance'}
+
+def loadNewInteractions(videoFilename,interactionType, roaduserNum1,roaduserNum2, selectedIndicators=[]):
+    '''Loads interactions from the POLY traffic event format'''
+    from events import Interaction 
+    from indicators import SeverityIndicator
+    #filename= dirname + videoFilename + extension
+    filename= dirname + interactionType+ '-' + videoFilename + extension # min-distance case; todo: change the saving format to match all outputs
+    file = utils.openCheck(filename)
+    if (not file):
+        return []
+    interactions = []
+    interactionNum = 0
+    data= np.loadtxt(filename)
+    indicatorFrameNums= data[:,0]
+    inter = Interaction(interactionNum, TimeInterval(indicatorFrameNums[0],indicatorFrameNums[-1]), roaduserNum1, roaduserNum2) 
+    inter.addVideoFilename(videoFilename)
+    inter.addInteractionType(interactionType)
+    for key in indicatorsNames.keys():
+        values= {}
+        for i,t in enumerate(indicatorFrameNums):
+            values[t] = data[i,key]
+        inter.addIndicator(SeverityIndicator(indicatorsNames[key], values))
+    if selectedIndicators !=[]:
+        values= {}
+        for i,t in enumerate(indicatorFrameNums):
+            values[t] = [data[i,index] for index in selectedIndicators]
+        inter.addIndicator(SeverityIndicator('selectedIndicators', values))	
+		
+    interactions.append(inter)
+    file.close()
+    return interactions
+
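+# In the expected CSV layout, column 0 holds the frame numbers and the
+# remaining columns hold the indicator values listed in indicatorsNames.
+# Hypothetical example call (filenames and road user numbers illustrative):
+#   interactions = loadNewInteractions('laurier', 'crossing', 1, 2)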
--- a/python/poly_utils.py	Fri Jun 14 10:25:00 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-#! /usr/bin/env python
-'''Various utilities to load data saved by the POLY new output(s)'''
-import sys
-import utils
-from moving import  TimeInterval
-import numpy as np
-
-__metaclass__ = type
-
-# input variables
-#dirname= 'G:/0-phdstart/Code/trial/indicatorsNew/'
-#extension= '-indicatorsNew.csv'
-#indicatorsNames= {1:'Distance',2:'Cosine',3:'collision Course Angle',4:'Velocity Cosine',5:'Velocity Angle',6:'Speed Differential',7:'Collision Probability',8:'Severity Index',9:'TTC'}
-# min distance case
-dirname= 'G:/0-phdstart/Code/trial/minDistanceIndicator/'
-extension= '-minDistanceInd.csv'
-indicatorsNames= {1:'minDistance'}
-
-def loadNewInteractions(videoFilename,interactionType, roaduserNum1,roaduserNum2, selectedIndicators=[]):
-    '''Loads interactions from the POLY traffic event format'''
-    from events import Interaction 
-    from indicators import SeverityIndicator
-    #filename= dirname + videoFilename + extension
-    filename= dirname + interactionType+ '-' + videoFilename + extension # min-distance case; todo: change the saving format to match all outputs
-    file = utils.openCheck(filename)
-    if (not file):
-        return []
-    interactions = []
-    interactionNum = 0
-    data= np.loadtxt(filename)
-    indicatorFrameNums= data[:,0]
-    inter = Interaction(interactionNum, TimeInterval(indicatorFrameNums[0],indicatorFrameNums[-1]), roaduserNum1, roaduserNum2) 
-    inter.addVideoFilename(videoFilename)
-    inter.addInteractionType(interactionType)
-    for key in indicatorsNames.keys():
-        values= {}
-        for i,t in enumerate(indicatorFrameNums):
-            values[t] = data[i,key]
-        inter.addIndicator(SeverityIndicator(indicatorsNames[key], values))
-    if selectedIndicators !=[]:
-        values= {}
-        for i,t in enumerate(indicatorFrameNums):
-            values[t] = [data[i,index] for index in selectedIndicators]
-        inter.addIndicator(SeverityIndicator('selectedIndicators', values))	
-		
-    interactions.append(inter)
-    file.close()
-    return interactions
-
--- a/python/safety-analysis.py	Fri Jun 14 10:25:00 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,29 +0,0 @@
-#! /usr/bin/env python
-
-import sys,getopt
-
-import matplotlib.pyplot as plt
-import numpy as np
-
-from ConfigParser import ConfigParser
-
-import utils
-
-options, args = getopt.getopt(sys.argv[1:], 'hi:d:t:o:f:',['help']) 
-
-options = dict(options)
-
-print options, args
-
-if '--help' in options.keys() or '-h' in options.keys() or len(sys.argv) == 1:
-    print('Usage: '+sys.argv[0]+' --help|-h config_file.cfg\n'
-          'The program processes indicators for all pairs of road users in the scene\n\n'
-          'Order matters between positional and named arguments')
-    sys.exit()
-
-# TODO work on the way to indicate an interaction definition
-
-if len(args)>0: # consider there is a configuration file
-    params = utils.TrackingParameters()
-    params.loadConfigFile(args[0])
-
-
-    
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/compute-homography.py	Fri Jun 14 10:34:11 2013 -0400
@@ -0,0 +1,122 @@
+#! /usr/bin/env python
+
+import sys,getopt
+
+import matplotlib.pyplot as plt
+import numpy as np
+import cv2
+
+import cvutils
+import utils
+
+options, args = getopt.getopt(sys.argv[1:], 'hp:i:w:n:u:',['help'])
+options = dict(options)
+
+# TODO process camera intrinsic and extrinsic parameters to obtain image to world homography, taking example from Work/src/python/generate-homography.py script
+# cameraMat = load(videoFilenamePrefix+'-camera.txt');
+# T1 = cameraMat[3:6,:].copy();
+# A = cameraMat[0:3,0:3].copy();
+
+# # pay attention, rotation may be the transpose
+# # R = T1[:,0:3].T;
+# R = T1[:,0:3];
+# rT = dot(R, T1[:,3]/1000);
+# T = zeros((3,4),'f');
+# T[:,0:3] = R[:];
+# T[:,3] = rT;
+
+# AT = dot(A,T);
+
+# nPoints = 4;
+# worldPoints = cvCreateMat(nPoints, 3, CV_64FC1);
+# imagePoints = cvCreateMat(nPoints, 3, CV_64FC1);
+
+# # extract homography from the camera calibration
+# worldPoints = cvCreateMat(4, 3, CV_64FC1);
+# imagePoints = cvCreateMat(4, 3, CV_64FC1);
+
+# worldPoints[0,:] = [[1, 1, 0]];
+# worldPoints[1,:] = [[1, 2, 0]];
+# worldPoints[2,:] = [[2, 1, 0]];
+# worldPoints[3,:] = [[2, 2, 0]];
+
+# wPoints = [[1,1,2,2],
+#            [1,2,1,2],
+#            [0,0,0,0]];
+# iPoints = utils.worldToImage(AT, wPoints);
+
+# for i in range(nPoints):
+#     imagePoints[i,:] = [iPoints[:,i].tolist()];
+
+# H = cvCreateMat(3, 3, CV_64FC1);
+
+# cvFindHomography(imagePoints, worldPoints, H);
+
+if '--help' in options.keys() or '-h' in options.keys() or len(options) == 0:
+    print('Usage: {0} --help|-h [-p point-correspondences.txt] [-i video-frame] [-w world-frame] [-n number-points] [-u units-per-pixel=1]'.format(sys.argv[0]))
+    print('''The input data can be provided either as point correspondences already saved
+ in a text file, or input by clicking a certain number of points (>=4)
+ in a video frame and in a world image.
+
+The point correspondence file contains at least 4 non-collinear point coordinates
+with the following format:
+ - the first two lines are the x and y coordinates in the projected space (usually world space)
+ - the last two lines are the x and y coordinates in the origin space (usually image space)
+
+If providing video and world images, with a number of points to input
+and a ratio to convert pixels to world distance units (e.g. meters per pixel),
+the images will be shown in turn and the user should click
+ the corresponding points, in the same order, in the world and image spaces.''')
+    sys.exit()
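+
+# For reference, a hypothetical point-correspondences file for 4 points
+# (values illustrative; one row per coordinate, one column per point):
+#   10.0  10.0  30.0  30.0    (world x)
+#    5.0  15.0   5.0  15.0    (world y)
+#  220.0 500.0 540.0 110.0    (image x, pixels)
+#  400.0 390.0 220.0 230.0    (image y, pixels)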
+
+homography = np.array([])
+if '-p' in options.keys():
+    worldPts, videoPts = cvutils.loadPointCorrespondences(options['-p'])
+    homography, mask = cv2.findHomography(videoPts, worldPts) # method=0, ransacReprojThreshold=3
+elif '-i' in options.keys() and '-w' in options.keys():
+    nPoints = 4
+    if '-n' in options.keys():
+        nPoints = int(options['-n'])
+    unitsPerPixel = 1
+    if '-u' in options.keys():
+        unitsPerPixel = float(options['-u'])
+    worldImg = plt.imread(options['-w'])
+    videoImg = plt.imread(options['-i'])
+    print('Click on {0} points in the video frame'.format(nPoints))
+    plt.figure()
+    plt.imshow(videoImg)
+    videoPts = np.array(plt.ginput(nPoints))
+    print('Click on {0} points in the world image'.format(nPoints))
+    plt.figure()
+    plt.imshow(worldImg)
+    worldPts = unitsPerPixel*np.array(plt.ginput(nPoints))
+    plt.close('all')
+    homography, mask = cv2.findHomography(videoPts, worldPts)
+    # save the points in file
+    f = open('point-correspondences.txt', 'a')
+    np.savetxt(f, worldPts.T)
+    np.savetxt(f, videoPts.T)
+    f.close()
+
+if homography.size>0:
+    np.savetxt('homography.txt',homography)
+
+if '-i' in options.keys() and homography.size>0:
+    videoImg = cv2.imread(options['-i'])
+    worldImg = cv2.imread(options['-w'])
+    invHomography = np.linalg.inv(homography)
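+    # cvutils.projectArray presumably applies the homography in homogeneous
+    # coordinates; a minimal sketch of such a projection for a 2xN array pts:
+    #   homogeneous = np.append(pts, [np.ones(pts.shape[1])], axis=0)
+    #   prod = np.dot(invHomography, homogeneous)
+    #   projected = prod[0:2]/prod[2]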
+    projectedWorldPts = cvutils.projectArray(invHomography, worldPts.T).T
+    if '-u' in options.keys():
+        unitsPerPixel = float(options['-u'])        
+        projectedVideoPts = cvutils.projectArray(invHomography, videoPts.T).T
+    for i in range(worldPts.shape[0]):
+        cv2.circle(videoImg,tuple(np.int32(np.round(videoPts[i]))),2,cvutils.cvRed)
+        cv2.circle(videoImg,tuple(np.int32(np.round(projectedWorldPts[i]))),2,cvutils.cvBlue)
+        if '-u' in options.keys():
+            cv2.circle(worldImg,tuple(np.int32(np.round(worldPts[i]/unitsPerPixel))),2,cvutils.cvRed)
+            cv2.circle(worldImg,tuple(np.int32(np.round(projectedVideoPts[i]/unitsPerPixel))),2,cvutils.cvRed)
+        #print('img: {0} / projected: {1}'.format(videoPts[i], p))
+    cv2.imshow('video frame',videoImg)
+    if '-u' in options.keys():
+        cv2.imshow('world image',worldImg)
+    cv2.waitKey()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/delete-object-tables.py	Fri Jun 14 10:34:11 2013 -0400
@@ -0,0 +1,15 @@
+#! /usr/bin/env python
+
+import sys,getopt
+
+import utils
+import storage
+
+options, args = getopt.getopt(sys.argv[1:], 'h',['help'])
+options = dict(options)
+
+if '--help' in options.keys() or '-h' in options.keys() or len(args) == 0:
+    print('Usage: {0} --help|-h <database-filename.sqlite>'.format(sys.argv[0]))
+    sys.exit()
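+
+# Example invocation (hypothetical database name):
+#   delete-object-tables.py sherbrooke.sqlite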
+
+storage.removeObjectsFromSqlite(args[0])
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/display-trajectories.py	Fri Jun 14 10:34:11 2013 -0400
@@ -0,0 +1,46 @@
+#! /usr/bin/env python
+
+import sys,getopt
+
+import storage, cvutils, utils
+
+from numpy.linalg.linalg import inv
+from numpy import loadtxt
+
+options, args = getopt.getopt(sys.argv[1:], 'hi:d:t:o:f:',['help']) 
+# alternative long names are a pain to support: 'video-filename=', 'database-filename=', 'type='
+
+options = dict(options)
+
+print options, args
+
+if '--help' in options.keys() or '-h' in options.keys() or len(sys.argv) == 1:
+    print('Usage: '+sys.argv[0]+' --help|-h -i video-filename -d database-filename [-t object_type] [-o image2world_homography] [-f first_frame]\n'
+          +'Or   : '+sys.argv[0]+' [-t object_type] config_file.cfg\n\n'
+          'Order matters between positional and named arguments\n'
+          'object_type can be feature or object')
+    sys.exit()
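+
+# Example invocation (hypothetical filenames):
+#   display-trajectories.py -i laurier.avi -d laurier.sqlite -t object -o laurier-homography.txt -f 100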
+
+objectType = 'feature'
+if '-t' in options.keys():
+    objectType = options['-t']
+
+if len(args)>0: # consider there is a configuration file
+    params = utils.TrackingParameters()
+    params.loadConfigFile(args[0])
+    videoFilename = params.videoFilename
+    databaseFilename = params.databaseFilename
+    homography = inv(params.homography)
+    firstFrameNum = params.firstFrameNum
+else:
+    videoFilename = options['-i']
+    databaseFilename = options['-d']
+    homography = None
+    if '-o' in options.keys():
+        homography = inv(loadtxt(options['-o']))            
+    firstFrameNum = 0
+    if '-f' in options.keys():
+        firstFrameNum = int(options['-f'])
+
+objects = storage.loadTrajectoriesFromSqlite(databaseFilename, objectType)
+cvutils.displayTrajectories(videoFilename, objects, homography, firstFrameNum)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/play-video.py	Fri Jun 14 10:34:11 2013 -0400
@@ -0,0 +1,22 @@
+#! /usr/bin/env python
+
+import sys,getopt
+import cvutils
+
+options, args = getopt.getopt(sys.argv[1:], 'hi:f:',['help', 'fps=']) 
+options = dict(options)
+print options
+
+if '--help' in options.keys() or '-h' in options.keys() or len(sys.argv) == 1:
+    print('Usage: '+sys.argv[0]+' --help|-h -i video-filename [-f first_frame] [--fps frame_rate]')
+    sys.exit()
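+
+# Example invocation (hypothetical filename):
+#   play-video.py -i laurier.avi -f 100 --fps 30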
+
+firstFrameNum = 0
+if '-f' in options.keys():
+    firstFrameNum = int(options['-f'])
+
+frameRate = -1
+if '--fps' in options.keys():
+    frameRate = int(options['--fps'])
+
+cvutils.playVideo(options['-i'], firstFrameNum, frameRate)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/safety-analysis.py	Fri Jun 14 10:34:11 2013 -0400
@@ -0,0 +1,29 @@
+#! /usr/bin/env python
+
+import sys,getopt
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+from ConfigParser import ConfigParser
+
+import utils
+
+options, args = getopt.getopt(sys.argv[1:], 'hi:d:t:o:f:',['help']) 
+
+options = dict(options)
+
+print options, args
+
+if '--help' in options.keys() or '-h' in options.keys() or len(sys.argv) == 1:
+    print('Usage: '+sys.argv[0]+' --help|-h config_file.cfg\n'
+          'The program processes indicators for all pairs of road users in the scene\n\n'
+          'Order matters between positional and named arguments')
+    sys.exit()
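+
+# Example invocation (hypothetical configuration file):
+#   safety-analysis.py tracking.cfg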
+
+# TODO work on the way to indicate an interaction definition
+
+if len(args)>0: # consider there is a configuration file
+    params = utils.TrackingParameters()
+    params.loadConfigFile(args[0])
+
+
+    
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/test-compute-object-position-from-features.py	Fri Jun 14 10:34:11 2013 -0400
@@ -0,0 +1,141 @@
+#!/usr/bin/env python
+
+import sys
+
+import matplotlib.mlab as pylab
+import matplotlib.pyplot as plt
+import numpy as np
+
+import cv
+import utils
+import cvutils
+import ubc_utils
+import moving
+
+# use something like getopt to manage arguments if necessary
+
+if len(sys.argv) < 3:
+    print('Usage: {0} <video-filename> <n-objects>'.format(sys.argv[0]))
+    sys.exit()
+
+if sys.argv[1].endswith('.avi'):
+    videoFilenamePrefix = utils.removeExtension(sys.argv[1],'.')
+else:
+    videoFilenamePrefix = sys.argv[1]
+
+objectNum = int(sys.argv[2])
+
+objects = ubc_utils.loadTrajectories(videoFilenamePrefix+'-objects.txt', objectNum+1)
+obj = objects[objectNum]
+features = ubc_utils.loadTrajectories(videoFilenamePrefix+'-features.txt', max(obj.featureNumbers)+1)
+h = np.loadtxt(videoFilenamePrefix+'-homography.txt')
+
+invh = cvutils.invertHomography(h)
+
+def computeGroundTrajectory(features, homography, timeInterval = None):
+    '''Computes a trajectory for the set of features as the closest point to the ground:
+    at each instant, keeps the feature with the largest y-coordinate after projecting to image space with the homography'''
+    if not timeInterval:
+        raise Exception('not implemented') # compute from the features
+    
+    yCoordinates = -np.ones((len(features),int(timeInterval.length())))
+    for i,f in enumerate(features):
+        traj = f.getPositions().asArray()
+        imgTraj = cvutils.projectArray(homography, traj)
+        yCoordinates[i,f.getFirstInstant()-timeInterval.first:f.getLastInstant()+1-timeInterval.first] = imgTraj[1,:]
+
+    indices = np.argmax(yCoordinates,0)
+    newTraj = moving.Trajectory()
+    for j,idx in enumerate(indices):
+        newTraj.addPosition(features[idx].getPositionAtInstant(j+timeInterval.first))
+        #newVelocities.addPosition(features[obj.featureNumbers[idx]].getVelocityAtInstant(j+obj.getFirstInstant()))
+
+    return newTraj
+
+def computeMedianTrajectory(features, timeInterval = None):
+    if not timeInterval:
+        raise Exception('not implemented') # compute from the features
+    
+    newTraj = moving.Trajectory()
+    for t in timeInterval:
+        points = []
+        for f in features:
+            if f.existsAtInstant(t):
+                points.append(f.getPositionAtInstant(t).aslist())
+        med = np.median(np.array(points), 0)
+        newTraj.addPositionXY(med[0], med[1])
+
+    return newTraj
+
+# TODO version median: conversion to large matrix will not work, have to do it frame by frame
+
+def kalmanFilter(positions, velocities, processNoiseCov, measurementNoiseCov):
+    kalman=cv.CreateKalman(6, 4)
+    kalman.transition_matrix[0,2]=1
+    kalman.transition_matrix[0,4]=1./2
+    kalman.transition_matrix[1,3]=1
+    kalman.transition_matrix[1,5]=1./2
+    kalman.transition_matrix[2,4]=1
+    kalman.transition_matrix[3,5]=1
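+    # Assuming CreateKalman initializes transition_matrix to the identity, the
+    # state is [x, y, vx, vy, ax, ay] and the lines above build the
+    # constant-acceleration transition matrix (dt = 1 frame):
+    #   [1 0 1 0 1/2  0 ]
+    #   [0 1 0 1  0  1/2]
+    #   [0 0 1 0  1   0 ]
+    #   [0 0 0 1  0   1 ]
+    #   [0 0 0 0  1   0 ]
+    #   [0 0 0 0  0   1 ]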
+
+    cv.SetIdentity(kalman.measurement_matrix, 1.)
+    cv.SetIdentity(kalman.process_noise_cov, processNoiseCov)
+    cv.SetIdentity(kalman.measurement_noise_cov, measurementNoiseCov)
+    cv.SetIdentity(kalman.error_cov_post, 1.)
+
+    p = positions[0]
+    v = velocities[0]
+    v2 = velocities[2]
+    a = (v2-v).multiply(0.5)
+    kalman.state_post[0,0]=p.x
+    kalman.state_post[1,0]=p.y
+    kalman.state_post[2,0]=v.x
+    kalman.state_post[3,0]=v.y
+    kalman.state_post[4,0]=a.x
+    kalman.state_post[5,0]=a.y
+    
+    filteredPositions = moving.Trajectory()
+    filteredVelocities = moving.Trajectory()
+    measurement = cv.CreateMat(4,1,cv.CV_32FC1)
+    for i in xrange(positions.length()):
+        cv.KalmanPredict(kalman) # no control
+        p = positions[i]
+        v = velocities[i]
+        measurement[0,0] = p.x
+        measurement[1,0] = p.y
+        measurement[2,0] = v.x
+        measurement[3,0] = v.y
+        cv.KalmanCorrect(kalman, measurement)
+        filteredPositions.addPositionXY(kalman.state_post[0,0], kalman.state_post[1,0])
+        filteredVelocities.addPositionXY(kalman.state_post[2,0], kalman.state_post[3,0])
+
+    return (filteredPositions, filteredVelocities)
+
+groundTrajectory = computeGroundTrajectory([features[i] for i in obj.featureNumbers], invh, obj.getTimeInterval())
+(filteredPositions, filteredVelocities) = kalmanFilter(groundTrajectory, obj.getVelocities(), 0.1, 0.1)
+
+#medianTrajectory = computeMedianTrajectory([features[i] for i in obj.featureNumbers], obj.getTimeInterval())
+
+delta = []
+for t in obj.getTimeInterval():
+    p1 = obj.getPositionAtInstant(t)
+    p2 = groundTrajectory[t-obj.getFirstInstant()]
+    delta.append((p1-p2).aslist())
+
+delta = np.median(delta, 0)
+
+translated = moving.Trajectory()
+for t in obj.getTimeInterval():
+    p1 = obj.getPositionAtInstant(t)
+    p1.x -= delta[0]
+    p1.y -= delta[1]
+    translated.addPosition(p1)
+
+plt.clf()
+obj.draw('rx-')
+for fnum in obj.featureNumbers: features[fnum].draw()
+groundTrajectory.draw('bx-')
+filteredPositions.draw('gx-')
+translated.draw('kx-')
+#medianTrajectory.draw('kx-')
+plt.axis('equal')