changeset 614:5e09583275a4

Merged Nicolas/trafficintelligence into default
author Mohamed Gomaa <eng.m.gom3a@gmail.com>
date Fri, 05 Dec 2014 12:13:53 -0500
parents 11f96bd08552 (current diff) c5406edbcf12 (diff)
children 0954aaf28231
files Makefile python/calibration-translation.py python/compute-homography.py python/compute-object-from-features.py python/delete-object-tables.py python/display-trajectories.py python/offset-trajectories.py python/play-video.py python/poly-utils.py python/poly_utils.py
diffstat 78 files changed, 5330 insertions(+), 1589 deletions(-) [+]
line diff
--- a/.hgignore	Thu Apr 18 15:29:33 2013 -0400
+++ b/.hgignore	Fri Dec 05 12:13:53 2014 -0500
@@ -22,6 +22,7 @@
 
 CMakeCache.txt
 *.cmake
+install_manifest.txt
 
 latex
 html
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/CHANGELOG	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,10 @@
+
+
+- Changeset 523
+Added functionality to classify tracked road users into cyclist, pedestrian or motorized vehicle, using appearance and speed
+
+- Changeset 515
+Draw methods have been renamed to plot.
+
+- Changeset 513 June 5th 2014
+Added functionalities to process video with visible radial distortion (such as GoPro). This means that the configuration file format has changed, with several parameters added
\ No newline at end of file
--- a/CMakeLists.txt	Thu Apr 18 15:29:33 2013 -0400
+++ b/CMakeLists.txt	Fri Dec 05 12:13:53 2014 -0500
@@ -14,19 +14,36 @@
 	CMAKE_CXX_FLAGS "-g -Wall"
 )
 
-ADD_EXECUTABLE(
-  bin/feature-based-tracking
-  c/feature-based-tracking.cpp
-  )
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY bin)
+
+add_executable(feature-based-tracking
+	c/cvutils.cpp
+	c/feature-based-tracking.cpp
+	c/Motion.cpp
+	c/Parameters.cpp
+	c/utils.cpp
+	c/InputFrameListModule.cpp
+	c/InputVideoFileModule.cpp
+	)
+
+find_package(Boost REQUIRED program_options filesystem system)
+find_library(TrajectoryManagement_LIBRARY TrajectoryManagementAndAnalysis)
+find_path(TrajectoryManagement_INCLUDE_DIR src/Trajectory.h)
 
-INCLUDE_DIRECTORIES(
-#  bin/feature-based-tracking
-/home/nicolas/Research/Code/trajectorymanagementandanalysis/trunk/src/TrajectoryManagementAndAnalysis
-)
+add_definitions(
+	-DUSE_OPENCV
+	)
+
+include_directories(
+	${PROJECT_SOURCE_DIR}/include
+	${TrajectoryManagement_INCLUDE_DIR}
+	)
 
-#TARGET_LINK_LIBRARIES(
-#  bin/feature-based-tracking
-#  ${OpenCV_LIBS}
-#  $(TrajectoryManagement_DIR)
-  #	${SQLite3_LIBS}
-#  )
+target_link_libraries(feature-based-tracking
+	${TrajectoryManagement_LIBRARY}
+	${SQLite3_LIBS}
+	${OpenCV_LIBS}
+	${Boost_LIBRARIES}
+	)
+
+install(TARGETS feature-based-tracking DESTINATION bin)
--- a/Makefile	Thu Apr 18 15:29:33 2013 -0400
+++ b/Makefile	Fri Dec 05 12:13:53 2014 -0500
@@ -1,163 +1,25 @@
-# CMAKE generated file: DO NOT EDIT!
-# Generated by "Unix Makefiles" Generator, CMake Version 2.8
 
-# Default target executed when no arguments are given to make.
-default_target: all
-.PHONY : default_target
-
-#=============================================================================
-# Special targets provided by cmake.
-
-# Disable implicit rules so canonical targets will work.
-.SUFFIXES:
-
-# Remove some rules from gmake that .SUFFIXES does not remove.
-SUFFIXES =
-
-.SUFFIXES: .hpux_make_needs_suffix_list
-
-# Suppress display of executed commands.
-$(VERBOSE).SILENT:
-
-# A target that is always out of date.
-cmake_force:
-.PHONY : cmake_force
-
-#=============================================================================
-# Set environment variables for the build.
-
-# The shell in which to execute make rules.
-SHELL = /bin/sh
-
-# The CMake executable.
-CMAKE_COMMAND = /usr/bin/cmake
-
-# The command to remove a file.
-RM = /usr/bin/cmake -E remove -f
-
-# The program to use to edit the cache.
-CMAKE_EDIT_COMMAND = /usr/bin/ccmake
+cexe:
+	@cd c && make feature-based-tracking
 
-# The top-level source directory on which CMake was run.
-CMAKE_SOURCE_DIR = /home/nicolas/Research/Code/traffic-intelligence
-
-# The top-level build directory on which CMake was run.
-CMAKE_BINARY_DIR = /home/nicolas/Research/Code/traffic-intelligence
-
-#=============================================================================
-# Targets provided globally by CMake.
-
-# Special rule for the target edit_cache
-edit_cache:
-	@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Running CMake cache editor..."
-	/usr/bin/ccmake -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)
-.PHONY : edit_cache
-
-# Special rule for the target edit_cache
-edit_cache/fast: edit_cache
-.PHONY : edit_cache/fast
+doc:
+	doxygen
 
-# Special rule for the target rebuild_cache
-rebuild_cache:
-	@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Running CMake to regenerate build system..."
-	/usr/bin/cmake -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)
-.PHONY : rebuild_cache
-
-# Special rule for the target rebuild_cache
-rebuild_cache/fast: rebuild_cache
-.PHONY : rebuild_cache/fast
-
-# The main all target
-all: cmake_check_build_system
-	$(CMAKE_COMMAND) -E cmake_progress_start /home/nicolas/Research/Code/traffic-intelligence/CMakeFiles /home/nicolas/Research/Code/traffic-intelligence/CMakeFiles/progress.marks
-	$(MAKE) -f CMakeFiles/Makefile2 all
-	$(CMAKE_COMMAND) -E cmake_progress_start /home/nicolas/Research/Code/traffic-intelligence/CMakeFiles 0
-.PHONY : all
-
-# The main clean target
 clean:
-	$(MAKE) -f CMakeFiles/Makefile2 clean
-.PHONY : clean
+	@cd c && make clean
+	@cd python && rm *.pyc
 
-# The main clean target
-clean/fast: clean
-.PHONY : clean/fast
-
-# Prepare targets for installation.
-preinstall: all
-	$(MAKE) -f CMakeFiles/Makefile2 preinstall
-.PHONY : preinstall
-
-# Prepare targets for installation.
-preinstall/fast:
-	$(MAKE) -f CMakeFiles/Makefile2 preinstall
-.PHONY : preinstall/fast
-
-# clear depends
-depend:
-	$(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 1
-.PHONY : depend
-
-#=============================================================================
-# Target rules for targets named bin/feature-based-tracking
-
-# Build rule for target.
-bin/feature-based-tracking: cmake_check_build_system
-	$(MAKE) -f CMakeFiles/Makefile2 bin/feature-based-tracking
-.PHONY : bin/feature-based-tracking
-
-# fast build rule for target.
-bin/feature-based-tracking/fast:
-	$(MAKE) -f CMakeFiles/bin/feature-based-tracking.dir/build.make CMakeFiles/bin/feature-based-tracking.dir/build
-.PHONY : bin/feature-based-tracking/fast
-
-c/feature-based-tracking.o: c/feature-based-tracking.cpp.o
-.PHONY : c/feature-based-tracking.o
-
-# target to build an object file
-c/feature-based-tracking.cpp.o:
-	$(MAKE) -f CMakeFiles/bin/feature-based-tracking.dir/build.make CMakeFiles/bin/feature-based-tracking.dir/c/feature-based-tracking.cpp.o
-.PHONY : c/feature-based-tracking.cpp.o
+install: cexe
+	@echo "========================================="
+	@echo "Installing for Linux"
+	@echo "========================================="
+	@echo "Copying feature-based tracking executable"
+	@cp bin/feature-based-tracking /usr/local/bin
+	@echo "========================================="
+	@echo "Copying Python scripts"
+	@cp scripts/* /usr/local/bin
 
-c/feature-based-tracking.i: c/feature-based-tracking.cpp.i
-.PHONY : c/feature-based-tracking.i
-
-# target to preprocess a source file
-c/feature-based-tracking.cpp.i:
-	$(MAKE) -f CMakeFiles/bin/feature-based-tracking.dir/build.make CMakeFiles/bin/feature-based-tracking.dir/c/feature-based-tracking.cpp.i
-.PHONY : c/feature-based-tracking.cpp.i
-
-c/feature-based-tracking.s: c/feature-based-tracking.cpp.s
-.PHONY : c/feature-based-tracking.s
-
-# target to generate assembly for a file
-c/feature-based-tracking.cpp.s:
-	$(MAKE) -f CMakeFiles/bin/feature-based-tracking.dir/build.make CMakeFiles/bin/feature-based-tracking.dir/c/feature-based-tracking.cpp.s
-.PHONY : c/feature-based-tracking.cpp.s
-
-# Help Target
-help:
-	@echo "The following are some of the valid targets for this Makefile:"
-	@echo "... all (the default if no target is provided)"
-	@echo "... clean"
-	@echo "... depend"
-	@echo "... bin/feature-based-tracking"
-	@echo "... edit_cache"
-	@echo "... rebuild_cache"
-	@echo "... c/feature-based-tracking.o"
-	@echo "... c/feature-based-tracking.i"
-	@echo "... c/feature-based-tracking.s"
-.PHONY : help
-
-
-
-#=============================================================================
-# Special targets to cleanup operation of make.
-
-# Special rule to run CMake to check the build system integrity.
-# No rule that depends on this can have commands that come from listfiles
-# because they might be regenerated.
-cmake_check_build_system:
-	$(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 0
-.PHONY : cmake_check_build_system
-
+uninstall:
+	@echo "Uninstalling for Linux"
+	rm /usr/local/bin/feature-based-tracking 
+	@cd scripts && ./uninstall-scripts.sh
\ No newline at end of file
--- a/OpenCV.props	Thu Apr 18 15:29:33 2013 -0400
+++ b/OpenCV.props	Fri Dec 05 12:13:53 2014 -0500
@@ -13,7 +13,7 @@
     </ClCompile>
     <Link>
       <AdditionalLibraryDirectories>$(SolutionDir)\win32-depends\opencv\libs\$(PlatformName)\$(ConfigurationName)\;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
-      <AdditionalDependencies>opencv_video243$(DebugSuffix).lib;opencv_ts243$(DebugSuffix).lib;opencv_objdetect243$(DebugSuffix).lib;opencv_ml243$(DebugSuffix).lib;opencv_legacy243$(DebugSuffix).lib;opencv_imgproc243$(DebugSuffix).lib;opencv_highgui243$(DebugSuffix).lib;opencv_flann243$(DebugSuffix).lib;opencv_features2d243$(DebugSuffix).lib;opencv_core243$(DebugSuffix).lib;opencv_contrib243$(DebugSuffix).lib;opencv_calib3d243$(DebugSuffix).lib;%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies>opencv_video246$(DebugSuffix).lib;opencv_ts246$(DebugSuffix).lib;opencv_objdetect246$(DebugSuffix).lib;opencv_ml246$(DebugSuffix).lib;opencv_legacy246$(DebugSuffix).lib;opencv_imgproc246$(DebugSuffix).lib;opencv_highgui246$(DebugSuffix).lib;opencv_flann246$(DebugSuffix).lib;opencv_features2d246$(DebugSuffix).lib;opencv_core246$(DebugSuffix).lib;opencv_contrib246$(DebugSuffix).lib;opencv_calib3d246$(DebugSuffix).lib;%(AdditionalDependencies)</AdditionalDependencies>
     </Link>
   </ItemDefinitionGroup>
   <ItemGroup />
--- a/README	Thu Apr 18 15:29:33 2013 -0400
+++ b/README	Fri Dec 05 12:13:53 2014 -0500
@@ -1,7 +1,15 @@
-This software project provides a set of tools developed by Nicolas Saunier and his collaborators for transportation data processing, in particular road traffic, motorized and non-motorized. The project consists in particular in tools for the most typical transportation data type, trajectories, i.e. temporal series of positions. 
-
-The code is licensed under the MIT open source license (http://www.opensource.org/licenses/mit-license).
+This software project provides a set of tools developed by Nicolas
+Saunier and his collaborators for transportation data processing, in
+particular road traffic, motorized and non-motorized. The project
+consists in particular of tools for the most typical transportation
+data type, trajectories, i.e. temporal series of positions.
 
-Contact me at nicolas.saunier@polymtl.ca and learn more about my work at http://nicolas.saunier.confins.net.
+The code is licensed under the MIT open source license
+(http://www.opensource.org/licenses/mit-license).
 
-Please consult the project website on Bitbucket for more information and step-by-step guides https://bitbucket.org/Nicolas/trafficintelligence/wiki/Home
\ No newline at end of file
+Contact me at nicolas.saunier@polymtl.ca and learn more about my work
+at http://nicolas.saunier.confins.net.
+
+Please consult the project website on Bitbucket for more information
+and step-by-step guides
+https://bitbucket.org/Nicolas/trafficintelligence/wiki/Home
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/c/InputFrameListModule.cpp	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,61 @@
+#include "InputFrameListModule.h"
+#include "utils.hpp"
+
+#include <fstream>
+#include <ostream>
+#include <iostream>
+#include <algorithm>
+
+//#include <boost/algorithm/string.hpp>
+#include <boost/filesystem.hpp>
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+
+namespace fs = boost::filesystem;
+
+InputFrameListModule::InputFrameListModule(const std::string& _dirname)
+  : mCurrentIdx(0), mInit(false), dirname(_dirname){
+  loadImageList();
+}
+
+InputFrameListModule::~InputFrameListModule(void) { }
+
+
+void InputFrameListModule::setFrameNumber(const unsigned int& frameNumber) {
+  if (frameNumber < filenames.size())
+    mCurrentIdx = frameNumber;
+  else
+    mCurrentIdx = filenames.size()-1;
+}
+
+bool InputFrameListModule::getNextFrame(cv::Mat& mat)
+{
+  bool success = false;
+  if(mCurrentIdx < filenames.size()) {
+      mat = cv::imread(dirname+filenames[mCurrentIdx++]);
+
+      if(!mat.empty())
+	success = true;
+    }
+	
+  return success;
+}
+
+unsigned int InputFrameListModule::getNbFrames(void) {
+  return filenames.size();
+}
+
+void InputFrameListModule::loadImageList(void) {
+  for (fs::directory_iterator iter(dirname); iter!=fs::directory_iterator(); iter++)
+    filenames.push_back(iter->path().filename().string());
+
+  sort(filenames.begin(), filenames.end());
+
+  if(!filenames.empty()) {
+    std::cout << dirname+filenames[0] << std::endl;
+    cv::Mat tmpImg = cv::imread(dirname+filenames[0]);
+    mSize = cv::Size(tmpImg.cols, tmpImg.rows);
+    mInit = true;
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/c/InputVideoFileModule.cpp	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,31 @@
+#include "InputVideoFileModule.h"
+
+InputVideoFileModule::InputVideoFileModule(const std::string& videoPath) 
+  : mInit(false)
+  , mNumberOfFrame(0)
+{
+  mInit = mVideoCapture.open(videoPath.c_str());
+  double frameCount;
+  frameCount = mVideoCapture.get(CV_CAP_PROP_FRAME_COUNT);	
+  mSize = cv::Size(mVideoCapture.get(CV_CAP_PROP_FRAME_WIDTH), mVideoCapture.get(CV_CAP_PROP_FRAME_HEIGHT));
+  mNumberOfFrame = (unsigned int)frameCount;
+}
+
+InputVideoFileModule::~InputVideoFileModule(void) { }
+
+
+void InputVideoFileModule::setFrameNumber(const unsigned int& frameNumber) {
+  mVideoCapture.set(CV_CAP_PROP_POS_FRAMES, frameNumber);
+}
+
+bool InputVideoFileModule::getNextFrame(cv::Mat& outputPicture)
+{
+  bool success = false;
+  if(mInit)
+    {		
+      mVideoCapture >> outputPicture;
+      success = !outputPicture.empty();
+    }
+  return success;
+}
+
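The two modules above implement the new InputFrameProviderIface (added under include/ further down in this changeset), so the tracker can read frames either from a video file or from a directory of images through one interface. Below is a minimal usage sketch, not part of the changeset: it assumes the new headers are on the include path, that OpenCV and boost_filesystem/boost_system are linked as in the updated Makefile and CMakeLists.txt, and that "example.avi" is a placeholder path. It mirrors the provider selection done in c/feature-based-tracking.cpp.

#include "InputFrameProviderIface.h"
#include "InputFrameListModule.h"
#include "InputVideoFileModule.h"

#include "opencv2/core/core.hpp"

#include <boost/filesystem.hpp>
#include <boost/shared_ptr.hpp>

#include <iostream>
#include <string>

namespace fs = boost::filesystem;

// A directory is read as a sorted image list, anything else as a video file.
// Note: InputFrameListModule concatenates dirname+filename directly, so a
// directory path should end with a separator.
boost::shared_ptr<InputFrameProviderIface> openInput(const std::string& path) {
  if (fs::is_directory(fs::path(path)))
    return boost::shared_ptr<InputFrameProviderIface>(new InputFrameListModule(path));
  else
    return boost::shared_ptr<InputFrameProviderIface>(new InputVideoFileModule(path));
}

int main() {
  boost::shared_ptr<InputFrameProviderIface> capture = openInput("example.avi");
  if (!capture->isOpen())
    return 1;
  std::cout << capture->getNbFrames() << " frames of size "
            << capture->getSize().width << "x" << capture->getSize().height << std::endl;
  cv::Mat frame;
  while (capture->getNextFrame(frame)) {
    // process frame here
  }
  return 0;
}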
--- a/c/Makefile	Thu Apr 18 15:29:33 2013 -0400
+++ b/c/Makefile	Fri Dec 05 12:13:53 2014 -0500
@@ -10,8 +10,7 @@
 
 LDFLAGS = -lm
 LDFLAGS += -lTrajectoryManagementAndAnalysis -lsqlite3
-LDFLAGS += -lboost_program_options
-# -lboost_filesystem-mt -lboost_system-mt
+LDFLAGS += -lboost_program_options -lboost_filesystem -lboost_system
 #LDFLAGS += -lfltk
 
 CFLAGS = -Wall -W -Wextra
@@ -49,7 +48,7 @@
 CXXFLAGS = $(INCLUDE) $(CFLAGS)
 
 #GUI_OBJS = 
-CV_OBJS = cvutils.o
+CV_OBJS = cvutils.o InputFrameListModule.o InputVideoFileModule.o
 COMMON_OBJS = utils.o Motion.o Parameters.o utils.o
 OBJS = $(COMMON_OBJS) $(CV_OBJS)
 TESTS_OBJS = test_feature.o test_graph.o
@@ -72,6 +71,7 @@
 	$(EXE_DIR)/$@
 
 feature-based-tracking: feature-based-tracking.o $(OBJS)
+	@$(SCRIPTS_DIR)/createdirectory.sh $(EXE_DIR)
 	$(CXX) $(CFLAGS) $(LIBS) $^ -o $(EXE_DIR)/$@ $(LDFLAGS)
 
 track-features.o: track-features.cpp
--- a/c/Motion.cpp	Thu Apr 18 15:29:33 2013 -0400
+++ b/c/Motion.cpp	Fri Dec 05 12:13:53 2014 -0500
@@ -51,7 +51,7 @@
     float disp = 0;
     for (unsigned int i=0; i<nDisplacements; i++)
       disp += displacementDistances[nPositions-2-i];
-    result = disp < minTotalFeatureDisplacement;
+    result = disp <= minTotalFeatureDisplacement;
   }
   return result;
 }
@@ -66,9 +66,9 @@
     else
       ratio = displacementDistances[nPositions-3] / displacementDistances[nPositions-2];
 
-    float cosine = scalarProduct((*velocities)[nPositions-3],(*velocities)[nPositions-2]) / (displacementDistances[nPositions-3] * displacementDistances[nPositions-2]);
+    float cosine = (*velocities)[nPositions-3].dot((*velocities)[nPositions-2]) / (displacementDistances[nPositions-3] * displacementDistances[nPositions-2]);
     
-    result &= (ratio < accelerationBound) & (cosine > deviationBound);
+    result = (ratio < accelerationBound) & (cosine > deviationBound);
   }
   return result;
 }
@@ -111,6 +111,11 @@
   displacementDistances.pop_back();
 }
 
+void FeatureTrajectory::movingAverage(const unsigned int& nFramesSmoothing) {
+  positions->movingAverage(nFramesSmoothing);
+  velocities->movingAverage(nFramesSmoothing);
+}
+
 void FeatureTrajectory::write(TrajectoryDBAccess<Point2f>& trajectoryDB, const string& positionsTableName, const string& velocitiesTableName) const {
   trajectoryDB.write(*positions, positionsTableName);
   trajectoryDB.write(*velocities, velocitiesTableName);
@@ -164,7 +169,7 @@
 	if (ft->minMaxSimilarity(*ft2, firstInstant, lastInstant, connectionDistance, segmentationDistance)) {
 	  UndirectedGraph::edge_descriptor e;
 	  bool unused;
-	  tie(e, unused) = add_edge(newVertex, *vi, graph);
+	  boost::tuples::tie(e, unused) = add_edge(newVertex, *vi, graph);
 	  // no need to add measures to graph[e] (edge properties)
 	}
       }
@@ -185,7 +190,7 @@
   vector<vector<vertex_descriptor> > tmpobjects(num), objects; // vector of components (component = vector of vertex descriptors)
 
   graph_traits<UndirectedGraph>::vertex_iterator vi, vend;
-  for(tie(vi,vend) = vertices(graph); vi != vend; ++vi) {
+  for(boost::tuples::tie(vi,vend) = vertices(graph); vi != vend; ++vi) {
     unsigned int id = components[*vi];
     lastInstants[id] = max(lastInstants[id], graph[*vi].feature->getLastInstant());
     tmpobjects[id].push_back(*vi);
@@ -201,8 +206,8 @@
   }
 }
 
-vector<vector<unsigned int> > FeatureGraph::getFeatureGroups(void) {
-  vector<vector<unsigned int> > featureGroups;
+void FeatureGraph::getFeatureGroups(vector<vector<FeatureTrajectoryPtr> >& featureGroups) {
+  featureGroups.clear();
 
   for (unsigned int i=0; i<objectHypotheses.size(); ++i) {
     // check that there is on average at least minNFeaturesPerGroup features at each frame in the group
@@ -218,9 +223,9 @@
 #if DEBUG
       cout << "save group " << i << " of " << objectHypotheses[i].size() << " features " << endl;
 #endif
-      featureGroups.push_back(vector<unsigned int>());
+      featureGroups.push_back(vector<FeatureTrajectoryPtr>());
       for (unsigned int j=0; j<objectHypotheses[i].size(); ++j) {
-	featureGroups.back().push_back(graph[objectHypotheses[i][j]].feature->getId());
+	featureGroups.back().push_back(graph[objectHypotheses[i][j]].feature);
 #if DEBUG
 	cout << featureGroups.size() << " " << objectHypotheses[i][j] << endl;
 #endif
@@ -229,8 +234,6 @@
       }
     }
   }
-
-  return featureGroups;
 }
 
 string FeatureGraph::informationString(void) const {
@@ -246,6 +249,6 @@
 void FeatureGraph::computeVertexIndex(void) {
   graph_traits<FeatureGraph::UndirectedGraph>::vertex_iterator vi, vend;
   graph_traits<FeatureGraph::UndirectedGraph>::vertices_size_type cnt = 0;
-  for(tie(vi,vend) = vertices(graph); vi != vend; ++vi)
+  for(boost::tuples::tie(vi,vend) = vertices(graph); vi != vend; ++vi)
     graph[*vi].index = cnt++;
 }
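FeatureGraph::getFeatureGroups changes signature here: instead of returning the feature ids per group, it now fills a vector of groups of FeatureTrajectoryPtr, and callers extract the ids themselves. A minimal sketch of the new call pattern, assuming a FeatureGraph on which connectedComponents() has already been run (it mirrors the updated caller in c/feature-based-tracking.cpp below):

#include "Motion.hpp"

#include <vector>

// Collect the feature ids of each group returned by the graph, e.g. to pass
// them to TrajectoryDBAccess::writeObject as feature-based-tracking.cpp does.
std::vector<std::vector<unsigned int> > getGroupIds(FeatureGraph& featureGraph) {
  std::vector<std::vector<FeatureTrajectoryPtr> > featureGroups;
  featureGraph.getFeatureGroups(featureGroups); // fills the vector (the old version returned ids)
  std::vector<std::vector<unsigned int> > groupIds(featureGroups.size());
  for (unsigned int i = 0; i < featureGroups.size(); ++i)
    for (unsigned int j = 0; j < featureGroups[i].size(); ++j)
      groupIds[i].push_back(featureGroups[i][j]->getId());
  return groupIds;
}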
--- a/c/Parameters.cpp	Thu Apr 18 15:29:33 2013 -0400
+++ b/c/Parameters.cpp	Fri Dec 05 12:13:53 2014 -0500
@@ -18,7 +18,7 @@
     ("help,h", "displays this help message")
     ("tf", "tracks features")
     ("gf", "groups features")
-    ("config-file", po::value<string>(&configurationFilename)->default_value("tracking.cfg"), "configuration file")
+    ("config-file", po::value<string>(&configurationFilename), "configuration file")
     ;
 
   po::positional_options_description p;
@@ -29,38 +29,55 @@
     ("video-filename", po::value<string>(&videoFilename), "filename of the video to process")
     ("database-filename", po::value<string>(&databaseFilename), "filename of the database where results are saved")
     ("homography-filename", po::value<string>(&homographyFilename), "filename of the homography matrix")
+    ("intrinsic-camera-filename", po::value<string>(&intrinsicCameraFilename), "filename of the homography matrix")
+    ("distortion-coefficients", po::value<std::vector<float> >(&distortionCoefficients)->multitoken(), "")
+    ("undistorted-size-multiplication", po::value<float>(&undistortedImageMultiplication), "undistorted image multiplication")
+    ("interpolation-method", po::value<int>(&interpolationMethod), "Interpolation method for remapping image when correcting for distortion: 0 for INTER_NEAREST - a nearest-neighbor interpolation; 1 for INTER_LINEAR - a bilinear interpolation (used by default); 2 for INTER_CUBIC - a bicubic interpolation over 4x4 pixel neighborhood; 3 for INTER_LANCZOS4")
     ("mask-filename", po::value<string>(&maskFilename), "filename of the mask image (where features are detected)")
+    ("undistort", po::value<bool>(&undistort), "undistort the video for feature tracking")
     ("load-features", po::value<bool>(&loadFeatures), "load features from database")
     ("display", po::value<bool>(&display), "display trajectories on the video")
     ("video-fps", po::value<float>(&videoFPS), "original video frame rate")
     ("frame1", po::value<unsigned int>(&frame1), "first frame to process")
     ("nframes", po::value<int>(&nFrames), "number of frame to process")
     // feature tracking
-    ("max-nfeatures", po::value<int>(&maxNFeatures), "maximum number of features added at each frame")
-    ("feature-quality", po::value<float>(&featureQuality), "quality level of the good features to track")
-    ("min-feature-distanceklt", po::value<float>(&minFeatureDistanceKLT), "minimum distance between features")
-    ("window-size", po::value<int>(&windowSize), "size of the search window at each pyramid level")
+    ("max-nfeatures", po::value<int>(&maxNFeatures), "maximum number of features added at each frame (1000s)")
+    ("feature-quality", po::value<float>(&featureQuality), "quality level of the good features to track (]0. 1?])")
+    ("min-feature-distanceklt", po::value<float>(&minFeatureDistanceKLT), "minimum distance between features (]0. 10?])")
+    ("block-size", po::value<int>(&blockSize), "size of the block for feature characteristics ([1 ?])")
     ("use-harris-detector", po::value<bool>(&useHarrisDetector), "use of Harris corner detector")
     ("k", po::value<float>(&k), "k parameter to detect good features to track (OpenCV)")
-    ("pyramid-level", po::value<int>(&pyramidLevel), "maximal pyramid level in the feature tracking algorithm")
-    ("ndisplacements", po::value<unsigned int>(&nDisplacements), "number of displacement to test minimum feature motion")
-    ("min-feature-displacement", po::value<float>(&minFeatureDisplacement), "minimum displacement to keep features")
-    ("acceleration-bound", po::value<float>(&accelerationBound), "maximum feature acceleration")
-    ("deviation-bound", po::value<float>(&deviationBound), "maximum feature deviation")
-    ("smoothing-halfwidth", po::value<int>(&nFramesSmoothing), "number of frames to smooth positions (half window)")
-    ("max-number-iterations", po::value<int>(&maxNumberTrackingIterations), "maximum number of iterations to stop feature tracking")
-    ("min-tracking-error", po::value<float>(&minTrackingError), "minimum error to reach to stop feature tracking")
-    ("min-feature-time", po::value<unsigned int>(&minFeatureTime), "minimum length of a feature (number of frames) to consider a feature for grouping")
-    ("mm-connection-distance", po::value<float>(&mmConnectionDistance), "connection distance in feature grouping")
-    ("mm-segmentation-distance", po::value<float>(&mmSegmentationDistance), "segmentation distance in feature grouping")
-    ("max-distance", po::value<float>(&maxDistance), "maximum distance between features for grouping")
-    ("min-velocity-cosine", po::value<float>(&minVelocityCosine), "minimum cosine of the angle between the velocity vectors for grouping")
-    ("min-nfeatures-group", po::value<float>(&minNFeaturesPerGroup), "minimum average number of features per frame to create a vehicle hypothesis")
-    ;
+    ("window-size", po::value<int>(&windowSize), "size of the search window at each pyramid level ([1 ?])")
+    ("pyramid-level", po::value<int>(&pyramidLevel), "maximal pyramid level in the feature tracking algorithm ([0 maxLevel=5?])")
+    ("ndisplacements", po::value<unsigned int>(&nDisplacements), "number of displacements to test minimum feature motion ([2 4])")
+    ("min-feature-displacement", po::value<float>(&minFeatureDisplacement), "minimum displacement per frame (in world space) to keep features (]0. 0.1?])")
+    ("acceleration-bound", po::value<float>(&accelerationBound), "maximum feature acceleration (]1 3+])")
+    ("deviation-bound", po::value<float>(&deviationBound), "maximum feature deviation (on cosine) (]0 1])")
+    ("smoothing-halfwidth", po::value<int>(&nFramesSmoothing), "number of frames to smooth positions (half window) ([0 inf[")
+    ("max-number-iterations", po::value<int>(&maxNumberTrackingIterations), "maximum number of iterations to stop optical flow (20-30?)")
+    ("min-tracking-error", po::value<float>(&minTrackingError), "minimum error to reach to stop optical flow (0.3-0.01)")
+    ("min-feature-eig-threshold", po::value<float>(&minFeatureEigThreshold)->default_value(1e-4), "minimum eigen value of a 2x2 normal matrix of optical flow equations (10^-4)")
+    ("min-feature-time", po::value<unsigned int>(&minFeatureTime), "minimum length of a feature (number of frames) to consider a feature for grouping [5 20+]")
+    ("mm-connection-distance", po::value<float>(&mmConnectionDistance), "connection distance in feature grouping (in world space) (ped: [0.5m 2m+], cars: [1.7m 4m+])")
+    ("mm-segmentation-distance", po::value<float>(&mmSegmentationDistance), "segmentation distance in feature grouping (in world space) (< mm-connection-distance, empirically ~ mm-connection-distance / 2.5)")
+    ("max-distance", po::value<float>(&maxDistance), "maximum distance between features for grouping (in world space) (unused)")
+    ("min-velocity-cosine", po::value<float>(&minVelocityCosine), "minimum cosine of the angle between the velocity vectors for grouping (unused)")
+    ("min-nfeatures-group", po::value<float>(&minNFeaturesPerGroup), "minimum average number of features per frame to create a vehicle hypothesis (]1 3+])")
     // ("max-uturn-cosine", po::value<float>(&maxUTurnCosine), "maximum cosine value to detect U-turn")
     // ("nframes-avoid-uturn", po::value<int>(&nFramesAvoidUTurn), "number of frames over which a feature should not make a U-turn")
-
-
+    // Safety Analysis
+    ("max-predicted-speed", po::value<float>(&maxPredictedSpeed)->default_value(50.), "maximum speed when predicting future motion (km/h)")
+    ("prediction-time-horizon", po::value<float>(&predictionTimeHorizon)->default_value(5.), "time horizon for collision prediction (s)")
+    ("collision-distance", po::value<float>(&collisionDistance)->default_value(1.8), "collision distance threshold (m)")
+    ("crossing-zones", po::value<bool>(&crossingZones)->default_value(false), "option to compute crossing zones and predicted PET")
+    ("prediction-method", po::value<string>(&predictionMethod)->default_value("na"), "prediction method")
+    ("npredicted-trajectories", po::value<int>(&nPredictedTrajectories)->default_value(1), "number of predicted trajectories (use depends on prediction method)")
+    ("min-acceleration", po::value<float>(&minAcceleration)->default_value(-9.1), "minimum acceleration for input distribution (m/s2) (used only for evasive action distributions)")
+    ("max-acceleration", po::value<float>(&maxAcceleration)->default_value(2.), "maximum acceleration for input distribution (m/s2)")
+    ("max-steering", po::value<float>(&maxSteering)->default_value(0.5), "maximum steering for input distribution (rad/s)")
+    ("use-features-prediction", po::value<bool>(&useFeaturesForPrediction)->default_value(false), "use feature positions and velocities for prediction")
+    ;
+  
   po::options_description cmdLine;
   cmdLine.add(onlyCmdLine).add(cmdLineAndFile);
   try {
@@ -69,10 +86,16 @@
 	  options(cmdLine).positional(p).allow_unregistered().run(), vm);
     notify(vm);
 
+    if (vm.count("config-file") == 0) {
+      cout << "Missing configuration file" << endl;
+      cout << cmdLine << endl;
+      exit(0);      
+    }
+      
     cout << "Using configuration file " << configurationFilename << endl;
 
     ifstream configurationFile(configurationFilename.c_str());
-    store(po::parse_config_file(configurationFile, cmdLineAndFile), vm);
+    store(po::parse_config_file(configurationFile, cmdLineAndFile, true), vm);
     notify(vm);
 
     parameterDescription = getParameterDescription(cmdLineAndFile, vm);
@@ -108,7 +131,10 @@
       stream << boost::any_cast<float>(value) << separator;
     else if (value.type() == typeid(string))
       stream << boost::any_cast<string>(value) << separator;
-    else
+    else if (value.type() == typeid(vector<float>)) {
+      for (unsigned int j=0; j<boost::any_cast<vector<float> >(value).size(); j++)
+	     stream << boost::any_cast<vector<float> >(value)[j] << separator;
+    } else
       cerr << "the type of the option " << optionsVec[i]->long_name() << " (" << i << ") is not int, float or string." << endl;
   }
 
--- a/c/cvutils.cpp	Thu Apr 18 15:29:33 2013 -0400
+++ b/c/cvutils.cpp	Fri Dec 05 12:13:53 2014 -0500
@@ -49,34 +49,34 @@
     pts.push_back(kpts[i].pt);
 }
 
-IplImage* allocateImage(const int& width, const int& height, const int& depth, const int& channels) { return ::allocateImage(cvSize(width, height), depth, channels);}
+// IplImage* allocateImage(const int& width, const int& height, const int& depth, const int& channels) { return ::allocateImage(cvSize(width, height), depth, channels);}
 
-IplImage* allocateImage(const CvSize& size, const int& depth, const int& channels) {
-  IplImage* image = cvCreateImage(size, depth, channels);
+// IplImage* allocateImage(const CvSize& size, const int& depth, const int& channels) {
+//   IplImage* image = cvCreateImage(size, depth, channels);
 
-  if (!image) {
-    cerr << "Error: Couldn't allocate image.  Out of memory?\n" << endl;
-    exit(-1);
-  }
+//   if (!image) {
+//     cerr << "Error: Couldn't allocate image.  Out of memory?\n" << endl;
+//     exit(-1);
+//   }
 
-  return image;
-}
+//   return image;
+// }
 
-int goToFrameNum(CvCapture* inputVideo, const int& currentFrameNum, const int& targetFrameNum) {
-  int frameNum = currentFrameNum;
-  if (currentFrameNum > targetFrameNum) {
-    cerr << "Current frame number " << currentFrameNum << " is after the target frame number " << targetFrameNum << "." << endl;
-  } else if (currentFrameNum < targetFrameNum) {
-    IplImage* frame = cvQueryFrame(inputVideo);
-    frameNum++;
-    while (frame && frameNum<targetFrameNum) {
-      frame = cvQueryFrame(inputVideo);
-      frameNum++;
-    }
-  }
+// int goToFrameNum(CvCapture* inputVideo, const int& currentFrameNum, const int& targetFrameNum) {
+//   int frameNum = currentFrameNum;
+//   if (currentFrameNum > targetFrameNum) {
+//     cerr << "Current frame number " << currentFrameNum << " is after the target frame number " << targetFrameNum << "." << endl;
+//   } else if (currentFrameNum < targetFrameNum) {
+//     IplImage* frame = cvQueryFrame(inputVideo);
+//     frameNum++;
+//     while (frame && frameNum<targetFrameNum) {
+//       frame = cvQueryFrame(inputVideo);
+//       frameNum++;
+//     }
+//   }
   
-  return frameNum;
-}
+//   return frameNum;
+// }
 
 const Scalar Colors::color[] = {Colors::red(),
 				Colors::green(),
@@ -87,14 +87,15 @@
 				Colors::white(), 
 				Colors::black()};
 
+// Blue, Green, Red
 Scalar Colors::black(void) { return Scalar(0,0,0);}
-Scalar Colors::red(void) { return Scalar(255,0,0);}
+Scalar Colors::blue(void) { return Scalar(255,0,0);}
 Scalar Colors::green(void) { return Scalar(0,255,0);}
-Scalar Colors::blue(void) { return Scalar(0,0,255);}
+Scalar Colors::red(void) { return Scalar(0,0,255);}
 Scalar Colors::white(void) { return Scalar(255,255,255);}
 Scalar Colors::magenta(void) { return Scalar(255,0,255);}
-Scalar Colors::cyan(void) { return Scalar(0,255,255);}
-Scalar Colors::yellow(void) { return Scalar(255,255,0);}
+Scalar Colors::yellow(void) { return Scalar(0,255,255);}
+Scalar Colors::cyan(void) { return Scalar(255,255,0);}
 
 Scalar Colors::color3(const int& num) { return Colors::color[num%3];}
 Scalar Colors::color8(const int& num) { return Colors::color[num%Colors::nColors];}
--- a/c/feature-based-tracking.cpp	Thu Apr 18 15:29:33 2013 -0400
+++ b/c/feature-based-tracking.cpp	Fri Dec 05 12:13:53 2014 -0500
@@ -2,6 +2,8 @@
 #include "Parameters.hpp"
 #include "cvutils.hpp"
 #include "utils.hpp"
+#include "InputVideoFileModule.h"
+#include "InputFrameListModule.h"
 
 #include "src/Trajectory.h"
 #include "src/TrajectoryDBAccessList.h"
@@ -16,13 +18,16 @@
 
 #include <boost/shared_ptr.hpp>
 #include <boost/foreach.hpp>
+#include <boost/filesystem.hpp>
 
 #include <iostream>
 #include <vector>
 #include <ctime>
+#include <cmath>
 
 using namespace std;
 using namespace cv;
+namespace fs = boost::filesystem;
 
 void drawMatchesRelative(const vector<KeyPoint>& train, const vector<KeyPoint>& query, std::vector<cv::DMatch>& matches, Mat& img) {
   for (int i = 0; i < (int)matches.size(); i++)
@@ -54,12 +59,9 @@
     feature(_feature), pointNum(_pointNum) {}
 };
 
-inline void saveFeatures(vector<FeatureTrajectoryPtr>& features, TrajectoryDBAccess<Point2f>& db, const string& positionsTableName, const string& velocitiesTableName, const unsigned int& minNFeatures = 0) {
-  /// \todo smoothing
-  if (features.size() >= minNFeatures) {
-    BOOST_FOREACH(FeatureTrajectoryPtr f, features) f->write(db, positionsTableName, velocitiesTableName);
-    features.clear();
-  }
+inline void saveFeatures(vector<FeatureTrajectoryPtr>& features, TrajectoryDBAccess<Point2f>& db, const string& positionsTableName, const string& velocitiesTableName) {
+  BOOST_FOREACH(FeatureTrajectoryPtr f, features) f->write(db, positionsTableName, velocitiesTableName);
+  features.clear();
 }
 
 void trackFeatures(const KLTFeatureTrackingParameters& params) {
@@ -75,34 +77,59 @@
   float minTotalFeatureDisplacement = params.nDisplacements*params.minFeatureDisplacement;
   Size window = Size(params.windowSize, params.windowSize);
 
+  int interpolationMethod = -1;
+  if (params.interpolationMethod == 0)
+    interpolationMethod = INTER_NEAREST;
+  else if (params.interpolationMethod == 1)
+    interpolationMethod = INTER_LINEAR;
+  else if (params.interpolationMethod == 2)
+    interpolationMethod = INTER_CUBIC;
+  else if (params.interpolationMethod == 3)
+    interpolationMethod = INTER_LANCZOS4;
+  else
+    cout << "Unsupported option " << interpolationMethod << " for interpolation method" << endl;
+
   // BruteForceMatcher<Hamming> descMatcher;
   // vector<DMatch> matches;
 
-  VideoCapture capture;
-  Size videoSize;
-  unsigned int nFrames = 0;
-  capture.open(params.videoFilename);
-  if(capture.isOpened()) {
-    videoSize = Size(capture.get(CV_CAP_PROP_FRAME_WIDTH), capture.get(CV_CAP_PROP_FRAME_HEIGHT));
-    nFrames = capture.get(CV_CAP_PROP_FRAME_COUNT);
-    cout << "Video " << params.videoFilename <<
-      ": width=" << videoSize.width <<
-      ", height=" << videoSize.height <<
-      ", nframes=" << nFrames << endl;
-  } else {
+  boost::shared_ptr<InputFrameProviderIface> capture;
+  if (fs::is_directory(fs::path(params.videoFilename)))
+    capture = boost::shared_ptr<InputFrameListModule>(new InputFrameListModule(params.videoFilename));
+  else if(!params.videoFilename.empty())
+    capture = boost::shared_ptr<InputVideoFileModule>(new InputVideoFileModule(params.videoFilename));
+  else
+    cout << "No valid input parameters" << endl;
+  
+  if(!capture->isOpen()) {
     cout << "Video filename " << params.videoFilename << " could not be opened. Exiting." << endl;
     exit(0);
   }
-  // if (!capture.isOpened())
-  //   {
-  //     //help(argv);
-  //     cout << "capture device " << argv[1] << " failed to open!" << endl;
-  //     return 1;
-  //   }
+  
+  Size videoSize = capture->getSize();
+  unsigned int nFrames = capture->getNbFrames();
+  cout << "Video " << params.videoFilename <<
+	  ": width=" << videoSize.width <<
+	  ", height=" << videoSize.height <<
+	  ", nframes=" << nFrames << endl;
 
+  Mat map1, map2;
+  if (params.undistort) {
+    Mat intrinsicCameraMatrix = ::loadMat(params.intrinsicCameraFilename, " ");
+    Mat newIntrinsicCameraMatrix = intrinsicCameraMatrix.clone(); 
+    videoSize = Size(static_cast<int>(round(videoSize.width*params.undistortedImageMultiplication)), static_cast<int>(round(videoSize.height*params.undistortedImageMultiplication)));
+    newIntrinsicCameraMatrix.at<float>(0,2) = videoSize.width/2.;
+    newIntrinsicCameraMatrix.at<float>(1,2) = videoSize.height/2.;
+    initUndistortRectifyMap(intrinsicCameraMatrix, params.distortionCoefficients, Mat::eye(3,3, CV_32FC1), newIntrinsicCameraMatrix, videoSize, CV_32FC1, map1, map2);
+    
+    cout << "Undistorted width=" << videoSize.width <<
+      ", height=" << videoSize.height << endl;
+  }
+  
   Mat mask = imread(params.maskFilename, 0);
-  if (mask.empty())
+  if (mask.empty()) {
+    cout << "Mask filename " << params.maskFilename << " could not be opened." << endl;
     mask = Mat::ones(videoSize, CV_8UC1);
+  }
 
   boost::shared_ptr<TrajectoryDBAccess<Point2f> > trajectoryDB = boost::shared_ptr<TrajectoryDBAccess<Point2f> >(new TrajectoryDBAccessList<Point2f>());
   //TrajectoryDBAccess<Point2f>* trajectoryDB = new TrajectoryDBAccessBlob<Point2f>();
@@ -120,46 +147,39 @@
   std::vector<FeatureTrajectoryPtr> lostFeatures;
   std::vector<FeaturePointMatch> featurePointMatches;
 
-  HOGDescriptor hog;
-  hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
-
   int key = '?';
   unsigned int savedFeatureId=0;
-  Mat frame = Mat::zeros(1, 1, CV_8UC1), currentFrameBW, previousFrameBW;
+  Mat frame = Mat::zeros(1, 1, CV_8UC1), currentFrameBW, previousFrameBW, undistortedFrame;
 
   unsigned int lastFrameNum = nFrames;
   if (params.nFrames > 0)
     lastFrameNum = MIN(params.frame1+static_cast<unsigned int>(params.nFrames), nFrames);
-  
-  //capture.set(CV_CAP_PROP_POS_FRAMES, params.frame1);
+
+  capture->setFrameNumber(params.frame1);
   for (unsigned int frameNum = params.frame1; (frameNum < lastFrameNum) && !::interruptionKey(key); frameNum++) {
-      capture >> frame;
-
-      if (frame.empty() || frame.size() != videoSize)
+      bool success = capture->getNextFrame(frame);
+      if (!success || frame.empty()) {
+	cout << "Empty frame " << frameNum << ", breaking (" << success << " " << frame.empty() << " [" << frame.size().width << "x" << frame.size().height << "])" << endl;
 	break;
-
-      if (frameNum%50 ==0)
+      } else if (frameNum%50 ==0)
 	cout << "frame " << frameNum << endl;
 
-      //capture.get(CV_CAP_PROP_POS_FRAMES) << " " << prevPts.size() << endl;
+      if (params.undistort) {
+	remap(frame, undistortedFrame, map1, map2, interpolationMethod, BORDER_CONSTANT, 0.);
+	frame = undistortedFrame;
 
-      // int emptyFrameNum = 0;
-      // while (frame.empty()) {
-      // 	cerr << "empty frame " << emptyFrameNum  << " " << capture.get(CV_CAP_PROP_POS_FRAMES)<< endl;
-      // 	capture >> frame;//break;
-      // 	emptyFrameNum++;
-      // 	if (emptyFrameNum>=3000)
-      // 	  exit(0);
-      // }
+	if (frame.size() != videoSize) {
+	  cout << "Different frame size " << frameNum << ", breaking ([" << frame.size().width << "x" << frame.size().height << "])" << endl;
+	break;
+	}
+      }
+
       
       cvtColor(frame, currentFrameBW, CV_RGB2GRAY);
-      // "normal" feature detectors: detect features here
-      // detector.detect(currentFrameBW, currKpts); // see video_homography c++ sample
       
       if (!prevPts.empty()) {
-	//::keyPoints2Points(prevKpts, prevPts);
 	currPts.clear();
-	calcOpticalFlowPyrLK(previousFrameBW, currentFrameBW, prevPts, currPts, status, errors, window, params.pyramidLevel, TermCriteria(3 /*static_cast<int>(TermCriteria::COUNT)+static_cast<int>(TermCriteria::EPS)*/, params.maxNumberTrackingIterations, params.minTrackingError), 0.5 /* unused */, 0); // OPTFLOW_USE_INITIAL_FLOW
+	calcOpticalFlowPyrLK(previousFrameBW, currentFrameBW, prevPts, currPts, status, errors, window, params.pyramidLevel, TermCriteria(static_cast<int>(TermCriteria::COUNT)+static_cast<int>(TermCriteria::EPS) /* = 3 */, params.maxNumberTrackingIterations, params.minTrackingError), /* int flags = */ 0, params.minFeatureEigThreshold);
 	/// \todo try calcOpticalFlowFarneback
 
 	std::vector<Point2f> trackedPts;
@@ -170,7 +190,7 @@
 	  if (status[iter->pointNum]) {
 	    iter->feature->addPoint(frameNum, currPts[iter->pointNum], homography);
 
-	    deleteFeature |= iter->feature->isDisplacementSmall(params.nDisplacements, minTotalFeatureDisplacement)
+	    deleteFeature = iter->feature->isDisplacementSmall(params.nDisplacements, minTotalFeatureDisplacement)
 	      || !iter->feature->isMotionSmooth(params.accelerationBound, params.deviationBound);
 	    if (deleteFeature)
 	      iter->feature->shorten();
@@ -181,6 +201,7 @@
 	    if (iter->feature->length() >= params.minFeatureTime) {
 	      iter->feature->setId(savedFeatureId);
 	      savedFeatureId++;
+	      iter->feature->movingAverage(params.nFramesSmoothing);
 	      lostFeatures.push_back(iter->feature);
 	    }
 	    iter = featurePointMatches.erase(iter);
@@ -203,12 +224,6 @@
 	  // BOOST_FOREACH(Rect r, locations)
 	  //   rectangle(frame, r.tl(), r.br(), cv::Scalar(0,255,0), 3);
 	}
-	//drawOpticalFlow(prevPts, currPts, status, frame);
-	
-	// cout << matches.size() << " matches" << endl;
-	// descMatcher.match(currDesc, prevDesc, matches);
-	// cout << matches.size() << " matches" << endl;
-	//drawMatchesRelative(prevKpts, currKpts, matches, frame);
       }
       
       // adding new features, using mask around existing feature positions
@@ -217,27 +232,33 @@
 	for (int j=MAX(0, currPts[n].x-params.minFeatureDistanceKLT); j<MIN(videoSize.width, currPts[n].x+params.minFeatureDistanceKLT+1); j++)
 	  for (int i=MAX(0, currPts[n].y-params.minFeatureDistanceKLT); i<MIN(videoSize.height, currPts[n].y+params.minFeatureDistanceKLT+1); i++)
 	    featureMask.at<uchar>(i,j)=0;
-      goodFeaturesToTrack(currentFrameBW, newPts, params.maxNFeatures, params.featureQuality, params.minFeatureDistanceKLT, featureMask, params.windowSize, params.useHarrisDetector, params.k);
+      goodFeaturesToTrack(currentFrameBW, newPts, params.maxNFeatures, params.featureQuality, params.minFeatureDistanceKLT, featureMask, params.blockSize, params.useHarrisDetector, params.k);
       BOOST_FOREACH(Point2f p, newPts) { //for (unsigned int i=0; i<newPts.size(); i++) {
 	FeatureTrajectoryPtr f = FeatureTrajectoryPtr(new FeatureTrajectory(frameNum, p, homography));
 	featurePointMatches.push_back(FeaturePointMatch(f, currPts.size()));
 	currPts.push_back(p);
       }
-      // currPts.insert(currPts.end(), newPts.begin(), newPts.end());
-      //::keyPoints2Points(currKpts, currPts, false);
-
-      //brief.compute(currentFrameBW, currKpts, currDesc); //Compute brief descriptors at each keypoint location
       
       if (params.display) {
+	imshow("mask", featureMask*256);
 	imshow("frame", frame);
-	imshow("mask", featureMask*256);
 	key = waitKey(2);
       }
       previousFrameBW = currentFrameBW.clone();
       prevPts = currPts;
-      //prevKpts = currKpts;
-      //currDesc.copyTo(prevDesc);
-    }  
+  }
+
+  // save the remaining currently tracked features
+  std::vector<FeaturePointMatch>::iterator iter = featurePointMatches.begin();
+  while (iter != featurePointMatches.end()) {
+    if (iter->feature->length() >= params.minFeatureTime) {
+      iter->feature->setId(savedFeatureId);
+      savedFeatureId++;
+      iter->feature->movingAverage(params.nFramesSmoothing);
+      iter->feature->write(*trajectoryDB, "positions", "velocities");
+    }
+    iter++;
+  }
   
   trajectoryDB->endTransaction();
   trajectoryDB->disconnect();
@@ -312,9 +333,13 @@
     int lastInstant = frameNum+params.minFeatureTime-maxTrajectoryLength;
     if (lastInstant > 0 && frameNum%10==0) {
       featureGraph.connectedComponents(lastInstant);
-      vector<vector<unsigned int> > featureGroups = featureGraph.getFeatureGroups();
+      vector<vector<FeatureTrajectoryPtr> > featureGroups;
+      featureGraph.getFeatureGroups(featureGroups);
       for (unsigned int i=0; i<featureGroups.size(); ++i) {
-	trajectoryDB->writeObject(savedObjectId, featureGroups[i], -1, 1, string("objects"), string("objects_features"));
+	vector<unsigned int> featureNumbers;
+	for (unsigned int j=0; j<featureGroups[i].size(); ++j)
+	  featureNumbers.push_back(featureGroups[i][j]->getId());
+	trajectoryDB->writeObject(savedObjectId, featureNumbers, 0 /* unknown */, 1, string("objects"), string("objects_features"));
 	savedObjectId++;
       }
     }
@@ -325,9 +350,13 @@
 
   // save remaining objects
   featureGraph.connectedComponents(frameNum+maxTrajectoryLength+1);
-  vector<vector<unsigned int> > featureGroups = featureGraph.getFeatureGroups();
+  vector<vector<FeatureTrajectoryPtr> > featureGroups;
+  featureGraph.getFeatureGroups(featureGroups);
   for (unsigned int i=0; i<featureGroups.size(); ++i) {
-    trajectoryDB->writeObject(savedObjectId, featureGroups[i], -1, 1, string("objects"), string("objects_features"));
+    vector<unsigned int> featureNumbers;
+    for (unsigned int j=0; j<featureGroups[i].size(); ++j)
+      featureNumbers.push_back(featureGroups[i][j]->getId());
+    trajectoryDB->writeObject(savedObjectId, featureNumbers, 0 /* unknown */, 1, string("objects"), string("objects_features"));
     savedObjectId++;
   }
 
@@ -345,6 +374,8 @@
   } else if (params.groupFeatures) {
     cout << "The program groups features" << endl;
     groupFeatures(params);
+  } else {
+    cout << "Main option missing or misspelt" << endl;
   }
 
   return 0;
--- a/c/test_feature.cpp	Thu Apr 18 15:29:33 2013 -0400
+++ b/c/test_feature.cpp	Fri Dec 05 12:13:53 2014 -0500
@@ -10,6 +10,38 @@
 using namespace std;
 using namespace cv;
 
+TEST_CASE("trajectory/smoothing", "test trajectory smoothing (from trajectory management library)") {
+  TrajectoryPoint2f t1;
+  for(int i=0; i<20;++i)
+    t1.add(i, cv::Point2f(1+i, 1+0.5*i));
+
+  TrajectoryPoint2f t2(t1);
+  t2.movingAverage(3);
+  for(int i=0; i<20;++i) 
+    REQUIRE(t1.getPoint(i) == t2.getPoint(i));
+  t1.clear();
+  cv::Point2f p0(1,1);
+  cv::Point2f p1(2,2);
+  cv::Point2f p2(2.4,3);
+  cv::Point2f p3(3.1,3.4);
+  cv::Point2f p4(3.4,4);
+  cv::Point2f p5(3.6,4.5);
+  
+  t1.add(0, p0);
+  t1.add(1, p1);
+  t1.add(2, p2);
+  t1.add(3, p3);
+  t1.add(4, p4);
+  t1.add(5, p5);
+  t1.movingAverage(2);
+  REQUIRE(t1.getPoint(0) == p0);
+  REQUIRE(t1.getPoint(1) == (p0+p1+p2)*(1./3.));
+  REQUIRE(t1.getPoint(2) == (p0+p1+p2+p3+p4)*(1./5.));
+  REQUIRE(t1.getPoint(3) == (p1+p2+p3+p4+p5)*(1./5.));
+  REQUIRE(t1.getPoint(4) == (p3+p4+p5)*(1./3.));
+  REQUIRE(t1.getPoint(5) == p5);
+}
+
 TEST_CASE("features/similarity", "test feature similarity measure") {
   FeatureTrajectoryPtr ft1 = createFeatureTrajectory(1, 10, 20, Point2f(1,1), Point2f(0, 1));
   FeatureTrajectoryPtr ft2 = createFeatureTrajectory(2, 10, 20, Point2f(2,1), Point2f(0, 1));
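The new trajectory/smoothing test above also pins down what movingAverage(n) (from the trajectory management library, now called through FeatureTrajectory::movingAverage) is expected to compute: a centred average whose half-width shrinks near the trajectory ends. Written out (an inference from the test expectations, not from the library source), for positions p_0, ..., p_{N-1}:

\hat{p}_i = \frac{1}{2k_i+1}\sum_{j=-k_i}^{k_i} p_{i+j}, \qquad k_i = \min(n,\ i,\ N-1-i)

With N=6 and n=2 this gives exactly the values the test requires (p_0, (p_0+p_1+p_2)/3, five-point averages in the middle, p_5), and it leaves the 20-point straight-line trajectory of the first check unchanged, since a centred average of equally spaced collinear points is the centre point itself.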
--- a/c/test_graph.cpp	Thu Apr 18 15:29:33 2013 -0400
+++ b/c/test_graph.cpp	Fri Dec 05 12:13:53 2014 -0500
@@ -11,10 +11,14 @@
 using namespace cv;
 
 TEST_CASE("graph/connected_components", "test graph connected components") {
-  FeatureGraph featureGraph(5, 1, 5 , 1.); // (float _connectionDistance, float _segmentationDistance, unsigned int _minFeatureTime, float _minNFeaturesPerGroup)
-  unsigned int lastInstant = 20;
-  FeatureTrajectoryPtr ft1 = createFeatureTrajectory(1, 10, lastInstant, Point2f(1,1), Point2f(0.5, 0.));
-  FeatureTrajectoryPtr ft2 = createFeatureTrajectory(2, 10, lastInstant, Point2f(1.1,1), Point2f(0.5, 0.));
+  float connectionDistance = 5.;
+  float segmentationDistance = 1.;
+  unsigned int minFeatureTime = 5;
+  float minNFeaturesPerGroup = 0.99;
+  FeatureGraph featureGraph(connectionDistance, segmentationDistance, minFeatureTime, minNFeaturesPerGroup);
+  unsigned int firstInstant = 10, lastInstant = 20;
+  FeatureTrajectoryPtr ft1 = createFeatureTrajectory(1, firstInstant, lastInstant, Point2f(1,1), Point2f(0.5, 0.));
+  FeatureTrajectoryPtr ft2 = createFeatureTrajectory(2, firstInstant, lastInstant, Point2f(1.1,1), Point2f(0.5, 0.));
 
   featureGraph.addFeature(ft1);
   REQUIRE(featureGraph.getNVertices() == 1);
@@ -25,13 +29,14 @@
   REQUIRE(featureGraph.getNEdges() == 1);
 
   featureGraph.connectedComponents(lastInstant);
-  vector<vector<unsigned int> > components = featureGraph.getFeatureGroups();
+  vector<vector<FeatureTrajectoryPtr> > components;
+  featureGraph.getFeatureGroups(components);
   REQUIRE(components.size() == 0);
   REQUIRE(featureGraph.getNVertices() == 2);
   REQUIRE(featureGraph.getNEdges() == 1);
 
   featureGraph.connectedComponents(lastInstant+1);
-  components = featureGraph.getFeatureGroups();
+  featureGraph.getFeatureGroups(components);
   REQUIRE(components.size() == 1);
   REQUIRE(components[0].size() == 2);
   REQUIRE(featureGraph.getNVertices() == 0);
@@ -40,16 +45,23 @@
   // test connection distance
   featureGraph.addFeature(ft1);
   featureGraph.addFeature(ft2);
-  FeatureTrajectoryPtr ft3 = createFeatureTrajectory(3, 10, lastInstant, Point2f(6.05,1), Point2f(0.5, 0.)); // connected to ft2 only
+  FeatureTrajectoryPtr ft3 = createFeatureTrajectory(3, firstInstant, lastInstant, Point2f(6.05,1), Point2f(0.5, 0.)); // connected to ft2 only
   featureGraph.addFeature(ft3);
-  FeatureTrajectoryPtr ft4 = createFeatureTrajectory(4, 10, lastInstant, Point2f(11.1,1), Point2f(0.5, 0.)); // not connected
+  FeatureTrajectoryPtr ft4 = createFeatureTrajectory(4, firstInstant, lastInstant, Point2f(11.1,1), Point2f(0.5, 0.)); // not connected
   featureGraph.addFeature(ft4);
 
+  REQUIRE(ft1->minMaxSimilarity(*ft2, firstInstant, lastInstant, connectionDistance, segmentationDistance));
+  REQUIRE(ft2->minMaxSimilarity(*ft3, firstInstant, lastInstant, connectionDistance, segmentationDistance));
+  REQUIRE_FALSE(ft1->minMaxSimilarity(*ft3, firstInstant, lastInstant, connectionDistance, segmentationDistance));
+  REQUIRE_FALSE(ft1->minMaxSimilarity(*ft4, firstInstant, lastInstant, connectionDistance, segmentationDistance));
+  REQUIRE_FALSE(ft2->minMaxSimilarity(*ft4, firstInstant, lastInstant, connectionDistance, segmentationDistance));
+  REQUIRE_FALSE(ft3->minMaxSimilarity(*ft4, firstInstant, lastInstant, connectionDistance, segmentationDistance));
+  
   REQUIRE(featureGraph.getNVertices() == 4);
   REQUIRE(featureGraph.getNEdges() == 2);
 
   featureGraph.connectedComponents(lastInstant+1);
-  components = featureGraph.getFeatureGroups();
+  featureGraph.getFeatureGroups(components);
   REQUIRE(components.size() == 2);
   REQUIRE(components[0].size() == 3);
   REQUIRE(components[1].size() == 1);
--- a/c/utils.cpp	Thu Apr 18 15:29:33 2013 -0400
+++ b/c/utils.cpp	Fri Dec 05 12:13:53 2014 -0500
@@ -41,7 +41,7 @@
     getline(f, s);
   }
   
-  if (s[0] == ::commentChar)
+  if (!s.empty() && s[0] == ::commentChar)
     s.clear();
   return s;
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/include/InputFrameListModule.h	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,30 @@
+#ifndef INPUT_FRAME_LIST_MODULE_H
+#define INPUT_FRAME_LIST_MODULE_H
+
+#include "InputFrameProviderIface.h"
+
+#include <string>
+#include <vector>
+
+class InputFrameListModule : public InputFrameProviderIface
+{
+ public:
+  InputFrameListModule(const std::string& _dirname);
+  ~InputFrameListModule();
+
+  bool getNextFrame(cv::Mat&);
+  unsigned int getNbFrames();
+  bool isOpen() const { return mInit;}
+  void setFrameNumber(const unsigned int& frameNumber);
+
+  virtual const cv::Size& getSize() const { return mSize;}
+ private:
+  void loadImageList(void);
+  std::vector<std::string> filenames;
+  unsigned int mCurrentIdx;
+  bool mInit;
+  std::string dirname;
+  cv::Size mSize;
+};
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/include/InputFrameProviderIface.h	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,19 @@
+#ifndef INPUT_FRAME_PROVIDER_IFACE_H
+#define INPUT_FRAME_PROVIDER_IFACE_H
+
+#include "opencv2/core/core.hpp"
+#include <string>
+
+
+class InputFrameProviderIface
+{
+public: 	
+  virtual ~InputFrameProviderIface(){}
+  virtual bool getNextFrame(cv::Mat&)=0;
+  virtual unsigned int getNbFrames() = 0;	
+  virtual bool isOpen() const = 0;
+  virtual void setFrameNumber(const unsigned int& frameNumber) = 0;
+  virtual const cv::Size& getSize() const = 0;
+};
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/include/InputVideoFileModule.h	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,32 @@
+#ifndef INPUT_VIDEO_FILE_MODULE_H
+#define INPUT_VIDEO_FILE_MODULE_H
+
+#include "InputFrameProviderIface.h"
+#include <string>
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+
+class InputVideoFileModule : public InputFrameProviderIface
+{
+public:
+  InputVideoFileModule(const std::string& videoPath);
+  ~InputVideoFileModule();
+
+  bool getNextFrame(cv::Mat&);
+
+  unsigned int getNbFrames(){		return mNumberOfFrame;}
+
+  bool isOpen() const { return mInit;}
+
+  void setFrameNumber(const unsigned int& frameNumber);
+
+  const cv::Size& getSize() const { return mSize;}
+	
+private:
+  cv::Size mSize;
+  cv::VideoCapture mVideoCapture;
+  bool mInit;
+  int mNumberOfFrame;
+};
+
+#endif
--- a/include/Motion.hpp	Thu Apr 18 15:29:33 2013 -0400
+++ b/include/Motion.hpp	Fri Dec 05 12:13:53 2014 -0500
@@ -5,6 +5,7 @@
 #include <boost/shared_ptr.hpp>
 #include <boost/graph/adjacency_list.hpp>
 
+
 template<typename T> class TrajectoryDBAccess;
 template<typename T> class TrajectoryDBAccessList;
 
@@ -13,7 +14,8 @@
 /** Class for feature data
     positions, velocities and other statistics to evaluate their quality
     before saving. */
-class FeatureTrajectory {
+class FeatureTrajectory 
+{
 public:
   FeatureTrajectory(const unsigned int& frameNum, const cv::Point2f& p, const cv::Mat& homography);
 
@@ -47,13 +49,15 @@
 
   void shorten(void);
 
+  void movingAverage(const unsigned int& nFramesSmoothing);
+
   void write(TrajectoryDBAccess<cv::Point2f>& trajectoryDB, const std::string& positionsTableName, const std::string& velocitiesTableName) const;
 
 #ifdef USE_OPENCV
   void draw(cv::Mat& img, const cv::Mat& homography, const cv::Scalar& color) const;
 #endif
 
-  friend std::stringstream& operator<<(std::stringstream& out, const FeatureTrajectory& ft);
+  friend std::ostream& operator<<(std::ostream& out, const FeatureTrajectory& ft);
 
 protected:
   /// first frame number
@@ -63,7 +67,7 @@
 
   TrajectoryPoint2fPtr positions;
   /** one fewer velocity than position
-      v_n = p_n+1 - p_n*/
+      v_n = p_n - p_n-1*/
   TrajectoryPoint2fPtr velocities;
   
   /// norms of velocities for feature constraints, one fewer positions than positions
@@ -75,10 +79,12 @@
 typedef boost::shared_ptr<FeatureTrajectory> FeatureTrajectoryPtr;
 
 // inlined
-inline std::stringstream& operator<<(std::stringstream& out, const FeatureTrajectory& ft) {
+inline std::ostream& operator<<(std::ostream& out, const FeatureTrajectory& ft) 
+{
   out << *(ft.positions);
   out << "\n";
   out << *(ft.velocities);
+
   return out;
 }
 
@@ -116,7 +122,7 @@
 
   /** Performs some checks on groups of features and return their lists of ids if correct
       Removes the vertices from the graph */
-  std::vector<std::vector<unsigned int> > getFeatureGroups(void);
+  void getFeatureGroups(std::vector<std::vector<FeatureTrajectoryPtr> >& featureGroups);
 
   std::string informationString(void) const;
 
--- a/include/Parameters.hpp	Thu Apr 18 15:29:33 2013 -0400
+++ b/include/Parameters.hpp	Fri Dec 05 12:13:53 2014 -0500
@@ -4,6 +4,7 @@
 /// \todo Class for parameters, with utilities to save and load from configuration files
 
 #include <string>
+#include <vector>
 
 namespace boost{
   namespace program_options {
@@ -19,7 +20,12 @@
   std::string videoFilename;
   std::string databaseFilename;
   std::string homographyFilename;
+  std::string intrinsicCameraFilename;
+  std::vector<float> distortionCoefficients;
+  float undistortedImageMultiplication;
+  int interpolationMethod;
   std::string maskFilename;
+  bool undistort;
   bool loadFeatures;
   bool display;
   float videoFPS;
@@ -27,27 +33,62 @@
   unsigned int frame1;
   int nFrames;
   // feature tracking
+  /// "Maximum number of corners to return" (OpenCV goodFeaturesToTrack) (should be large enough not to limit the potential number of features)
   int maxNFeatures;
+  /// "Parameter characterizing the minimal accepted quality of image corners" (OpenCV goodFeaturesToTrack )
   float featureQuality;
+  /// "Minimum possible Euclidean distance between the returned corners" (OpenCV goodFeaturesToTrack)
   float minFeatureDistanceKLT;
-  int windowSize;
+  /// "Size of an average block for computing a derivative covariation matrix over each pixel neighborhood" (OpenCV goodFeaturesToTrack)
+  int blockSize;
+  /// "Parameter indicating whether to use a Harris detector" (OpenCV goodFeaturesToTrack)
   bool useHarrisDetector;
+  /// "Free parameter of the Harris detector" (OpenCV goodFeaturesToTrack)
   float k;
+  /// "size of the search window at each pyramid level" (OpenCV calcOpticalFlowPyrLK)
+  int windowSize;
+  /// "0-based maximal pyramid level number" (OpenCV calcOpticalFlowPyrLK) higher is higher quality
   int pyramidLevel;
+  /// Number of displacements (number of frames-1) over which minimum motion is computed 
   unsigned int nDisplacements;
+  /// Minimum displacement per frame (in world space) to keep features
   float minFeatureDisplacement;
+  /// Maximum feature acceleration
   float accelerationBound;
+  /// Maximum feature deviation
   float deviationBound;
+  /// Number of frames to smooth positions (half window)
   int nFramesSmoothing;
   //int nFramesVelocity;
+  /// Maximum number of iterations to stop optical flow (OpenCV calcOpticalFlowPyrLK)
   int maxNumberTrackingIterations;
+  /// Minimum error to reach to stop optical flow (OpenCV calcOpticalFlowPyrLK)
   float minTrackingError;
+  /// Minimum eigen value of a 2x2 normal matrix of optical flow equations (OpenCV calcOpticalFlowPyrLK)
+  float minFeatureEigThreshold;
+  /// Minimum length of a feature (number of frames) to consider a feature for grouping
   unsigned int minFeatureTime;
+  /// Connection distance in feature grouping (in world space)
   float mmConnectionDistance;
+  /// Segmentation distance in feature grouping (in world space)
   float mmSegmentationDistance;
+  /// Maximum distance between features for grouping (in world space) (unused)
   float maxDistance;
+  /// Minimum cosine of the angle between the velocity vectors for grouping (unused)
   float minVelocityCosine;
+  /// Minimum average number of features per frame to create a vehicle hypothesis
   float minNFeaturesPerGroup;
+  // safety analysis
+  float maxPredictedSpeed;
+  float predictionTimeHorizon;
+  float collisionDistance;
+  bool crossingZones;
+  std::string predictionMethod;
+  int nPredictedTrajectories;
+  float minAcceleration;
+  float maxAcceleration;
+  float maxSteering;
+  bool useFeaturesForPrediction;
 
   std::string parameterDescription;
 
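
Most of the newly documented tracking parameters are passed straight to OpenCV. A hedged Python sketch of where they land in the cv2 calls that the comments quote (the frames and parameter values below are placeholders, not the tracker's defaults):

    import numpy as np
    import cv2

    previousGray = np.random.randint(0, 256, (480, 640)).astype(np.uint8)  # placeholder frames
    currentGray = np.random.randint(0, 256, (480, 640)).astype(np.uint8)
    maxNFeatures, featureQuality, minFeatureDistanceKLT = 1000, 0.1, 5
    blockSize, useHarrisDetector, k = 7, False, 0.04
    windowSize, pyramidLevel = 7, 5
    maxNumberTrackingIterations, minTrackingError, minFeatureEigThreshold = 20, 0.3, 1e-4

    corners = cv2.goodFeaturesToTrack(previousGray, maxNFeatures, featureQuality, minFeatureDistanceKLT,
                                      blockSize = blockSize, useHarrisDetector = useHarrisDetector, k = k)
    nextPts, status, err = cv2.calcOpticalFlowPyrLK(previousGray, currentGray, corners, None,
                                                    winSize = (windowSize, windowSize), maxLevel = pyramidLevel,
                                                    criteria = (cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS,
                                                                maxNumberTrackingIterations, minTrackingError),
                                                    minEigThreshold = minFeatureEigThreshold)
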
--- a/include/cvutils.hpp	Thu Apr 18 15:29:33 2013 -0400
+++ b/include/cvutils.hpp	Fri Dec 05 12:13:53 2014 -0500
@@ -17,20 +17,20 @@
 /** Loads a cv mat from a text file where the numbers are saved line by line separated by separator */
 cv::Mat loadMat(const std::string& filename, const std::string& separator);
 
-template<typename T> 
-float scalarProduct(const cv::Point_<T>& v1, const cv::Point_<T>& v2) { return v1.x*v2.x+v1.y*v2.y;}
+//template<typename T> 
+//float scalarProduct(const cv::Point_<T>& v1, const cv::Point_<T>& v2) { return v1.x*v2.x+v1.y*v2.y;}
 
 void keyPoints2Points(const std::vector<cv::KeyPoint>& kpts, std::vector<cv::Point2f>& pts, const bool& clearPts = true);
 
 /** Allocates a new IplImage. */
-IplImage* allocateImage(const int& width, const int& height, const int& depth, const int& channels);
+// IplImage* allocateImage(const int& width, const int& height, const int& depth, const int& channels);
 
-IplImage* allocateImage(const CvSize& size, const int& depth, const int& channels);
+// IplImage* allocateImage(const CvSize& size, const int& depth, const int& channels);
 
 /** Goes to the target frame number, by querying frame, 
     supposing the video input is currently at current frame number.
     Returns the frame number that was reached.*/
-int goToFrameNum(CvCapture* inputVideo, const int& currentFrameNum, const int& targetFrameNum);
+// int goToFrameNum(CvCapture* inputVideo, const int& currentFrameNum, const int& targetFrameNum);
 
 /// Pre-defined colors
 class Colors {
--- a/python/calibration-translation.py	Thu Apr 18 15:29:33 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,116 +0,0 @@
-#!/usr/bin/env python
-
-import sys
-import os
-
-import matplotlib.mlab as pylab
-import matplotlib.pyplot as plt
-import numpy as np
-
-import cv2
-import utils
-import cvutils
-
-# development for the data collected and stabilized by Paul in Summer 2011
-# todo write help, add options to control the parameters for matching (n points and distance)
-
-options = utils.parseCLIOptions('Program to re-calibrate an initial calibration based on point correspondences by adjusting the points to slightly different viewpoints, where all the points are still visible\n\nUsage: ', ['ref_video=', 'ref_points='], sys.argv, ['mask_img='])
-
-referenceVideoFilename=options['--ref_video']
-wldPts, imgPts = cvutils.loadPointCorrespondences(options['--ref_points'])
-
-def translatePoints(points, t):
-    'points is Nx2, t is [x,y]'
-    translated = points.copy()
-    for i in xrange(2):
-        translated[i] += t[i]
-    return translated
-
-filenames = [f for f in utils.listfiles('.','avi')] # directory to examine should be current directory
-
-referenceVideoIndex = filenames.index(referenceVideoFilename)
-indices = set(range(len(filenames)))
-indices.discard(referenceVideoIndex)
-
-images = {}
-captures = {}
-
-captures[referenceVideoFilename] = cv2.VideoCapture(referenceVideoFilename)
-(ret, img) = captures[referenceVideoFilename].read()
-images[referenceVideoFilename] = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
-
-# load a mask image to compute the translation
-if '--mask_img' in options.keys():
-    maskImg = cv2.imread('mask.png', cv2.CV_LOAD_IMAGE_GRAYSCALE) # todo add possibility to look in the whole image if not providing mask
-else:
-    maskImg = np.ones(images[referenceVideoFilename].shape, dtype=np.uint8)
-
-referenceFeatures = cv2.goodFeaturesToTrack(images[referenceVideoFilename], 1000, 0.02, 2, useHarrisDetector = True, mask=maskImg)
-displayRef = cv2.cvtColor(images[referenceVideoFilename], cv2.COLOR_GRAY2RGB)
-for j,p in enumerate(imgPts):
-    cv2.circle(displayRef, tuple(p), 3, (255,0,0))
-    cv2.putText(displayRef, str(j+1), tuple(p), cv2.FONT_HERSHEY_PLAIN, 1, (255,0,0))
-cv2.imshow('Reference',displayRef)
-
-# get suitable image references for each video
-for f in filenames: 
-    captures[f] = cv2.VideoCapture(f)
-    frameFilename = utils.removeExtension(f)+'-frame.png' # TODO if frame image already exists, no need to search for it again
-    if not os.path.exists(frameFilename):
-        key = -1
-        while chr(key&255) != 'y':
-            (ret, img) = captures[f].read()
-            cv2.imshow('Image',img)
-            print('Can one see the reference points in the image? (y/n)')
-            key = cv2.waitKey(0)
-
-        images[f] = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
-        cv2.imwrite(frameFilename, img)
-    else:
-        images[f] = cv2.imread(frameFilename, cv2.CV_LOAD_IMAGE_GRAYSCALE)
-    #features[f] = cv2.goodFeaturesToTrack(images[f], 1000, 0.02, 2, useHarrisDetector = True, mask=maskImg) # todo put parameters on the command line ? 
-    # goodFeaturesToTrack(image, maxCorners, qualityLevel, minDistance[, corners[, mask[, blockSize[, useHarrisDetector[, k]]]]])
-    # display features
-    # if False:
-    #     display = img.copy()#cv2.cvtColor(images[f], cv2.COLOR_GRAY2RGB) #.copy()
-    #     for p in features[f]:
-    #         cv2.circle(display, tuple(p[0]), 3, (255,0,0))
-    #     cv2.imshow('Reference',display)
-    #     cv2.waitKey()
-
-plt.close('all')
-
-# validate or input point correspondences and compute homography
-for i in indices:
-    t = cvutils.computeTranslation(images[filenames[referenceVideoIndex]], images[filenames[i]], referenceFeatures, 100, 10)
-    print filenames[i],t
-    key = -1
-    if t != None: # show translated points and ask if ok
-        displayImg = cv2.cvtColor(images[filenames[i]], cv2.COLOR_GRAY2RGB) #.copy()
-        for p in imgPts:
-            cv2.circle(displayImg, tuple(p+t[0]), 3, (255,0,0))
-        cv2.imshow('Image',displayImg)
-
-        while not(chr(key&255) == 'y' or chr(key&255) == 'n'):
-            print('Are the translated points rightly located (y/n)?')
-            key = cv2.waitKey(0)
-        if chr(key&255) == 'y': # compute homography with translated numbers
-            newImgPts = np.array([p+t[0] for p in imgPts])
-    else:
-        print('No translation could be found automatically. You will have to manually input world reference points.')
-
-    if t==None or chr(key&255) != 'y':# if no translation could computed or it is not satisfactory
-        print('Select the corresponding points in the same order as in the reference image')
-        plt.figure(1)
-        plt.imshow(displayRef)
-        plt.figure(2)
-        plt.imshow(img)
-        plt.show()
-        newImgPts = np.array([list(p) for p in plt.ginput(n=wldPts.shape[0], timeout=-1)], dtype = np.float32)
-
-    homography, mask = cv2.findHomography(newImgPts, wldPts) # method=0, ransacReprojThreshold=3
-    print homography
-    np.savetxt(utils.removeExtension(filenames[i])+'-homography.txt',homography)
-    np.savetxt(utils.removeExtension(filenames[i])+'-point-correspondences.txt', np.append(wldPts.T, newImgPts.T, axis=0))
-
-cv2.destroyAllWindows()
--- a/python/compute-homography.py	Thu Apr 18 15:29:33 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,122 +0,0 @@
-#! /usr/bin/env python
-
-import sys,getopt
-
-import matplotlib.pyplot as plt
-import numpy as np
-import cv2
-
-import cvutils
-import utils
-
-options, args = getopt.getopt(sys.argv[1:], 'hp:i:w:n:u:',['help'])
-options = dict(options)
-
-# TODO process camera intrinsic and extrinsic parameters to obtain image to world homography, taking example from Work/src/python/generate-homography.py script
-# cameraMat = load(videoFilenamePrefix+'-camera.txt');
-# T1 = cameraMat[3:6,:].copy();
-# A = cameraMat[0:3,0:3].copy();
-
-# # pay attention, rotation may be the transpose
-# # R = T1[:,0:3].T;
-# R = T1[:,0:3];
-# rT = dot(R, T1[:,3]/1000);
-# T = zeros((3,4),'f');
-# T[:,0:3] = R[:];
-# T[:,3] = rT;
-
-# AT = dot(A,T);
-
-# nPoints = 4;
-# worldPoints = cvCreateMat(nPoints, 3, CV_64FC1);
-# imagePoints = cvCreateMat(nPoints, 3, CV_64FC1);
-
-# # extract homography from the camera calibration
-# worldPoints = cvCreateMat(4, 3, CV_64FC1);
-# imagePoints = cvCreateMat(4, 3, CV_64FC1);
-
-# worldPoints[0,:] = [[1, 1, 0]];
-# worldPoints[1,:] = [[1, 2, 0]];
-# worldPoints[2,:] = [[2, 1, 0]];
-# worldPoints[3,:] = [[2, 2, 0]];
-
-# wPoints = [[1,1,2,2],
-#            [1,2,1,2],
-#            [0,0,0,0]];
-# iPoints = utils.worldToImage(AT, wPoints);
-
-# for i in range(nPoints):
-#     imagePoints[i,:] = [iPoints[:,i].tolist()];
-
-# H = cvCreateMat(3, 3, CV_64FC1);
-
-# cvFindHomography(imagePoints, worldPoints, H);
-
-if '--help' in options.keys() or '-h' in options.keys() or len(options) == 0:
-    print('Usage: {0} --help|-h [-p point-correspondences.txt] [ -i video-frame] [ -w world-frame] [n number-points] [-u unit-per-pixel=1]'.format(sys.argv[0]))
-    print('''The input data can be provided either as point correspondences already saved
- in a text file or inputed by clicking a certain number of points (>=4)
- in a video frame and a world image.
-
-The point correspondence file contains at least 4 non-colinear point coordinates 
-with the following format:
- - the first two lines are the x and y coordinates in the projected space (usually world space)
- - the last two lines are the x and y coordinates in the origin space (usually image space)
-
-If providing video and world images, with a number of points to input
-and a ration to convert pixels to world distance unit (eg meters per pixel), 
-the images will be shown in turn and the user should click 
-in the same order the corresponding points in world and image spaces. ''')
-    sys.exit()
-
-homography = np.array([])
-if '-p' in options.keys():
-    worldPts, videoPts = cvutils.loadPointCorrespondences(options['-p'])
-    homography, mask = cv2.findHomography(videoPts, worldPts) # method=0, ransacReprojThreshold=3
-elif '-i' in options.keys() and '-w' in options.keys():
-    nPoints = 4
-    if '-n' in options.keys():
-        nPoints = int(options['-n'])
-    unitsPerPixel = 1
-    if '-u' in options.keys():
-        unitsPerPixel = float(options['-u'])
-    worldImg = plt.imread(options['-w'])
-    videoImg = plt.imread(options['-i'])
-    print('Click on {0} points in the video frame'.format(nPoints))
-    plt.figure()
-    plt.imshow(videoImg)
-    videoPts = np.array(plt.ginput(nPoints))
-    print('Click on {0} points in the world image'.format(nPoints))
-    plt.figure()
-    plt.imshow(worldImg)
-    worldPts = unitsPerPixel*np.array(plt.ginput(nPoints))
-    plt.close('all')
-    homography, mask = cv2.findHomography(videoPts, worldPts)
-    # save the points in file
-    f = open('point-correspondences.txt', 'a')
-    np.savetxt(f, worldPts.T)
-    np.savetxt(f, videoPts.T)
-    f.close()
-
-if homography.size>0:
-    np.savetxt('homography.txt',homography)
-
-if '-i' in options.keys() and homography.size>0:
-    videoImg = cv2.imread(options['-i'])
-    worldImg = cv2.imread(options['-w'])
-    invHomography = np.linalg.inv(homography)
-    projectedWorldPts = cvutils.projectArray(invHomography, worldPts.T).T
-    if '-u' in options.keys():
-        unitsPerPixel = float(options['-u'])        
-        projectedVideoPts = cvutils.projectArray(invHomography, videoPts.T).T
-    for i in range(worldPts.shape[0]):
-        cv2.circle(videoImg,tuple(np.int32(np.round(videoPts[i]))),2,cvutils.cvRed)
-        cv2.circle(videoImg,tuple(np.int32(np.round(projectedWorldPts[i]))),2,cvutils.cvBlue)
-        if '-u' in options.keys():
-            cv2.circle(worldImg,tuple(np.int32(np.round(worldPts[i]/unitsPerPixel))),2,cvutils.cvRed)
-            cv2.circle(worldImg,tuple(np.int32(np.round(projectedVideoPts[i]/unitsPerPixel))),2,cvutils.cvRed)
-        #print('img: {0} / projected: {1}'.format(videoPts[i], p))
-    cv2.imshow('video frame',videoImg)
-    if '-u' in options.keys():
-        cv2.imshow('world image',worldImg)
-    cv2.waitKey()
--- a/python/compute-object-from-features.py	Thu Apr 18 15:29:33 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,141 +0,0 @@
-#!/usr/bin/env python
-
-import sys
-
-import matplotlib.mlab as pylab
-import matplotlib.pyplot as plt
-import numpy as np
-
-import cv
-import utils
-import cvutils
-import ubc_utils
-import moving
-
-# use something like getopt to manage arguments if necessary
-
-if len(sys.argv) < 3:
-    print('Usage: {0} <video-filename> <n-objects>'.format(sys.argv[0]))
-    sys.exit()
-
-if sys.argv[1].endswith('.avi'):
-    videoFilenamePrefix = utils.removeExtension(sys.argv[1],'.')
-else:
-    videoFilenamePrefix = sys.argv[1]
-
-objectNum = int(sys.argv[2])
-
-objects = ubc_utils.loadTrajectories(videoFilenamePrefix+'-objects.txt', objectNum+1)
-obj = objects[objectNum]
-features = ubc_utils.loadTrajectories(videoFilenamePrefix+'-features.txt', max(obj.featureNumbers)+1)
-h = np.loadtxt(videoFilenamePrefix+'-homography.txt')
-
-invh = cvutils.invertHomography(h)
-
-def computeGroundTrajectory(features, homography, timeInterval = None):
-    '''Computes a trajectory for the set of features as the closes point to the ground
-    using the homography in image space'''
-    if not timeInterval:
-        raise Exception('not implemented') # compute from the features
-    
-    yCoordinates = -np.ones((len(features),int(timeInterval.length())))
-    for i,f in enumerate(features):
-        traj = f.getPositions().asArray()
-        imgTraj = cvutils.projectArray(homography, traj)
-        yCoordinates[i,f.getFirstInstant()-timeInterval.first:f.getLastInstant()+1-timeInterval.first] = imgTraj[1,:]
-
-    indices = np.argmax(yCoordinates,0)
-    newTraj = moving.Trajectory()
-    for j,idx in enumerate(indices):
-        newTraj.addPosition(features[idx].getPositionAtInstant(j+timeInterval.first))
-        #newVelocities.addPosition(features[obj.featureNumbers[idx]].getVelocityAtInstant(j+obj.getFirstInstant()))
-
-    return newTraj
-
-def computeMedianTrajectory(features, timeInterval = None):
-    if not timeInterval:
-        raise Exception('not implemented') # compute from the features
-    
-    newTraj = moving.Trajectory()
-    for t in timeInterval:
-        points = []
-        for f in features:
-            if f.existsAtInstant(t):
-                points.append(f.getPositionAtInstant(t).aslist())
-        med = np.median(np.array(points), 0)
-        newTraj.addPositionXY(med[0], med[1])
-
-    return newTraj
-
-# TODO version median: conversion to large matrix will not work, have to do it frame by frame
-
-def kalmanFilter(positions, velocities, processNoiseCov, measurementNoiseCov):
-    kalman=cv.CreateKalman(6, 4)
-    kalman.transition_matrix[0,2]=1
-    kalman.transition_matrix[0,4]=1./2
-    kalman.transition_matrix[1,3]=1
-    kalman.transition_matrix[1,5]=1./2
-    kalman.transition_matrix[2,4]=1
-    kalman.transition_matrix[3,5]=1
-
-    cv.SetIdentity(kalman.measurement_matrix, 1.)
-    cv.SetIdentity(kalman.process_noise_cov, processNoiseCov)
-    cv.SetIdentity(kalman.measurement_noise_cov, measurementNoiseCov)
-    cv.SetIdentity(kalman.error_cov_post, 1.)
-
-    p = positions[0]
-    v = velocities[0]
-    v2 = velocities[2]
-    a = (v2-v).multiply(0.5)
-    kalman.state_post[0,0]=p.x
-    kalman.state_post[1,0]=p.y
-    kalman.state_post[2,0]=v.x
-    kalman.state_post[3,0]=v.y
-    kalman.state_post[4,0]=a.x
-    kalman.state_post[5,0]=a.y
-    
-    filteredPositions = moving.Trajectory()
-    filteredVelocities = moving.Trajectory()
-    measurement = cv.CreateMat(4,1,cv.CV_32FC1)
-    for i in xrange(positions.length()):
-        cv.KalmanPredict(kalman) # no control
-        p = positions[i]
-        v = velocities[i]
-        measurement[0,0] = p.x
-        measurement[1,0] = p.y
-        measurement[2,0] = v.x
-        measurement[3,0] = v.y
-        cv.KalmanCorrect(kalman, measurement)
-        filteredPositions.addPositionXY(kalman.state_post[0,0], kalman.state_post[1,0])
-        filteredVelocities.addPositionXY(kalman.state_post[2,0], kalman.state_post[3,0])
-
-    return (filteredPositions, filteredVelocities)
-
-groundTrajectory = computeGroundTrajectory([features[i] for i in obj.featureNumbers], invh, obj.getTimeInterval())
-(filteredPositions, filteredVelocities) = kalmanFilter(groundTrajectory, obj.getVelocities(), 0.1, 0.1)
-
-#medianTrajectory = computeMedianTrajectory([features[i] for i in obj.featureNumbers], obj.getTimeInterval())
-
-delta = []
-for t in obj.getTimeInterval():
-    p1 = obj.getPositionAtInstant(t)
-    p2 = groundTrajectory[t-obj.getFirstInstant()]
-    delta.append((p1-p2).aslist())
-
-delta = np.median(delta, 0)
-
-translated = moving.Trajectory()
-for t in obj.getTimeInterval():
-    p1 = obj.getPositionAtInstant(t)
-    p1.x -= delta[0]
-    p1.y -= delta[1]
-    translated.addPosition(p1)
-
-plt.clf()
-obj.draw('rx-')
-for fnum in obj.featureNumbers: features[fnum].draw()
-groundTrajectory.draw('bx-')
-filteredPositions.draw('gx-')
-translated.draw('kx-')
-#medianTrajectory.draw('kx-')
-plt.axis('equal')
--- a/python/cvutils.py	Thu Apr 18 15:29:33 2013 -0400
+++ b/python/cvutils.py	Fri Dec 05 12:13:53 2014 -0500
@@ -1,13 +1,19 @@
 #! /usr/bin/env python
 '''Image/Video utilities'''
 
-import Image, ImageDraw # PIL
 try:
     import cv2
-    opencvExists = True
+    opencvAvailable = True
 except ImportError:
-    print('OpenCV library could not be loaded')
-    opencvExists = False
+    print('OpenCV library could not be loaded (video replay functions will not be available)') # TODO change to logging module
+    opencvAvailable = False
+try:
+    import skimage
+    skimageAvailable = True
+except ImportError:
+    print('Scikit-image library could not be loaded (HoG-based classification methods will not be available)')
+    skimageAvailable = False
+    
 from sys import stdout
 
 import utils
@@ -30,8 +36,9 @@
 def saveKey(key):
     return chr(key&255) == 's'
 
-def drawLines(filename, origins, destinations, w = 1, resultFilename='image.png'):
+def plotLines(filename, origins, destinations, w = 1, resultFilename='image.png'):
     '''Draws lines over the image '''
+    import Image, ImageDraw # PIL
     
     img = Image.open(filename)
 
@@ -41,7 +48,7 @@
     for p1, p2 in zip(origins, destinations):
         draw.line([p1.x, p1.y, p2.x, p2.y], width = w, fill = (256,0,0))
         #draw.line([p1.x, p1.y, p2.x, p2.y], pen)
-    del draw 
+    del draw
 
     #out = utils.openCheck(resultFilename)
     img.save(resultFilename)
@@ -63,6 +70,7 @@
 
 def cvMatToArray(cvmat):
     '''Converts an OpenCV CvMat to numpy array.'''
+    print('Deprecated, use new interface')
     from numpy.core.multiarray import zeros
     a = zeros((cvmat.rows, cvmat.cols))#array([[0.0]*cvmat.width]*cvmat.height)
     for i in xrange(cvmat.rows):
@@ -70,32 +78,56 @@
             a[i,j] = cvmat[i,j]
     return a
 
-if opencvExists:
-    def computeHomography(srcPoints, dstPoints, method=0, ransacReprojThreshold=0.0):
+if opencvAvailable:
+    def computeHomography(srcPoints, dstPoints, method=0, ransacReprojThreshold=3.0):
         '''Returns the homography matrix mapping from srcPoints to dstPoints (dimension Nx2)'''
         H, mask = cv2.findHomography(srcPoints, dstPoints, method, ransacReprojThreshold)
         return H
 
-    def arrayToCvMat(a, t = cv2.cv.CV_64FC1):
+    def arrayToCvMat(a, t = cv2.CV_64FC1):
         '''Converts a numpy array to an OpenCV CvMat, with default type CV_64FC1.'''
+        print('Deprecated, use new interface')
         cvmat = cv2.cv.CreateMat(a.shape[0], a.shape[1], t)
         for i in range(cvmat.rows):
             for j in range(cvmat.cols):
                 cvmat[i,j] = a[i,j]
         return cvmat
 
-    def draw(img, positions, color, lastCoordinate = None):
+    def cvPlot(img, positions, color, lastCoordinate = None):
         last = lastCoordinate+1
         if lastCoordinate != None and lastCoordinate >=0:
             last = min(positions.length()-1, lastCoordinate)
         for i in range(0, last-1):
             cv2.line(img, positions[i].asint().astuple(), positions[i+1].asint().astuple(), color)
 
-    def playVideo(filename, firstFrameNum = 0, frameRate = -1):
+    def cvImshow(windowName, img, rescale = 1.0):
+        'Rescales the image (in particular if too large)'
+        from cv2 import resize
+        if rescale != 1.:
+            size = (int(round(img.shape[1]*rescale)), int(round(img.shape[0]*rescale)))
+            resizedImg = resize(img, size)
+            cv2.imshow(windowName, resizedImg)
+        else:
+            cv2.imshow(windowName, img)
+
+    def computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients):
+        from copy import deepcopy
+        from numpy import identity, array
+        newImgSize = (int(round(width*undistortedImageMultiplication)), int(round(height*undistortedImageMultiplication)))
+        newCameraMatrix = deepcopy(intrinsicCameraMatrix)
+        newCameraMatrix[0,2] = newImgSize[0]/2.
+        newCameraMatrix[1,2] = newImgSize[1]/2.
+        return cv2.initUndistortRectifyMap(intrinsicCameraMatrix, array(distortionCoefficients), identity(3), newCameraMatrix, newImgSize, cv2.CV_32FC1)
+
+    def playVideo(filename, firstFrameNum = 0, frameRate = -1, interactive = False, printFrames = True, text = None, rescale = 1.):
         '''Plays the video'''
+        windowName = 'frame'
+        cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
         wait = 5
         if frameRate > 0:
             wait = int(round(1000./frameRate))
+        if interactive:
+            wait = 0
         capture = cv2.VideoCapture(filename)
         if capture.isOpened():
             key = -1
@@ -105,49 +137,114 @@
             while ret and not quitKey(key):
                 ret, img = capture.read()
                 if ret:
-                    print('frame {0}'.format(frameNum))
+                    if printFrames:
+                        print('frame {0}'.format(frameNum))
                     frameNum+=1
-                    cv2.imshow('frame', img)
+                    if text != None:
+                       cv2.putText(img, text, (10,50), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvRed) 
+                    cvImshow(windowName, img, rescale)
                     key = cv2.waitKey(wait)
+            cv2.destroyAllWindows()
+        else:
+            print('Video capture for {} failed'.format(filename))
 
-    def getImagesFromVideo(filename, nImages = 1, saveImage = False):
-        '''Returns nImages images from the video sequence'''
+    def getImagesFromVideo(videoFilename, firstFrameNum = 0, nFrames = 1, saveImage = False, outputPrefix = 'image'):
+        '''Returns nFrames images from the video sequence'''
+        from math import floor, log10
         images = []
-        capture = cv2.VideoCapture(filename)
-        if capture.isOpened():        
+        capture = cv2.VideoCapture(videoFilename)
+        if capture.isOpened():
+            nDigits = int(floor(log10(capture.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))))+1
             ret = False
-            numImg = 0
-            while numImg<nImages:
+            capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNum)
+            imgNum = 0
+            while imgNum<nFrames:
                 ret, img = capture.read()
                 i = 0
                 while not ret and i<10:
                     ret, img = capture.read()
                     i += 1
                 if img.size>0:
-                    numImg +=1
                     if saveImage:
-                        cv2.imwrite('image{0:04d}.png'.format(numImg), img)
+                        imgNumStr = format(firstFrameNum+imgNum, '0{}d'.format(nDigits))
+                        cv2.imwrite(outputPrefix+imgNumStr+'.png', img)
                     else:
                         images.append(img)
+                    imgNum +=1
+            capture.release()
+        else:
+            print('Video capture for {} failed'.format(videoFilename))
         return images
+    
+    def getFPS(videoFilename):
+        capture = cv2.VideoCapture(videoFilename)
+        if capture.isOpened():
+            fps = capture.get(cv2.cv.CV_CAP_PROP_FPS)
+            capture.release()
+            return fps
+        else:
+            print('Video capture for {} failed'.format(videoFilename))
+            return None
 
-    def displayTrajectories(videoFilename, objects, homography = None, firstFrameNum = 0, lastFrameNumArg = None):
+    def imageBox(img, obj, frameNum, homography, width, height, px = 0.2, py = 0.2, pixelThreshold = 800):
+        'Computes the bounding box of object at frameNum'
+        x = []
+        y = []
+        for f in obj.features:
+            if f.existsAtInstant(frameNum):
+                projectedPosition = f.getPositionAtInstant(frameNum).project(homography)
+                x.append(projectedPosition.x)
+                y.append(projectedPosition.y)
+        xmin = min(x)
+        xmax = max(x)
+        ymin = min(y)
+        ymax = max(y)
+        xMm = px * (xmax - xmin)
+        yMm = py * (ymax - ymin)
+        a = max(ymax - ymin + (2 * yMm), xmax - xmin + (2 * xMm))
+        yCropMin = int(max(0, .5 * (ymin + ymax - a)))
+        yCropMax = int(min(height - 1, .5 * (ymin + ymax + a)))
+        xCropMin = int(max(0, .5 * (xmin + xmax - a)))
+        xCropMax = int(min(width - 1, .5 * (xmin + xmax + a)))
+        if yCropMax != yCropMin and xCropMax != xCropMin and (yCropMax - yCropMin) * (xCropMax - xCropMin) > pixelThreshold:
+            croppedImg = img[yCropMin : yCropMax, xCropMin : xCropMax]
+        else:
+            croppedImg = []
+        return croppedImg, yCropMin, yCropMax, xCropMin, xCropMax
+
+
+    def displayTrajectories(videoFilename, objects, boundingBoxes = {}, homography = None, firstFrameNum = 0, lastFrameNumArg = None, printFrames = True, rescale = 1., nFramesStep = 1, saveAllImages = False, undistort = False, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1.):
         '''Displays the objects overlaid frame by frame over the video '''
+        from moving import userTypeNames
+        from math import ceil, log10
+
         capture = cv2.VideoCapture(videoFilename)
+        width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
+        height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
+
+        windowName = 'frame'
+        #cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
+
+        if undistort: # setup undistortion
+            [map1, map2] = computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients)
         if capture.isOpened():
             key = -1
             ret = True
             frameNum = firstFrameNum
             capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNum)
-            if not lastFrameNumArg:
+            if lastFrameNumArg == None:
                 from sys import maxint
                 lastFrameNum = maxint
             else:
                 lastFrameNum = lastFrameNumArg
+            nZerosFilename = int(ceil(log10(lastFrameNum)))
             while ret and not quitKey(key) and frameNum < lastFrameNum:
                 ret, img = capture.read()
                 if ret:
-                    print('frame {0}'.format(frameNum))
+                    if undistort:
+                        img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
+                    if printFrames:
+                        print('frame {0}'.format(frameNum))
                     for obj in objects:
                         if obj.existsAtInstant(frameNum):
                             if not hasattr(obj, 'projectedPositions'):
@@ -155,25 +252,168 @@
                                     obj.projectedPositions = obj.positions.project(homography)
                                 else:
                                     obj.projectedPositions = obj.positions
-                            draw(img, obj.projectedPositions, cvRed, frameNum-obj.getFirstInstant())
-                            cv2.putText(img, '{0}'.format(obj.num), obj.projectedPositions[frameNum-obj.getFirstInstant()].asint().astuple(), cv2.FONT_HERSHEY_PLAIN, 1, cvRed)
-                    cv2.imshow('frame', img)
-                    key = cv2.waitKey()
-                    if saveKey(key):
-                        cv2.imwrite('image.png', img)
-                    frameNum += 1
-    
+                            cvPlot(img, obj.projectedPositions, cvRed, frameNum-obj.getFirstInstant())
+                            if frameNum in boundingBoxes.keys():
+                                for rect in boundingBoxes[frameNum]:
+                                    cv2.rectangle(img, rect[0].asint().astuple(), rect[1].asint().astuple(), cvRed)
+                            elif len(obj.features) != 0:
+                                imgcrop, yCropMin, yCropMax, xCropMin, xCropMax = imageBox(img, obj, frameNum, homography, width, height)
+                                cv2.rectangle(img, (xCropMin, yCropMin), (xCropMax, yCropMax), cvBlue, 1)
+                            objDescription = '{} '.format(obj.num)
+                            if userTypeNames[obj.userType] != 'unknown':
+                                objDescription += userTypeNames[obj.userType][0].upper()
+                            cv2.putText(img, objDescription, obj.projectedPositions[frameNum-obj.getFirstInstant()].asint().astuple(), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvRed)
+                    if not saveAllImages:
+                        cvImshow(windowName, img, rescale)
+                        key = cv2.waitKey()
+                    if saveAllImages or saveKey(key):
+                        cv2.imwrite('image-{{:0{}}}.png'.format(nZerosFilename).format(frameNum), img)
+                    frameNum += nFramesStep
+                    if nFramesStep > 1:
+                        capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum)
+            cv2.destroyAllWindows()
+        else:
+            print 'Cannot load file ' + videoFilename
+
+    def computeHomographyFromPDTV(cameraFilename, method=0, ransacReprojThreshold=3.0):
+        '''Returns the homography matrix at ground level from PDTV format
+        https://bitbucket.org/hakanardo/pdtv'''
+        import pdtv
+        from numpy import array
+        camera = pdtv.load(cameraFilename)
+        srcPoints = [[x,y] for x, y in zip([1.,2.,2.,1.],[1.,1.,2.,2.])] # need floats!!
+        dstPoints = []
+        for srcPoint in srcPoints:
+            projected = camera.image_to_world(tuple(srcPoint))
+            dstPoints.append([projected[0], projected[1]])
+        H, mask = cv2.findHomography(array(srcPoints), array(dstPoints), method, ransacReprojThreshold)
+        return H
+
+    def undistortedCoordinates(map1, map2, x, y, maxDistance = 1.):
+        '''Returns the coordinates of a point in undistorted image
+        map1 and map2 are the mapping functions from undistorted image
+        to distorted (original image)
+        map1(x,y) = originalx, originaly'''
+        from numpy import abs, logical_and, unravel_index, dot, sum
+        from matplotlib.mlab import find
+        distx = abs(map1-x)
+        disty = abs(map2-y)
+        indices = logical_and(distx<maxDistance, disty<maxDistance)
+        closeCoordinates = unravel_index(find(indices), distx.shape) # returns i,j, ie y,x
+        xWeights = 1-distx[indices]
+        yWeights = 1-disty[indices]
+        return dot(xWeights, closeCoordinates[1])/sum(xWeights), dot(yWeights, closeCoordinates[0])/sum(yWeights)
+
+    def undistortTrajectoryFromCVMapping(map1, map2, t):
+        '''test 'perfect' inversion'''
+        from moving import Trajectory
+        from numpy import isnan
+        undistortedTrajectory = Trajectory()
+        for i,p in enumerate(t):
+            res = undistortedCoordinates(map1, map2, p.x,p.y)
+            if not isnan(res).any():
+                undistortedTrajectory.addPositionXY(res[0], res[1])
+            else:
+                print i,p,res
+        return undistortedTrajectory
+
+    def computeInverseMapping(originalImageSize, map1, map2):
+        'Computes inverse mapping from maps provided by cv2.initUndistortRectifyMap'
+        from numpy import ones, isnan
+        invMap1 = -ones(originalImageSize)
+        invMap2 = -ones(originalImageSize)
+        for x in range(0,originalImageSize[1]):
+            for y in range(0,originalImageSize[0]):
+                res = undistortedCoordinates(map1, map2, x, y)
+                if not isnan(res).any():
+                    invMap1[y,x] = res[0]
+                    invMap2[y,x] = res[1]
+        return invMap1, invMap2
+
+    def cameraIntrinsicCalibration(path, checkerBoardSize=[6,7], secondPassSearch=False, display=False):
+        ''' Camera calibration searches through all the images (jpg or png) located
+            in _path_ for matches to a checkerboard pattern of size checkerBoardSize.
+            These images should all be of the same camera with the same resolution.
+
+            For best results, use an asymmetric board and ensure that the image has
+            very high contrast, including the background. Suitable checkerboard:
+            http://ftp.isr.ist.utl.pt/pub/roswiki/attachments/camera_calibration(2f)Tutorials(2f)StereoCalibration/check-108.png
+
+            The code below is based on:
+            https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html
+            Modified by Paul St-Aubin
+            '''
+        from numpy import zeros, mgrid, float32, savetxt
+        import glob, os
+
+        # termination criteria
+        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
+
+        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
+        objp = zeros((checkerBoardSize[0]*checkerBoardSize[1],3), float32)
+        objp[:,:2] = mgrid[0:checkerBoardSize[1],0:checkerBoardSize[0]].T.reshape(-1,2)
+
+        # Arrays to store object points and image points from all the images.
+        objpoints = [] # 3d point in real world space
+        imgpoints = [] # 2d points in image plane.
+
+        ## Loop through all images in _path_
+        images = glob.glob(os.path.join(path,'*.[jJ][pP][gG]'))+glob.glob(os.path.join(path,'*.[jJ][pP][eE][gG]'))+glob.glob(os.path.join(path,'*.[pP][nN][gG]'))
+        for fname in images:
+            img = cv2.imread(fname)
+            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+
+            # Find the chess board corners
+            ret, corners = cv2.findChessboardCorners(gray, (checkerBoardSize[1],checkerBoardSize[0]), None)
+
+            # If found, add object points, image points (after refining them)
+            if ret:
+                print 'Found pattern in '+fname
+                
+                if(secondPassSearch): 
+                    corners = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
+
+                objpoints.append(objp)
+                imgpoints.append(corners)
+
+                # Draw and display the corners
+                if(display):
+                    img = cv2.drawChessboardCorners(img, (checkerBoardSize[1],checkerBoardSize[0]), corners, ret)
+                    if(img):
+                        cv2.imshow('img',img)
+                        cv2.waitKey(0)
+
+        ## Close up image loading and calibrate
+        cv2.destroyAllWindows()
+        if len(objpoints) == 0 or len(imgpoints) == 0: 
+            return False
+        try:
+            ret, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
+        except NameError:
+            return False
+        savetxt('intrinsic-camera.txt', camera_matrix)
+        return camera_matrix, dist_coeffs
+
+    def undistortImage(img, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1., interpolation=cv2.INTER_LINEAR):
+        '''Undistorts the image passed in argument'''
+        width = img.shape[1]
+        height = img.shape[0]
+        [map1, map2] = computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients)
+        return cv2.remap(img, map1, map2, interpolation=interpolation)
+
+
 def printCvMat(cvmat, out = stdout):
     '''Prints the cvmat to out'''
+    print('Deprecated, use new interface')
     for i in xrange(cvmat.rows):
         for j in xrange(cvmat.cols):
             out.write('{0} '.format(cvmat[i,j]))
         out.write('\n')
 
 def projectArray(homography, points):
-    '''Returns the coordinates of the projected points (format 2xN points)
-    through homography'''
-    from numpy.core._dotblas import dot
+    '''Returns the coordinates of the projected points through homography
+    (format: array 2xN points)'''
+    from numpy.core import dot
     from numpy.core.multiarray import array
     from numpy.lib.function_base import append
 
@@ -185,10 +425,10 @@
         prod = dot(homography, augmentedPoints)
         return prod[0:2]/prod[2]
     else:
-        return p
+        return points
 
 def project(homography, p):
-    '''Returns the coordinates of the projection of the point p
+    '''Returns the coordinates of the projection of the point p with coordinates p[0], p[1]
     through homography'''
     from numpy import array
     return projectArray(homography, array([[p[0]],[p[1]]]))
@@ -201,13 +441,39 @@
     return projectArray(homography, array(trajectory))
 
 def invertHomography(homography):
-    'Returns an inverted homography'
+    '''Returns an inverted homography
+    Unnecessary for reprojection over camera image'''
     from numpy.linalg.linalg import inv
     invH = inv(homography)
     invH /= invH[2,2]
     return invH
 
-if opencvExists:
+def undistortTrajectory(invMap1, invMap2, positions):
+    from numpy import floor, ceil
+    floorPositions = floor(positions)
+    #ceilPositions = ceil(positions)
+    undistortedTrajectory = [[],[]]
+    for i in xrange(len(positions[0])):
+        x,y = None, None
+        if positions[0][i]+1 < invMap1.shape[1] and positions[1][i]+1 < invMap1.shape[0]:
+            floorX = invMap1[floorPositions[1][i], floorPositions[0][i]]
+            floorY = invMap2[floorPositions[1][i], floorPositions[0][i]]
+            ceilX = invMap1[floorPositions[1][i]+1, floorPositions[0][i]+1]
+            ceilY = invMap2[floorPositions[1][i]+1, floorPositions[0][i]+1]
+            #ceilX = invMap1[ceilPositions[1][i], ceilPositions[0][i]]
+            #ceilY = invMap2[ceilPositions[1][i], ceilPositions[0][i]]
+            if floorX >=0 and floorY >=0 and ceilX >=0 and ceilY >=0:
+                x = floorX+(positions[0][i]-floorPositions[0][i])*(ceilX-floorX)
+                y = floorY+(positions[1][i]-floorPositions[1][i])*(ceilY-floorY)
+        undistortedTrajectory[0].append(x)
+        undistortedTrajectory[1].append(y)
+    return undistortedTrajectory
+
+def projectGInputPoints(homography, points):
+    from numpy import array
+    return projectTrajectory(homography, array(points+[points[0]]).T)
+
+if opencvAvailable:
     def computeTranslation(img1, img2, img1Points, maxTranslation2, minNMatches, windowSize = (5,5), level = 5, criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)):
         '''Computes the translation of img2 with respect to img1
         (loaded using OpenCV as numpy arrays)
@@ -233,3 +499,38 @@
         else:
             print(dp)
             return None
+
+if skimageAvailable:
+    def HOG(image, rescaleSize = (64, 64), orientations=9, pixelsPerCell=(8, 8), cellsPerBlock=(2, 2), visualize=False, normalize=False):
+        from skimage.feature import hog
+        from skimage import color, transform
+
+        bwImg = color.rgb2gray(image)
+        inputImg = transform.resize(bwImg, rescaleSize)
+        features = hog(inputImg, orientations, pixelsPerCell, cellsPerBlock, visualize, normalize)
+        if visualize:
+            from matplotlib.pyplot import imshow, figure, subplot
+            hogViz = features[1]
+            features = features[0]
+            figure()
+            subplot(1,2,1)
+            imshow(inputImg)
+            subplot(1,2,2)
+            imshow(hogViz)
+        return features
+
+    def createHOGTrainingSet(imageDirectory, classLabel, rescaleSize = (64, 64), orientations=9, pixelsPerCell=(8, 8), cellsPerBlock=(2, 2), visualize=False, normalize=False):
+        from os import listdir
+        from numpy import array, float32
+        from matplotlib.pyplot import imread
+
+        inputData = []
+        for filename in listdir(imageDirectory):
+            img = imread(imageDirectory+filename)
+            features = HOG(img, rescaleSize, orientations, pixelsPerCell, cellsPerBlock, visualize, normalize)
+            inputData.append(features)
+
+        nImages = len(inputData)
+        return array(inputData, dtype = float32), array([classLabel]*nImages, dtype = float32)
+
+        
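
The undistortion helpers added above can be chained together. A possible usage sketch, assuming an intrinsic matrix saved by cameraIntrinsicCalibration (which writes intrinsic-camera.txt) and placeholder distortion coefficients and multiplication factor:

    import cv2
    from numpy import loadtxt
    import cvutils

    img = cv2.imread('frame.png')                        # hypothetical distorted video frame
    intrinsicCameraMatrix = loadtxt('intrinsic-camera.txt')
    distortionCoefficients = [-0.11, 0.05, 0., 0., 0.]   # placeholder values, not calibrated ones
    undistorted = cvutils.undistortImage(img, intrinsicCameraMatrix, distortionCoefficients,
                                         undistortedImageMultiplication = 1.3)
    cv2.imwrite('undistorted-frame.png', undistorted)
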
--- a/python/delete-object-tables.py	Thu Apr 18 15:29:33 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,15 +0,0 @@
-#! /usr/bin/env python
-
-import sys,getopt
-
-import utils
-import storage
-
-options, args = getopt.getopt(sys.argv[1:], 'h',['help'])
-options = dict(options)
-
-if '--help' in options.keys() or '-h' in options.keys() or len(args) == 0:
-    print('Usage: {0} --help|-h <database-filename.sqlite>'.format(sys.argv[0]))
-    sys.exit()
-
-storage.removeObjectsFromSqlite(args[0])
--- a/python/display-trajectories.py	Thu Apr 18 15:29:33 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,50 +0,0 @@
-#! /usr/bin/env python
-
-import sys,getopt
-
-import storage
-import cvutils
-from utils import FakeSecHead
-
-from numpy.linalg.linalg import inv
-from numpy import loadtxt
-from ConfigParser import ConfigParser
-
-options, args = getopt.getopt(sys.argv[1:], 'hi:d:t:o:f:',['help']) 
-# alternative long names are a pain to support ,'video-filename=','database-filename=', 'type='
-
-options = dict(options)
-
-print options, args
-
-if '--help' in options.keys() or '-h' in options.keys() or len(sys.argv) == 1:
-    print('Usage: '+sys.argv[0]+' --help|-h -i video-filename -d database-filename [-t object_type] [-o image2world_homography] [-f first_frame]\n'
-          +'Or   : '+sys.argv[0]+' [-t object_type] config_file.cfg\n\n'
-          'Order matters between positional and named arguments\n'
-          'object_type can be feature or object')
-    sys.exit()
-
-objectType = 'feature'
-if '-t' in options.keys():
-    objectType = options['-t']
-
-if len(args)>0: # consider there is a configuration file
-    config = ConfigParser()
-    config.readfp(FakeSecHead(open(args[0])))
-    sectionHeader = config.sections()[0]
-    videoFilename = config.get(sectionHeader, 'video-filename')
-    databaseFilename = config.get(sectionHeader, 'database-filename')
-    homography = inv(loadtxt(config.get(sectionHeader, 'homography-filename')))
-    firstFrameNum = config.getint(sectionHeader, 'frame1')
-else:
-    videoFilename = options['-i']
-    databaseFilename = options['-d']
-    homography = None
-    if '-o' in options.keys():
-        homography = inv(loadtxt(options['-o']))            
-    firstFrameNum = 0
-    if '-f' in options.keys():
-        firstFrameNum = int(options['-f'])
-
-objects = storage.loadTrajectoriesFromSqlite(databaseFilename, objectType)
-cvutils.displayTrajectories(videoFilename, objects, homography, firstFrameNum)
--- a/python/events.py	Thu Apr 18 15:29:33 2013 -0400
+++ b/python/events.py	Fri Dec 05 12:13:53 2014 -0500
@@ -8,9 +8,7 @@
 import multiprocessing
 import itertools
 
-import moving
-import prediction
-import indicators
+import moving, prediction, indicators, utils
 
 __metaclass__ = type
 
@@ -22,75 +20,205 @@
     contains the indicators in a dictionary with the names as keys
     '''
 
-    categories = {'headon': 0,
+    categories = {'Head On': 0,
                   'rearend': 1,
                   'side': 2,
                   'parallel': 3}
 
-    def __init__(self, num = None, timeInterval = None, roaduserNum1 = None, roaduserNum2 = None, movingObject1 = None, movingObject2 = None, categoryNum = None):
+    indicatorNames = ['Collision Course Dot Product',
+                      'Collision Course Angle',
+                      'Distance',
+                      'Minimum Distance',
+                      'Velocity Angle',
+                      'Speed Differential',
+                      'Collision Probability',
+                      'Time to Collision',
+                      'Probability of Successful Evasive Action',
+                      'predicted Post Encroachment Time']
+
+    indicatorNameToIndices = utils.inverseEnumeration(indicatorNames)
+
+    indicatorShortNames = ['CCDP',
+                           'CCA',
+                           'Dist',
+                           'MinDist',
+                           'VA',
+                           'SD',
+                           'PoC',
+                           'TTC',
+                           'P(SEA)',
+                           'pPET']
+
+    indicatorUnits = ['',
+                      'rad',
+                      'm',
+                      'm',
+                      'rad',
+                      'm/s',
+                      '',
+                      's',
+                      '',
+                      '']
+
+    def __init__(self, num = None, timeInterval = None, roaduserNum1 = None, roaduserNum2 = None, roadUser1 = None, roadUser2 = None, categoryNum = None):
         moving.STObject.__init__(self, num, timeInterval)
-        self.roaduserNumbers = set([roaduserNum1, roaduserNum2])
-        self.movingObject1 = movingObject1
-        self.movingObject2 = movingObject2
+        if timeInterval == None and roadUser1 != None and roadUser2 != None:
+            self.timeInterval = roadUser1.commonTimeInterval(roadUser2)
+        self.roadUser1 = roadUser1
+        self.roadUser2 = roadUser2
+        if roaduserNum1 != None and roaduserNum2 != None:
+            self.roadUserNumbers = set([roaduserNum1, roaduserNum2])
+        elif roadUser1 != None and roadUser2 != None:
+            self.roadUserNumbers = set([roadUser1.getNum(), roadUser2.getNum()])
+        else:
+            self.roadUserNumbers = None
         self.categoryNum = categoryNum
         self.indicators = {}
+        self.interactionInterval = None
+
+    def getRoadUserNumbers(self):
+        return self.roadUserNumbers
 
     def getIndicator(self, indicatorName):
-        return self.indicators[indicatorName]
+        return self.indicators.get(indicatorName, None)
 
     def addIndicator(self, indicator):
-        self.indicators[indicator.name] = indicator
+        if indicator:
+            self.indicators[indicator.name] = indicator
 
     def computeIndicators(self):
         '''Computes the collision course cosine only if the cosine is positive'''
         collisionCourseDotProducts = {}#[0]*int(self.timeInterval.length())
-        collisionCourseCosines = {}
+        collisionCourseAngles = {}
+        velocityAngles = {}
         distances = {}#[0]*int(self.timeInterval.length())
         speedDifferentials = {}
+        interactionInstants = []
         for instant in self.timeInterval:
-            deltap = self.movingObject1.getPositionAtInstant(instant)-self.movingObject2.getPositionAtInstant(instant)
-            deltav = self.movingObject2.getVelocityAtInstant(instant)-self.movingObject1.getVelocityAtInstant(instant)
+            deltap = self.roadUser1.getPositionAtInstant(instant)-self.roadUser2.getPositionAtInstant(instant)
+            v1 = self.roadUser1.getVelocityAtInstant(instant)
+            v2 = self.roadUser2.getVelocityAtInstant(instant)
+            deltav = v2-v1
+            velocityAngles[instant] = arccos(moving.Point.dot(v1, v2)/(v1.norm2()*v2.norm2()))
             collisionCourseDotProducts[instant] = moving.Point.dot(deltap, deltav)
-            distances[instant] = deltap.norm2() # todo compute closest feature distance, if features
+            distances[instant] = deltap.norm2()
             speedDifferentials[instant] = deltav.norm2()
             if collisionCourseDotProducts[instant] > 0:
-                collisionCourseCosines[instant] = arccos(collisionCourseDotProducts[instant]/(distances[instant]*speedDifferentials[instant]))
+                interactionInstants.append(instant)
+            collisionCourseAngles[instant] = arccos(collisionCourseDotProducts[instant]/(distances[instant]*speedDifferentials[instant]))
+
+        if len(interactionInstants) >= 2:
+            self.interactionInterval = moving.TimeInterval(interactionInstants[0], interactionInstants[-1])
+        else:
+            self.interactionInterval = moving.TimeInterval()
+        self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[0], collisionCourseDotProducts))
+        self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[1], collisionCourseAngles))
+        self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[2], distances))
+        self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[4], velocityAngles))
+        self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[5], speedDifferentials))
+
+        # if we have features, compute other indicators
+        if len(self.roadUser1.features) != 0 and len(self.roadUser2.features) != 0:
+            minDistance={}
+            for instant in self.timeInterval:
+                minDistance[instant] = moving.MovingObject.minDistance(self.roadUser1, self.roadUser2, instant)
+            self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[3], minDistance))
 
-        # todo shorten the time intervals based on the interaction definition
-        self.addIndicator(indicators.SeverityIndicator('Collision Course Dot Product', collisionCourseDotProducts))
-        self.addIndicator(indicators.SeverityIndicator('Distance', distances))
-        self.addIndicator(indicators.SeverityIndicator('Speed Differential', speedDifferentials))
-        self.addIndicator(indicators.SeverityIndicator('Collision Course Cosine', collisionCourseCosines))
+    def computeCrossingsCollisions(self, predictionParameters, collisionDistanceThreshold, timeHorizon, computeCZ = False, debug = False, timeInterval = None, nProcesses = 1):
+        '''Computes all crossing and collision points at each common instant for two road users. '''
+        self.collisionPoints={}
+        self.crossingZones={}
+        TTCs = {}
 
-        # todo test for interaction instants and interval, compute indicators
+        if timeInterval:
+            commonTimeInterval = timeInterval
+        else:
+            commonTimeInterval = self.timeInterval
+        self.collisionPoints, self.crossingZones = prediction.computeCrossingsCollisions(predictionParameters, self.roadUser1, self.roadUser2, collisionDistanceThreshold, timeHorizon, computeCZ, debug, commonTimeInterval, nProcesses)
+        for i, cp in self.collisionPoints.iteritems():
+            TTCs[i] = prediction.SafetyPoint.computeExpectedIndicator(cp)
+        # add probability of collision, and probability of successful evasive action
+        self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[7], TTCs))
+        
+        if computeCZ:
+            pPETs = {}
+            for i in list(commonTimeInterval)[:-1]:
+                if len(self.crossingZones[i]) > 0:
+                    pPETs[i] = prediction.SafetyPoint.computeExpectedIndicator(self.crossingZones[i])
+            self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[9], pPETs))
 
     def addVideoFilename(self,videoFilename):
         self.videoFilename= videoFilename	
 
     def addInteractionType(self,interactionType):
-	''' interaction types: conflict or collision if they are known'''
-        self.interactionType= interactionType			
+        ''' interaction types: conflict or collision if they are known'''
+        self.interactionType= interactionType
 
-def createInteractions(objects):
-    '''Create all interactions of two co-existing road users
+def createInteractions(objects, _others = None):
+    '''Create all interactions of two co-existing road users'''
+    if _others != None:
+        others = _others
 
-    todo add test to compute categories?'''
     interactions = []
     num = 0
     for i in xrange(len(objects)):
-        for j in xrange(i):
-            commonTimeInterval = objects[i].commonTimeInterval(objects[j])
+        if _others == None:
+            others = objects[:i]
+        for j in xrange(len(others)):
+            commonTimeInterval = objects[i].commonTimeInterval(others[j])
             if not commonTimeInterval.empty():
-                interactions.append(Interaction(num, commonTimeInterval, objects[i].num, objects[j].num, objects[i], objects[j]))
+                interactions.append(Interaction(num, commonTimeInterval, objects[i].num, others[j].num, objects[i], others[j]))
                 num += 1
     return interactions
 
+def prototypeCluster(interactions, similarityMatrix, alignmentMatrix, indicatorName, minSimilarity):
+    '''Finds exemplar indicator time series for all interactions
+    Returns the prototype indices (in the interaction list) and the label of each indicator (interaction)
+
+    if an indicator profile (time series) is different enough (<minSimilarity), 
+    it will become a new prototype. 
+    Non-prototype interactions will be assigned to an existing prototype'''
+
+    # sort indicators based on length
+    indices = range(similarityMatrix.shape[0])
+    def compare(i, j):
+        if len(interactions[i].getIndicator(indicatorName)) > len(interactions[j].getIndicator(indicatorName)):
+            return -1
+        elif len(interactions[i].getIndicator(indicatorName)) == len(interactions[j].getIndicator(indicatorName)):
+            return 0
+        else:
+            return 1
+    indices.sort(compare)
+    # go through all indicators
+    prototypeIndices = [indices[0]]
+    for i in indices[1:]:
+        if similarityMatrix[i][prototypeIndices].max() < minSimilarity:
+            prototypeIndices.append(i)
+
+    # assignment
+    labels = [-1]*similarityMatrix.shape[0]
+    indices = [i for i in range(similarityMatrix.shape[0]) if i not in prototypeIndices]
+    for i in prototypeIndices:
+        labels[i] = i
+    for i in indices:
+        prototypeIndex = similarityMatrix[i][prototypeIndices].argmax()
+        labels[i] = prototypeIndices[prototypeIndex]
+
+    return prototypeIndices, labels
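A minimal sketch of prototypeCluster on toy data; the stand-in class below is purely hypothetical and only provides the getIndicator method the function needs, and the similarity matrix stands for precomputed (eg normalized LCSS) similarities:

import numpy as np
from events import prototypeCluster

class ToyInteraction(object):
    'hypothetical stand-in providing only what prototypeCluster uses'
    def __init__(self, indicatorValues):
        self.indicatorValues = indicatorValues
    def getIndicator(self, name):
        return self.indicatorValues

interactions = [ToyInteraction([2.5, 2.0, 1.6]),
                ToyInteraction([2.4, 1.9, 1.7]),
                ToyInteraction([5.0, 4.8])]
similarityMatrix = np.array([[1. , 0.9, 0.2],
                             [0.9, 1. , 0.1],
                             [0.2, 0.1, 1. ]])
prototypeIndices, labels = prototypeCluster(interactions, similarityMatrix, None, 'Time to Collision', 0.5)
# prototypeIndices == [0, 2]: interaction 2 is too dissimilar (0.2 < 0.5) and becomes a prototype
# labels == [0, 0, 2]: interaction 1 is assigned to prototype 0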
+
+def prototypeMultivariateCluster(interactions, similarityMatrices, indicatorNames, minSimilarities, minClusterSize):
+    '''Finds exemplar indicator time series (several indicators) for all interactions
+
+    if any interaction indicator time series is different enough (<minSimilarity),
+    it will become a new prototype. 
+    Non-prototype interactions will be assigned to an existing prototype if all indicators are similar enough'''
+    pass
 
 # TODO:
 #http://stackoverflow.com/questions/3288595/multiprocessing-using-pool-map-on-a-function-defined-in-a-class
 #http://www.rueckstiess.net/research/snippets/show/ca1d7d90
 def calculateIndicatorPipe(pairs, predParam, timeHorizon=75,collisionDistanceThreshold=1.8):  
-    collisionPoints, crossingZones = prediction.computeCrossingsCollisions(pairs.movingObject1, pairs.movingObject2, predParam, collisionDistanceThreshold, timeHorizon)      
+    collisionPoints, crossingZones = prediction.computeCrossingsCollisions(pairs.roadUser1, pairs.roadUser2, predParam, collisionDistanceThreshold, timeHorizon)      
     #print pairs.num    
     # Ignore empty collision points
     empty = 1
@@ -137,7 +265,7 @@
             #prog = Tools.ProgressBar(0, len(self.pairs), 77) #Removed in traffic-intelligenc port
             for j in xrange(len(self.pairs)):
                 #prog.updateAmount(j) #Removed in traffic-intelligenc port
-                collisionPoints, crossingZones = prediction.computeCrossingsCollisions(self.pairs[j].movingObject1, self.pairs[j].movingObject2, predParam, collisionDistanceThreshold, timeHorizon)      
+                collisionPoints, crossingZones = prediction.computeCrossingsCollisions(self.pairs[j].roadUser1, self.pairs[j].roadUser2, predParam, collisionDistanceThreshold, timeHorizon)      
                 
                 # Ignore empty collision points
                 empty = 1
@@ -181,7 +309,7 @@
                 lists.append(j.num)
         return lists
     
-    def getCPlist(self,indicatorThreshold=99999):
+    def getCPlist(self,indicatorThreshold=float('Inf')):
         lists = []
         for j in self.pairs:
             if(j.hasCP):
@@ -190,7 +318,7 @@
                         lists.append([k,j.CP[k][0]])
         return lists
      
-    def getCZlist(self,indicatorThreshold=99999):
+    def getCZlist(self,indicatorThreshold=float('Inf')):
         lists = []
         for j in self.pairs:
             if(j.hasCZ):
@@ -229,7 +357,7 @@
 if __name__ == "__main__":
     import doctest
     import unittest
-    #suite = doctest.DocFileSuite('tests/moving.txt')
-    suite = doctest.DocTestSuite()
+    suite = doctest.DocFileSuite('tests/events.txt')
+    #suite = doctest.DocTestSuite()
     unittest.TextTestRunner().run(suite)
     
--- a/python/indicators.py	Thu Apr 18 15:29:33 2013 -0400
+++ b/python/indicators.py	Fri Dec 05 12:13:53 2014 -0500
@@ -18,7 +18,6 @@
     
     def __init__(self, name, values, timeInterval=None, maxValue = None):
         self.name = name
-        self.isCosine = (name.find('Cosine') >= 0)
         if timeInterval:
             assert len(values) == timeInterval.length()
             self.timeInterval = timeInterval
@@ -69,52 +68,70 @@
     def getTimeInterval(self):
         return self.timeInterval
 
+    def getName(self):
+        return self.name
+
     def getValues(self):
         return [self.__getitem__(t) for t in self.timeInterval]
 
-    def getAngleValues(self):
-        '''if the indicator is a function of an angle, 
-        transform it to an angle (eg cos)
-        (no transformation otherwise)'''
-        from numpy import arccos
-        values = self.getValues()
-        if self.isCosine:
-            return [arccos(c) for c in values]
-        else: 
-            return values
-
-    def plot(self, options = '', xfactor = 1., **kwargs):
+    def plot(self, options = '', xfactor = 1., yfactor = 1., timeShift = 0, **kwargs):
         from matplotlib.pylab import plot,ylim
         if self.getTimeInterval().length() == 1:
             marker = 'o'
         else:
             marker = ''
         time = sorted(self.values.keys())
-        plot([x/xfactor for x in time], [self.values[i] for i in time], options+marker, **kwargs)
+        plot([(x+timeShift)/xfactor for x in time], [self.values[i]/yfactor for i in time], options+marker, **kwargs)
         if self.maxValue:
             ylim(ymax = self.maxValue)
+
+    def valueSorted(self):
+        '''Returns the values sorted by their keys (instants)
+        This should probably not be used: to delete'''
+        print('Deprecated: values should not be accessed in this way')
+        values=[]
+        keys = self.values.keys()
+        keys.sort()
+        for key in keys:
+            values.append(self.values[key]) 
+        return values
 
-def computeDLCSS(indicator1, indicator2, threshold, delta = float('inf'), method= 'min'):
-    ''' compute the distance between two indicators using LCSS
-    two common methods are used: min or mean of the indicators length'''
-    from utils import LCSS
+
+def l1Distance(x, y): # lambda x,y:abs(x-y)
+    if x == None or y == None:
+        return float('inf')
+    else:
+        return abs(x-y)
+
+from utils import LCSS as utilsLCSS
 
-    def distance(x, y): # lambda x,y:abs(x-y)
-        if x == None or y == None:
-            return float('inf')
-        else: 
-            return abs(x-y)
+class LCSS(utilsLCSS):
+    '''LCSS class adapted to indicators, with the same interface as utils.LCSS'''
+    def __init__(self, similarityFunc, delta = float('inf'), minLength = 0, aligned = False, lengthFunc = min):
+        utilsLCSS.__init__(self, similarityFunc, delta, aligned, lengthFunc)
+        self.minLength = minLength
+
+    def checkIndicator(self, indicator):
+        return indicator != None and len(indicator) >= self.minLength
 
-    lcss = LCSS(indicator1.getValues(), indicator2.getValues(), threshold, distance, delta)
-    if method == 'min':
-        denominator = min(len(indicator1), len(indicator2))
-    elif method == 'mean':
-        denominator = float(len(indicator1) + len(indicator2))/2
-    else:
-        print('Unknown denominator method name')
-        denominator = 1.
-    return 1-float(lcss)/denominator
+    def compute(self, indicator1, indicator2, computeSubSequence = False):
+        if self.checkIndicator(indicator1) and self.checkIndicator(indicator2):
+            return self._compute(indicator1.getValues(), indicator2.getValues(), computeSubSequence)
+        else:
+            return 0
 
+    def computeNormalized(self, indicator1, indicator2, computeSubSequence = False):
+        if self.checkIndicator(indicator1) and self.checkIndicator(indicator2):
+            return self._computeNormalized(indicator1.getValues(), indicator2.getValues(), computeSubSequence)
+        else:
+            return 0.
+
+    def computeDistance(self, indicator1, indicator2, computeSubSequence = False):
+        if self.checkIndicator(indicator1) and self.checkIndicator(indicator2):
+            return self._computeDistance(indicator1.getValues(), indicator2.getValues(), computeSubSequence)
+        else:
+            return 1.
+        
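A minimal sketch of comparing two indicators with the adapted LCSS class, assuming utils.LCSS expects a boolean element-wise similarity function (here a threshold on l1Distance) and that TemporalIndicator accepts values as a dictionary indexed by instant:

from indicators import TemporalIndicator, LCSS, l1Distance

ttc1 = TemporalIndicator('Time to Collision', {0: 2.5, 1: 2.1, 2: 1.8})
ttc2 = TemporalIndicator('Time to Collision', {0: 2.4, 1: 2.0, 2: 1.9, 3: 1.7})
lcss = LCSS(lambda x, y: l1Distance(x, y) < 0.2, minLength = 2)
print(lcss.computeNormalized(ttc1, ttc2))  # similarity in [0, 1]; 0 if either indicator is shorter than minLength
print(lcss.computeDistance(ttc1, ttc2))    # distance in [0, 1]; 1 if either indicator is shorter than minLength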
 class SeverityIndicator(TemporalIndicator):
     '''Class for severity indicators 
     field mostSevereIsMax is True 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/python/metadata.py	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,170 @@
+# from moving import Point
+
+from datetime import datetime
+from os import path
+
+from sqlalchemy import create_engine, Column, Integer, Float, DateTime, String, ForeignKey
+from sqlalchemy.orm import relationship, backref, sessionmaker
+from sqlalchemy.ext.declarative import declarative_base
+
+from utils import datetimeFormat
+
+Base = declarative_base()
+
+class Site(Base):
+    __tablename__ = 'sites'
+    idx = Column(Integer, primary_key=True)
+    name = Column(String) # the site directory path, relative to the database location
+    description = Column(String) # longer name, eg intersection of road1 and road2
+    xcoordinate = Column(Float)  # ideally a moving.Point, but needs to be stored as simple columns
+    ycoordinate = Column(Float)
+    
+    def __init__(self, name, description = "", xcoordinate = None, ycoordinate = None):
+        self.name = name
+        self.description = description
+        self.xcoordinate = xcoordinate
+        self.ycoordinate = ycoordinate
+
+    def getFilename(self):
+        return self.name
+
+class EnvironementalFactors(Base):
+    '''Represents any environmental factors that may affect the results, in particular
+    * changing weather conditions
+    * changing road configuration, geometry, signalization, etc.
+    ex: sunny, rainy, before counter-measure, after counter-measure'''
+    __tablename__ = 'environmental_factors'
+    idx = Column(Integer, primary_key=True)
+    startTime = Column(DateTime)
+    endTime = Column(DateTime)
+    description = Column(String) # eg sunny, before, after
+    siteIdx = Column(Integer, ForeignKey('sites.idx'))
+
+    site = relationship("Site", backref=backref('environmental_factors', order_by = idx))
+
+    def __init__(self, startTime, endTime, description, site):
+        'startTime is passed as string in utils.datetimeFormat, eg 2011-06-22 10:00:39'
+        self.startTime = datetime.strptime(startTime, datetimeFormat)
+        self.endTime = datetime.strptime(endTime, datetimeFormat)
+        self.description = description
+        self.site = site
+
+class CameraView(Base):
+    __tablename__ = 'camera_views'
+    idx = Column(Integer, primary_key=True)
+    frameRate = Column(Float)
+    homographyFilename = Column(String) # path to homography file, relative to the site name
+    cameraCalibrationFilename = Column(String) # path to full camera calibration, relative to the site name
+    siteIdx = Column(Integer, ForeignKey('sites.idx'))
+    homographyDistanceUnit = Column(String, default = 'm') # make sure it is the default in the database
+    configurationFilename = Column(String) # path to configuration .cfg file, relative to site name
+
+    site = relationship("Site", backref=backref('camera_views', order_by = idx))
+
+    def __init__(self, frameRate, homographyFilename, cameraCalibrationFilename, site, configurationFilename):
+        self.frameRate = frameRate
+        self.homographyFilename = homographyFilename
+        self.cameraCalibrationFilename = cameraCalibrationFilename
+        self.site = site
+        self.configurationFilename = configurationFilename
+
+    def getHomographyFilename(self, relativeToSiteFilename = True):
+        if relativeToSiteFilename:
+            return self.site.getFilename()+path.sep+self.homographyFilename
+        else:
+            return self.homographyFilename
+
+class Alignment(Base):
+    __tablename__ = 'alignments'
+    idx = Column(Integer, primary_key=True)
+    cameraViewIdx = Column(Integer, ForeignKey('camera_views.idx'))
+    
+    cameraView = relationship("CameraView", backref=backref('alignments', order_by = idx))
+
+    def __init__(self, cameraView):
+        self.cameraView = cameraView
+
+class Point(Base):
+    __tablename__ = 'points'
+    alignmentIdx = Column(Integer, ForeignKey('alignments.idx'), primary_key=True)
+    index = Column(Integer, primary_key=True) # order of points in this alignment
+    x = Column(Float)
+    y = Column(Float)
+
+    alignment = relationship("Alignment", backref=backref('points', order_by = index))
+    
+    def __init__(self, alignmentIdx, index, x, y):
+        self.alignmentIdx = alignmentIdx
+        self.index = index
+        self.x = x
+        self.y = y
+
+class VideoSequence(Base):
+    __tablename__ = 'video_sequences'
+    idx = Column(Integer, primary_key=True)
+    name = Column(String) # path relative to the site name
+    startTime = Column(DateTime)
+    duration = Column(Float) # video sequence duration
+    durationUnit = Column(String, default = 's')
+    siteIdx = Column(Integer, ForeignKey('sites.idx'))
+    cameraViewIdx = Column(Integer, ForeignKey('camera_views.idx'))
+    configurationFilename = Column(String)
+
+    site = relationship("Site", backref=backref('video_sequences', order_by = idx))
+    cameraView = relationship("CameraView", backref=backref('video_sequences', order_by = idx))
+
+    def __init__(self, name, startTime, duration, site, cameraView, configurationFilename = None):
+        'startTime is passed as string in utils.datetimeFormat, eg 2011-06-22 10:00:39'
+        self.name = name
+        self.startTime = datetime.strptime(startTime, datetimeFormat)
+        self.duration = duration
+        self.site = site
+        self.cameraView = cameraView
+        self.configurationFilename = configurationFilename
+
+    def getVideoSequenceFilename(self, relativeToSiteFilename = True):
+        if relativeToSiteFilename:
+            return self.site.getFilename()+path.sep+self.name
+        else:
+            return self.name
+
+        #def getConfigurationFilename(self):
+        #'returns the local configuration filename, or the one of the camera view otherwise'
+
+# add class for Analysis: foreign key VideoSequenceId, dataFilename, configFilename (get the one from camera view by default), mask? (no, can be referenced in the tracking cfg file)
+
+# class SiteDescription(Base): # list of lines and polygons describing the site, eg for sidewalks, center lines
+
+# class Analysis(Base): # parameters necessary for processing the data: free form
+# eg bounding box depends on camera view, tracking configuration depends on camera view 
+# results: sqlite
+
+def createDatabase(filename):
+    'creates the database schema in filename (if needed) and returns a session to query it'
+    engine = create_engine('sqlite:///'+filename)
+    Base.metadata.create_all(engine)
+    Session = sessionmaker(bind=engine)
+    return Session()
+
+def connectDatabase(filename):
+    'returns a session to query an existing database'
+    engine = create_engine('sqlite:///'+filename)
+    Session = sessionmaker(bind=engine)
+    return Session()
+
+def initializeSites(session, directoryName):
+    '''Initializes default site objects and Camera Views
+    
+    eg somedirectory/montreal/ contains intersection1, intersection2, etc.
+    The site names would be somedirectory/montreal/intersection1, somedirectory/montreal/intersection2, etc.'''
+    from os import listdir, path
+    sites = []
+    cameraViews = []
+    names = listdir(directoryName)
+    for name in names:
+        if path.isdir(directoryName+'/'+name):
+            sites.append(Site(directoryName+'/'+name, None))
+            cameraViews.append(CameraView(-1, None, None, sites[-1], None))
+    session.add_all(sites)
+    session.add_all(cameraViews)
+    session.commit()
+# TODO crawler for video files?
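A minimal sketch of populating the new metadata database (all filenames and the site name are hypothetical; startTime follows the utils.datetimeFormat example given in the docstrings):

from metadata import createDatabase, Site, CameraView, VideoSequence

session = createDatabase('metadata.sqlite')   # creates the tables if needed and returns a session
site = Site('montreal/intersection1', 'intersection of road1 and road2')
view = CameraView(30., 'homography.txt', 'calibration.txt', site, 'tracking.cfg')
video = VideoSequence('video1.avi', '2011-06-22 10:00:39', 3600., site, view)
session.add_all([site, view, video])
session.commit()
print(video.getVideoSequenceFilename())       # 'montreal/intersection1' + path separator + 'video1.avi'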
--- a/python/ml.py	Thu Apr 18 15:29:33 2013 -0400
+++ b/python/ml.py	Fri Dec 05 12:13:53 2014 -0500
@@ -5,6 +5,29 @@
 
 __metaclass__ = type
 
+class Model(object):
+    '''Abstract class for loading/saving model'''    
+    def load(self, fn):
+        self.model.load(fn)
+
+    def save(self, fn):
+        self.model.save(fn)
+
+class SVM(Model):
+    '''Wrapper around the OpenCV Support Vector Machine (SVM) algorithm'''
+
+    def __init__(self, svm_type, kernel_type, degree = 0, gamma = 1, coef0 = 0, Cvalue = 1, nu = 0, p = 0):
+        import cv2
+        self.model = cv2.SVM()
+        self.params = dict(svm_type = svm_type, kernel_type = kernel_type, degree = degree, gamma = gamma, coef0 = coef0, Cvalue = Cvalue, nu = nu, p = p)
+
+    def train(self, samples, responses):
+        self.model.train(samples, responses, params = self.params)
+
+    def predict(self, samples):
+        return np.float32([self.model.predict(s) for s in samples])
+
+
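A minimal sketch of the SVM wrapper, assuming the OpenCV 2.4 Python bindings (cv2.SVM and the SVM_* constants):

import cv2
import numpy as np
from ml import SVM

samples = np.float32([[0., 0.], [0.2, 0.1], [1., 1.], [0.9, 1.1]])  # one row per sample
responses = np.float32([0, 0, 1, 1])                                # class labels
svm = SVM(svm_type = cv2.SVM_C_SVC, kernel_type = cv2.SVM_LINEAR)
svm.train(samples, responses)
print(svm.predict(samples))  # predicted label for each sample
svm.save('svm.xml')          # persistence inherited from Model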
 class Centroid:
     'Wrapper around instances to add a counter'
 
@@ -25,13 +48,17 @@
         inst.multiply(1/(self.nInstances+instance.nInstances))
         return Centroid(inst, self.nInstances+instance.nInstances)
 
-    def draw(self, options = ''):
+    def plot(self, options = ''):
         from matplotlib.pylab import text
-        self.instance.draw(options)
+        self.instance.plot(options)
         text(self.instance.position.x+1, self.instance.position.y+1, str(self.nInstances))
 
+def kMedoids(similarityMatrix, initialCentroids = None, k = None):
+    '''Algorithm that clusters any dataset based on a similarity matrix
+    Either initialCentroids or k is passed'''
+    pass
 
-def clustering(data, similar, initialCentroids = []):
+def assignCluster(data, similarFunc, initialCentroids = None, shuffleData = True):
     '''k-means algorithm with similarity function
     Two instances should be in the same cluster if the sameCluster function returns true for two instances. It is supposed that the average centroid of a set of instances can be computed, using the function. 
     The number of clusters will be determined accordingly
@@ -42,14 +69,15 @@
     from random import shuffle
     from copy import copy, deepcopy
     localdata = copy(data) # shallow copy to avoid modifying data
-    shuffle(localdata)
-    if initialCentroids:
+    if shuffleData:
+        shuffle(localdata)
+    if initialCentroids == None:
+        centroids = [Centroid(localdata[0])]
+    else:
         centroids = deepcopy(initialCentroids)
-    else:
-        centroids = [Centroid(localdata[0])]
     for instance in localdata[1:]:
         i = 0
-        while i<len(centroids) and not similar(centroids[i].instance, instance):
+        while i<len(centroids) and not similarFunc(centroids[i].instance, instance):
             i += 1
         if i == len(centroids):
             centroids.append(Centroid(instance))
@@ -58,6 +86,8 @@
 
     return centroids
 
+# TODO recompute centroids for each cluster: instance that minimizes some measure to all other elements
+
 def spectralClustering(similarityMatrix, k, iter=20):
 	'''Spectral Clustering algorithm'''
 	n = len(similarityMatrix)
@@ -77,3 +107,14 @@
 	centroids,distortion = kmeans(features,k, iter)
 	code,distance = vq(features,centroids) # code starting from 0 (represent first cluster) to k-1 (last cluster)
 	return code,sigma	
+
+def motionPatternLearning(objects, maxDistance):
+    ''' 
+    Option to use only the (n?) longest features per object instead of all for speed up
+    TODO'''
+    pass
+
+def prototypeCluster():
+    ''' 
+    TODO'''
+    pass
--- a/python/moving.py	Thu Apr 18 15:29:33 2013 -0400
+++ b/python/moving.py	Fri Dec 05 12:13:53 2014 -0500
@@ -5,8 +5,15 @@
 import cvutils
 
 from math import sqrt
+from numpy import median
 
-#from shapely.geometry import Polygon
+try:
+    from shapely.geometry import Polygon, Point as shapelyPoint
+    from shapely.prepared import prep
+    shapelyAvailable = True
+except ImportError:
+    print('Shapely library could not be loaded')
+    shapelyAvailable = False
 
 __metaclass__ = type
 
@@ -29,6 +36,9 @@
     def empty(self):
         return self.first > self.last
 
+    def center(self):
+        return (self.first+self.last)/2.
+
     def length(self):
         '''Returns the length of the interval'''
         return float(max(0,self.last-self.first))
@@ -90,7 +100,11 @@
 
     def __getitem__(self, i):
         if not self.empty():
-            return self.first+i
+            if isinstance(i, int):
+                return self.first+i
+            else:
+                raise TypeError, "Invalid argument type."
+            #elif isinstance( key, slice ):
 
     def __iter__(self):
         self.iterInstantNum = -1
@@ -128,7 +142,7 @@
     def empty(self):
         return self.timeInterval.empty() or not self.boudingPolygon
 
-    def getId(self):
+    def getNum(self):
         return self.num
 
     def getFirstInstant(self):
@@ -163,16 +177,31 @@
     def __sub__(self, other):
         return Point(self.x-other.x, self.y-other.y)
 
+    def __neg__(self):
+        return Point(-self.x, -self.y)
+
+    def __getitem__(self, i):
+        if i == 0:
+            return self.x
+        elif i == 1:
+            return self.y
+        else:
+            raise IndexError()
+    
+    def orthogonal(self):
+        return Point(self.y, -self.x)
+
     def multiply(self, alpha):
+        'Warning, returns a new Point'
         return Point(self.x*alpha, self.y*alpha)
 
-    def draw(self, options = 'o', **kwargs):
+    def plot(self, options = 'o', **kwargs):
         from matplotlib.pylab import plot
         plot([self.x], [self.y], options, **kwargs)
 
     def norm2Squared(self):
         '''2-norm distance (Euclidean distance)'''
-        return self.x*self.x+self.y*self.y
+        return self.x**2+self.y**2
 
     def norm2(self):
         '''2-norm distance (Euclidean distance)'''
@@ -193,36 +222,44 @@
     def asint(self):
         return Point(int(self.x), int(self.y))
 
+    if shapelyAvailable:
+        def asShapely(self):
+            return shapelyPoint(self.x, self.y)
+
     def project(self, homography):
         from numpy.core.multiarray import array
         projected = cvutils.projectArray(homography, array([[self.x], [self.y]]))
         return Point(projected[0], projected[1])
 
-    def inPolygon(self, poly):
-        '''Returns if the point x, y is inside the polygon.
-        The polygon is defined by the ordered list of points in poly
-        
+    def inPolygonNoShapely(self, polygon):
+        '''Indicates if the point x, y is inside the polygon
+        (array of Nx2 coordinates of the polygon vertices)
+
         taken from http://www.ariel.com.au/a/python-point-int-poly.html
 
-        Use points_inside_poly from matplotlib.nxutils'''
+        Use Polygon.contains if Shapely is installed'''
 
-        n = len(poly);
+        n = polygon.shape[0];
         counter = 0;
 
-        p1 = poly[0];
+        p1 = polygon[0,:];
         for i in range(n+1):
-            p2 = poly[i % n];
-            if self.y > min(p1.y,p2.y):
-                if self.y <= max(p1.y,p2.y):
-                    if self.x <= max(p1.x,p2.x):
-                        if p1.y != p2.y:
-                            xinters = (self.y-p1.y)*(p2.x-p1.x)/(p2.y-p1.y)+p1.x;
-                        if p1.x == p2.x or self.x <= xinters:
+            p2 = polygon[i % n,:];
+            if self.y > min(p1[1],p2[1]):
+                if self.y <= max(p1[1],p2[1]):
+                    if self.x <= max(p1[0],p2[0]):
+                        if p1[1] != p2[1]:
+                            xinters = (self.y-p1[1])*(p2[0]-p1[0])/(p2[1]-p1[1])+p1[0];
+                        if p1[0] == p2[0] or self.x <= xinters:
                             counter+=1;
             p1=p2
         return (counter%2 == 1);
 
     @staticmethod
+    def fromList(p):
+        return Point(p[0], p[1])
+
+    @staticmethod
     def dot(p1, p2):
         'Scalar product'
         return p1.x*p2.x+p1.y*p2.y
@@ -233,13 +270,201 @@
         return p1.x*p2.y-p1.y*p2.x
 
     @staticmethod
+    def cosine(p1, p2):
+        return Point.dot(p1,p2)/(p1.norm2()*p2.norm2())
+
+    @staticmethod
     def distanceNorm2(p1, p2):
         return (p1-p2).norm2()
 
     @staticmethod
-    def plotAll(points, color='r'):
+    def plotAll(points, **kwargs):
         from matplotlib.pyplot import scatter
-        scatter([p.x for p in points],[p.y for p in points], c=color)
+        scatter([p.x for p in points],[p.y for p in points], **kwargs)
+
+    def similarOrientation(self, refDirection, cosineThreshold):
+        'Indicates whether the cosine of the vector and refDirection is at least cosineThreshold'
+        return Point.cosine(self, refDirection) >= cosineThreshold
+
+    @staticmethod
+    def timeToCollision(p1, p2, v1, v2, collisionThreshold):
+        '''Computes exact time to collision with a distance threshold
+        The unknown of the equation is the time to reach the intersection
+        between the relative trajectory of one road user
+        and the circle of radius collisionThreshold around the other road user'''
+        from math import sqrt
+        dv = v1-v2
+        dp = p1-p2
+        a = dv.norm2Squared()#(v1.x-v2.x)**2 + (v1.y-v2.y)**2
+        b = 2*Point.dot(dv, dp)#2 * ((p1.x-p2.x) * (v1.x-v2.x) + (p1.y-p2.y) * (v1.y-v2.y))
+        c = dp.norm2Squared() - collisionThreshold**2#(p1.x-p2.x)**2 + (p1.y-p2.y)**2 - collisionThreshold**2
+
+        delta = b**2 - 4*a*c
+        if delta >= 0:
+            deltaRoot = sqrt(delta)
+            ttc1 = (-b + deltaRoot)/(2*a)
+            ttc2 = (-b - deltaRoot)/(2*a)
+            if ttc1 >= 0 and ttc2 >= 0:
+                ttc = min(ttc1,ttc2)
+            elif ttc1 >= 0:
+                ttc = ttc1
+            elif ttc2 >= 0:
+                ttc = ttc2
+            else: # ttc1 < 0 and ttc2 < 0:
+                ttc = None
+        else:
+            ttc = None
+        return ttc
+
+    @staticmethod   
+    def midPoint(p1, p2):
+        'Returns the middle of the segment [p1, p2]'
+        return Point(0.5*p1.x+0.5*p2.x, 0.5*p1.y+0.5*p2.y)
+
+if shapelyAvailable:
+    def pointsInPolygon(points, polygon):
+        '''Optimized tests of a series of points within (Shapely) polygon '''
+        prepared_polygon = prep(polygon)
+        return filter(prepared_polygon.contains, points)
+
+# Functions for coordinate transformation
+# From Paul St-Aubin's PVA tools
+def subsec_spline_dist(splines):
+    ''' Prepare list of spline subsegments from a spline list. 
+    
+    Output:
+    =======
+    ss_spline_d[spline #][mode][station]
+    
+    where:
+        mode=0: incremental distance
+        mode=1: cumulative distance
+        mode=2: cumulative distance with trailing distance
+    '''
+
+    from numpy import zeros
+    ss_spline_d = []
+    #Prepare subsegment distances
+    for spline in range(len(splines)):
+        ss_spline_d.append([[],[],[]])
+        ss_spline_d[spline][0] = zeros(len(splines[spline])-1)  #Incremental distance
+        ss_spline_d[spline][1] = zeros(len(splines[spline])-1)  #Cumulative distance
+        ss_spline_d[spline][2] = zeros(len(splines[spline]))  #Cumulative distance with trailing distance
+        for spline_p in range(len(splines[spline])):
+            if spline_p > (len(splines[spline]) - 2):
+                break
+            ss_spline_d[spline][0][spline_p] = utils.pointDistanceL2(splines[spline][spline_p][0],splines[spline][spline_p][1],splines[spline][(spline_p+1)][0],splines[spline][(spline_p+1)][1])
+            ss_spline_d[spline][1][spline_p] = sum(ss_spline_d[spline][0][0:spline_p])
+            ss_spline_d[spline][2][spline_p] = ss_spline_d[spline][1][spline_p]#sum(ss_spline_d[spline][0][0:spline_p])
+    
+        ss_spline_d[spline][2][-1] = ss_spline_d[spline][2][-2] + ss_spline_d[spline][0][-1]
+
+    return ss_spline_d
+
+def ppldb2p(qx,qy, p0x,p0y, p1x,p1y):
+    ''' Point-projection (Q) on line defined by 2 points (P0,P1). 
+        http://cs.nyu.edu/~yap/classes/visual/03s/hw/h2/math.pdf
+        '''
+    if(p0x == p1x and p0y == p1y):
+        return None
+    try:
+        #Approximate slope singularity by giving some slope roundoff; account for roundoff error
+        if(round(p0x, 10) == round(p1x, 10)):
+            p1x += 0.0000000001
+        if(round(p0y, 10) == round(p1y, 10)):
+            p1y += 0.0000000001            
+        #make the calculation
+        Y = (-(qx)*(p0y-p1y)-(qy*(p0y-p1y)**2)/(p0x-p1x)+p0x**2*(p0y-p1y)/(p0x-p1x)-p0x*p1x*(p0y-p1y)/(p0x-p1x)-p0y*(p0x-p1x))/(p1x-p0x-(p0y-p1y)**2/(p0x-p1x))
+        X = (-Y*(p1y-p0y)+qx*(p1x-p0x)+qy*(p1y-p0y))/(p1x-p0x)
+    except ZeroDivisionError:
+        print('Error: Division by zero in ppldb2p. Please report this error with the full traceback:')
+        print('qx={0}, qy={1}, p0x={2}, p0y={3}, p1x={4}, p1y={5}...'.format(qx, qy, p0x, p0y, p1x, p1y))
+        import pdb; pdb.set_trace()  
+    return Point(X,Y)
+
+def getSYfromXY(p, splines, goodEnoughSplineDistance = 0.5):
+    ''' Snap a point p to its nearest subsegment of its nearest spline (from the list splines). A spline is a list of points (class Point), most likely a trajectory. 
+        
+        Output:
+        =======
+        [spline index, 
+        subsegment leading point index, 
+        snapped point, 
+        subsegment distance, 
+        spline distance,
+        orthogonal point offset]
+    '''
+    minOffsetY = float('inf')
+    #For each spline
+    for spline in range(len(splines)):
+        #For each spline point index
+        for spline_p in range(len(splines[spline])-1):
+            #Get closest point on spline
+            closestPoint = ppldb2p(p.x,p.y,splines[spline][spline_p][0],splines[spline][spline_p][1],splines[spline][spline_p+1][0],splines[spline][spline_p+1][1])
+            if closestPoint == None:
+                print('Error: Spline {0}, segment {1} has identical bounds and therefore is not a vector. Projection cannot continue.'.format(spline, spline_p))
+                return None
+            # check if the projected point falls within the segment bounds
+            if utils.inBetween(splines[spline][spline_p][0], splines[spline][spline_p+1][0], closestPoint.x) and utils.inBetween(splines[spline][spline_p][1], splines[spline][spline_p+1][1], closestPoint.y): 
+                offsetY = Point.distanceNorm2(closestPoint, p)
+                if offsetY < minOffsetY:
+                    minOffsetY = offsetY
+                    snappedSpline = spline
+                    snappedSplineLeadingPoint = spline_p
+                    snappedPoint = Point(closestPoint.x, closestPoint.y)
+                #Jump loop if significantly close
+                if offsetY < goodEnoughSplineDistance: 
+                    break
+    #Get sub-segment distance
+    if minOffsetY != float('inf'):
+        subsegmentDistance = Point.distanceNorm2(snappedPoint, splines[snappedSpline][snappedSplineLeadingPoint])
+        #Get cumulative alignment distance (total segment distance)
+        splineDistanceS = splines[snappedSpline].getCumulativeDistance(snappedSplineLeadingPoint) + subsegmentDistance
+        orthogonalSplineVector = (splines[snappedSpline][snappedSplineLeadingPoint+1]-splines[snappedSpline][snappedSplineLeadingPoint]).orthogonal()
+        offsetVector = p-snappedPoint
+        if Point.dot(orthogonalSplineVector, offsetVector) < 0:
+            minOffsetY = -minOffsetY
+        return [snappedSpline, snappedSplineLeadingPoint, snappedPoint, subsegmentDistance, splineDistanceS, minOffsetY]
+    else:
+        return None
+
+def getXYfromSY(s, y, splineNum, splines, mode = 0):
+    ''' Find X,Y coordinate from S,Y data. 
+    if mode = 0 : return Snapped X,Y
+    if mode !=0 : return Real X,Y
+    ''' 
+    
+    #(buckle in, it gets ugly from here on out)
+    ss_spline_d = subsec_spline_dist(splines)
+    
+    #Find subsegment
+    snapped_x = None
+    snapped_y = None
+    for spline_ss_index in range(len(ss_spline_d[splineNum][1])):
+        if(s < ss_spline_d[splineNum][1][spline_ss_index]):
+            ss_value = s - ss_spline_d[splineNum][1][spline_ss_index-1]
+            #Get normal vector and then snap
+            vector_l_x = (splines[splineNum][spline_ss_index][0] - splines[splineNum][spline_ss_index-1][0])
+            vector_l_y = (splines[splineNum][spline_ss_index][1] - splines[splineNum][spline_ss_index-1][1])
+            magnitude  = sqrt(vector_l_x**2 + vector_l_y**2)
+            n_vector_x = vector_l_x/magnitude
+            n_vector_y = vector_l_y/magnitude
+            snapped_x  = splines[splineNum][spline_ss_index-1][0] + ss_value*n_vector_x
+            snapped_y  = splines[splineNum][spline_ss_index-1][1] + ss_value*n_vector_y
+
+            #Real values (including orthogonal projection of y))
+            real_x = snapped_x - y*n_vector_y 
+            real_y = snapped_y + y*n_vector_x            
+            break
+    
+    if mode == 0 or (not snapped_x):
+        if(not snapped_x):
+            snapped_x = splines[splineNum][-1][0]
+            snapped_y = splines[splineNum][-1][1]                
+        return [snapped_x,snapped_y]
+    else:
+        return [real_x,real_y]
+
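A minimal sketch of snapping a point to an alignment, where an alignment is simply a Trajectory whose cumulative distances have been computed:

from moving import Point, Trajectory, getSYfromXY
alignment = Trajectory.fromPointList([(0., 0.), (10., 0.), (20., 0.)])
alignment.computeCumulativeDistances()   # required: getSYfromXY calls getCumulativeDistance
result = getSYfromXY(Point(12., 1.5), [alignment])
# result = [alignment index, leading point index, snapped point, subsegment distance, S, Y], or None on failure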
 
 class NormAngle(object):
     '''Alternate encoding of a point, by its norm and orientation'''
@@ -254,6 +479,8 @@
         norm = p.norm2()
         if norm > 0:
             angle = atan2(p.y, p.x)
+        else:
+            angle = 0.
         return NormAngle(norm, angle)
 
     def __add__(self, other):
@@ -294,44 +521,59 @@
     def multiply(self, alpha):
         return FlowVector(self.position.multiply(alpha), self.velocity.multiply(alpha))
 
-    def draw(self, options = '', **kwargs):
+    def plot(self, options = '', **kwargs):
         from matplotlib.pylab import plot
         plot([self.position.x, self.position.x+self.velocity.x], [self.position.y, self.position.y+self.velocity.y], options, **kwargs)
-        self.position.draw(options+'x', **kwargs)
+        self.position.plot(options+'x', **kwargs)
     
     @staticmethod
     def similar(f1, f2, maxDistance2, maxDeltavelocity2):
         return (f1.position-f2.position).norm2Squared()<maxDistance2 and (f1.velocity-f2.velocity).norm2Squared()<maxDeltavelocity2
 
+def intersection(p1, p2, p3, p4):
+    ''' Intersection point (x,y) of lines formed by the vectors p1-p2 and p3-p4
+        http://paulbourke.net/geometry/pointlineplane/'''
+    dp12 = p2-p1
+    dp34 = p4-p3
+    #det = (p4.y-p3.y)*(p2.x-p1.x)-(p4.x-p3.x)*(p2.y-p1.y)
+    det = dp34.y*dp12.x-dp34.x*dp12.y
+    if det == 0:
+        return None
+    else:
+        ua = (dp34.x*(p1.y-p3.y)-dp34.y*(p1.x-p3.x))/det
+        return p1+dp12.multiply(ua)
+
+# def intersection(p1, p2, dp1, dp2):
+#     '''Returns the intersection point between the two lines 
+#     defined by the respective vectors (dp) and origin points (p)'''
+#     from numpy import matrix
+#     from numpy.linalg import linalg
+#     A = matrix([[dp1.y, -dp1.x],
+#                 [dp2.y, -dp2.x]])
+#     B = matrix([[dp1.y*p1.x-dp1.x*p1.y],
+#                 [dp2.y*p2.x-dp2.x*p2.y]])
+    
+#     if linalg.det(A) == 0:
+#         return None
+#     else:
+#         intersection = linalg.solve(A,B)
+#         return Point(intersection[0,0], intersection[1,0])
+
 def segmentIntersection(p1, p2, p3, p4):
     '''Returns the intersecting point of the segments [p1, p2] and [p3, p4], None otherwise'''
-    from numpy import matrix
-    from numpy.linalg import linalg, det
 
     if (Interval.intersection(Interval(p1.x,p2.x,True), Interval(p3.x,p4.x,True)).empty()) or (Interval.intersection(Interval(p1.y,p2.y,True), Interval(p3.y,p4.y,True)).empty()):
         return None
     else:
-        dp1 = p2-p1#[s1[0][1]-s1[0][0], s1[1][1]-s1[1][0]]
-        dp2 = p4-p3#[s2[0][1]-s2[0][0], s2[1][1]-s2[1][0]]
-
-        A = matrix([[dp1.y, -dp1.x],
-                    [dp2.y, -dp2.x]])
-        B = matrix([[dp1.y*p1.x-dp1.x*p1.y],
-                    [dp2.y*p3.x-dp2.x*p3.y]])
-
-        if linalg.det(A) == 0:#crossProduct(ds1, ds2) == 0:
-            return None
+        inter = intersection(p1, p2, p3, p4)
+        if (inter != None 
+            and utils.inBetween(p1.x, p2.x, inter.x)
+            and utils.inBetween(p3.x, p4.x, inter.x)
+            and utils.inBetween(p1.y, p2.y, inter.y)
+            and utils.inBetween(p3.y, p4.y, inter.y)):
+            return inter
         else:
-            intersection = linalg.solve(A,B)
-            if (utils.inBetween(p1.x, p2.x, intersection[0,0])
-                and utils.inBetween(p3.x, p4.x, intersection[0,0])
-                and utils.inBetween(p1.y, p2.y, intersection[1,0])
-                and utils.inBetween(p3.y, p4.y, intersection[1,0])):
-                return Point(intersection[0,0], intersection[1,0])
-            else:
-                return None
-
-# TODO: implement a better algorithm for intersections of sets of segments http://en.wikipedia.org/wiki/Line_segment_intersection
+            return None
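A quick check of the rewritten segment intersection, using only the classes defined in this file: two segments crossing at (1, 1):

from moving import Point, segmentIntersection
p = segmentIntersection(Point(0., 0.), Point(2., 2.), Point(0., 2.), Point(2., 0.))
# p is Point(1., 1.); None is returned for parallel or non-overlapping segments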
 
 class Trajectory(object):
     '''Class for trajectories: temporal sequence of positions
@@ -345,6 +587,16 @@
             self.positions = [[],[]]
 
     @staticmethod
+    def generate(p, v, nPoints):
+        t = Trajectory()
+        p0 = Point(p.x, p.y)
+        t.addPosition(p0)
+        for i in xrange(nPoints-1):
+            p0 += v
+            t.addPosition(p0)
+        return t, Trajectory([[v.x]*nPoints, [v.y]*nPoints])
+
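A minimal sketch of Trajectory.generate, which returns both the positions and the constant velocities:

from moving import Point, Trajectory
positions, velocities = Trajectory.generate(Point(0., 0.), Point(0.5, 0.), 10)
# len(positions) == 10; positions[-1] is the point (4.5, 0); velocities repeats (0.5, 0) ten times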
+    @staticmethod
     def load(line1, line2):
         return Trajectory([[float(n) for n in line1.split(' ')],
                            [float(n) for n in line2.split(' ')]])
@@ -352,18 +604,36 @@
     @staticmethod
     def fromPointList(points):
         t = Trajectory()
-        for p in points:
-            t.addPosition(p)
+        if isinstance(points[0], list) or isinstance(points[0], tuple):
+            for p in points:
+                t.addPositionXY(p[0],p[1])
+        else:
+            for p in points:
+                t.addPosition(p)
         return t
 
+    def __len__(self):
+        return len(self.positions[0])
+
+    def length(self):
+        return self.__len__()
+
+    def empty(self):
+        return self.__len__() == 0
+
+    def __getitem__(self, i):
+        if isinstance(i, int):
+            return Point(self.positions[0][i], self.positions[1][i])
+        else:
+            raise TypeError, "Invalid argument type."
+            #elif isinstance( key, slice ):
+
     def __str__(self):
         return ' '.join([self.__getitem__(i).__str__() for i in xrange(self.length())])
 
     def __repr__(self):
-        return str(self)
+        return self.__str__()
 
-    def __getitem__(self, i):
-        return Point(self.positions[0][i], self.positions[1][i])
 
     def __iter__(self):
         self.iterInstantNum = 0
@@ -376,11 +646,13 @@
             self.iterInstantNum += 1
             return self[self.iterInstantNum-1]
 
-    def length(self):
-        return len(self.positions[0])
+    def setPositionXY(self, i, x, y):
+        if i < self.__len__():
+            self.positions[0][i] = x
+            self.positions[1][i] = y
 
-    def __len__(self):
-        return self.length()
+    def setPosition(self, i, p):
+        self.setPositionXY(i, p.x, p.y)
 
     def addPositionXY(self, x, y):
         self.positions[0].append(x)
@@ -389,8 +661,12 @@
     def addPosition(self, p):
         self.addPositionXY(p.x, p.y)
 
+    def duplicateLastPosition(self):
+        self.positions[0].append(self.positions[0][-1])
+        self.positions[1].append(self.positions[1][-1])
+
     @staticmethod
-    def _draw(positions, options = '', withOrigin = False, lastCoordinate = None, timeStep = 1, **kwargs):
+    def _plot(positions, options = '', withOrigin = False, lastCoordinate = None, timeStep = 1, **kwargs):
         from matplotlib.pylab import plot
         if lastCoordinate == None:
             plot(positions[0][::timeStep], positions[1][::timeStep], options, **kwargs)
@@ -400,21 +676,19 @@
             plot([positions[0][0]], [positions[1][0]], 'ro', **kwargs)
 
     def project(self, homography):
-        from numpy.core.multiarray import array
-        projected = cvutils.projectArray(homography, array(self.positions))
-        return Trajectory(projected)
+        return Trajectory(cvutils.projectTrajectory(homography, self.positions))
+
+    def plot(self, options = '', withOrigin = False, timeStep = 1, **kwargs):
+        Trajectory._plot(self.positions, options, withOrigin, None, timeStep, **kwargs)
 
-    def draw(self, options = '', withOrigin = False, timeStep = 1, **kwargs):
-        Trajectory._draw(self.positions, options, withOrigin, None, timeStep, **kwargs)
+    def plotAt(self, lastCoordinate, options = '', withOrigin = False, timeStep = 1, **kwargs):
+        Trajectory._plot(self.positions, options, withOrigin, lastCoordinate, timeStep, **kwargs)
 
-    def drawAt(self, lastCoordinate, options = '', withOrigin = False, timeStep = 1, **kwargs):
-        Trajectory._draw(self.positions, options, withOrigin, lastCoordinate, timeStep, **kwargs)
-
-    def drawOnWorldImage(self, nPixelsPerUnitDistance, imageHeight, options = '', withOrigin = False, timeStep = 1, **kwargs):
+    def plotOnWorldImage(self, nPixelsPerUnitDistance, imageHeight, options = '', withOrigin = False, timeStep = 1, **kwargs):
         from matplotlib.pylab import plot
         imgPositions = [[x*nPixelsPerUnitDistance for x in self.positions[0]],
                         [-x*nPixelsPerUnitDistance+imageHeight for x in self.positions[1]]]
-        Trajectory._draw(imgPositions, options, withOrigin, timeStep, **kwargs)
+        Trajectory._plot(imgPositions, options, withOrigin, None, timeStep, **kwargs)
 
     def getXCoordinates(self):
         return self.positions[0]
@@ -452,6 +726,14 @@
             return Trajectory([[a-b for a,b in zip(self.getXCoordinates(),traj2.getXCoordinates())],
                                [a-b for a,b in zip(self.getYCoordinates(),traj2.getYCoordinates())]])
 
+    def differentiate(self, doubleLastPosition = False):
+        diff = Trajectory()
+        for i in xrange(1, self.length()):
+            diff.addPosition(self[i]-self[i-1])
+        if doubleLastPosition:
+            diff.addPosition(diff[-1])
+        return diff
+
     def norm(self):
         '''Returns the list of the norms at each instant'''
 #        def add(x, y): return x+y
@@ -460,12 +742,52 @@
         from numpy import hypot
         return hypot(self.positions[0], self.positions[1])
 
-    def cumulatedDisplacement(self):
-        'Returns the sum of the distances between each successive point'
-        displacement = 0
+    # def cumulatedDisplacement(self):
+    #     'Returns the sum of the distances between each successive point'
+    #     displacement = 0
+    #     for i in xrange(self.length()-1):
+    #         displacement += Point.distanceNorm2(self.__getitem__(i),self.__getitem__(i+1))
+    #     return displacement
+
+    def computeCumulativeDistances(self):
+        '''Computes the distance from each point to the next and the cumulative distance up to the point
+        Can be accessed through getDistance(idx) and getCumulativeDistance(idx)'''
+        self.distances = []
+        self.cumulativeDistances = [0.]
+        p1 = self[0]
+        cumulativeDistance = 0.
         for i in xrange(self.length()-1):
-            displacement += Point.distanceNorm2(self.__getitem__(i),self.__getitem__(i+1))
-        return displacement
+            p2 = self[i+1]
+            self.distances.append(Point.distanceNorm2(p1,p2))
+            cumulativeDistance += self.distances[-1]
+            self.cumulativeDistances.append(cumulativeDistance)
+            p1 = p2
+
+    def getDistance(self,i):
+        '''Return the distance between points i and i+1'''
+        if i < self.length()-1:
+            return self.distances[i]
+        else:
+            print('Index {} beyond trajectory length {}-1'.format(i, self.length()))
+
+    def getCumulativeDistance(self, i):
+        '''Return the cumulative distance between the beginning and point i'''
+        if i < self.length():
+            return self.cumulativeDistances[i]
+        else:
+            print('Index {} beyond trajectory length {}'.format(i, self.length()))
+
+    def similarOrientation(self, refDirection, cosineThreshold, minProportion = 0.5):
+        '''Indicates whether at least minProportion (<=1., eg half) of the trajectory elements (eg velocity vectors)
+        have a cosine with refDirection of at least cosineThreshold'''
+        count = 0
+        lengthThreshold = float(self.length())*minProportion
+        for p in self:
+            if p.similarOrientation(refDirection, cosineThreshold):
+                count += 1
+            if count > lengthThreshold:
+                return True
+        return False
 
     def wiggliness(self):
         return self.cumulatedDisplacement()/float(Point.distanceNorm2(self.__getitem__(0),self.__getitem__(self.length()-1)))
@@ -497,34 +819,104 @@
         else:
             return None
     
-    def getTrajectoryInPolygon(self, polygon):
-        '''Returns the set of points inside the polygon
+    def getTrajectoryInPolygonNoShapely(self, polygon):
+        '''Returns the trajectory built with the set of points inside the polygon
         (array of Nx2 coordinates of the polygon vertices)'''
-        import matplotlib.nxutils as nx
         traj = Trajectory()
-        result = nx.points_inside_poly(self.asArray().T, polygon)
-        for i in xrange(self.length()):
-            if result[i]:
-                traj.addPositionXY(self.positions[0][i], self.positions[1][i])
+        for p in self:
+            if p.inPolygonNoShapely(polygon):
+                traj.addPosition(p)
         return traj
 
-    # version 2: use shapely polygon contains
-
+    if shapelyAvailable:
+        def getTrajectoryInPolygon(self, polygon):
+            '''Returns the trajectory built with the set of points inside the (shapely) polygon'''
+            traj = Trajectory()
+            points = [p.asShapely() for p in self]
+            for p in pointsInPolygon(points, polygon):
+                traj.addPositionXY(p.x, p.y)
+            return traj
+    
     @staticmethod
-    def norm2LCSS(t1, t2, threshold):
-        return utils.LCSS(t1, t2, threshold, Point.distanceNorm2)
+    def lcss(t1, t2, lcss):
+        return lcss.compute(t1, t2)
+
+class CurvilinearTrajectory(Trajectory):
+    '''Sub class of Trajectory for trajectories with curvilinear coordinates and lane assignments
+    longitudinal coordinate is stored as first coordinate (exterior name S)
+    lateral coordinate is stored as second coordinate'''
+
+    def __init__(self, S = None, Y = None, lanes = None):
+        if S == None or Y == None or len(S) != len(Y):
+            self.positions = [[],[]]
+            if S != None and Y != None and len(S) != len(Y):
+                print("S and Y coordinates of different lengths\nInitializing to empty lists")
+        else:
+            self.positions = [S,Y]
+        if lanes == None or len(lanes) != self.length():
+            self.lanes = []
+        else:
+            self.lanes = lanes
+        
+    def __getitem__(self,i): 
+        if isinstance(i, int):
+            return [self.positions[0][i], self.positions[1][i], self.lanes[i]]
+        else:
+            raise TypeError, "Invalid argument type."
+            #elif isinstance( key, slice ):
+
+    def getSCoordinates(self):
+        return self.getXCoordinates()
+    
+    def getLanes(self):
+        return self.lanes
 
-    @staticmethod
-    def normMaxLCSS(t1, t2, threshold):
-        return utils.LCSS(t1, t2, threshold, lambda p1, p2: (p1-p2).normMax())
+    def addPositionSYL(self, s, y, lane):
+        self.addPositionXY(s,y)
+        self.lanes.append(lane)
+
+    def addPosition(self, p):
+        'Adds a position given in the curvilinear point format, ie a list of 3 values [S, Y, lane]'
+        self.addPositionSYL(p[0], p[1], p[2])
+
+    def setPosition(self, i, s, y, lane):
+        self.setPositionXY(i, s, y)
+        if i < self.__len__():
+            self.lanes[i] = lane
+
+    def differentiate(self, doubleLastPosition = False):
+        diff = CurvilinearTrajectory()
+        p1 = self[0]
+        for i in xrange(1, self.length()):
+            p2 = self[i]
+            diff.addPositionSYL(p2[0]-p1[0], p2[1]-p1[1], p1[2])
+            p1=p2
+        if doubleLastPosition and self.length() > 1:
+            diff.addPosition(diff[-1])
+        return diff
+
+    def getIntersections(self, S1, lane = None):
+        '''Returns a list of the indices at which the trajectory 
+        goes past the curvilinear coordinate S1
+        (in provided lane if lane != None)
+        the list is empty if there is no crossing'''
+        indices = []
+        for i in xrange(self.length()-1):
+            q1=self.__getitem__(i)
+            q2=self.__getitem__(i+1)
+            if q1[0] <= S1 < q2[0] and (lane == None or (self.lanes[i] == lane and self.lanes[i+1] == lane)):
+                indices.append(i+(S1-q1[0])/(q2[0]-q1[0]))
+        return indices
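A minimal sketch of CurvilinearTrajectory and getIntersections (the lane labels are arbitrary):

from moving import CurvilinearTrajectory
ct = CurvilinearTrajectory(S = [0., 1., 2.5], Y = [0.1, 0., -0.1], lanes = ['1', '1', '1'])
print(ct.getIntersections(1.5))        # [1.333...]: S = 1.5 is passed between points 1 and 2
print(ct.getIntersections(1.5, '2'))   # []: the trajectory is never in lane '2'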
 
 ##################
 # Moving Objects
 ##################
 
-userTypeNames = ['car',
+userTypeNames = ['unknown',
+                 'car',
                  'pedestrian',
-                 'twowheels',
+                 'motorcycle',
+                 'bicycle',
                  'bus',
                  'truck']
 
@@ -532,16 +924,24 @@
 
 class MovingObject(STObject):
     '''Class for moving objects: a spatio-temporal object 
-    with a trajectory and a geometry (constant volume over time) and a usertype (e.g. road user)
+    with a trajectory and a geometry (constant volume over time) 
+    and a usertype (e.g. road user) coded as a number (see userTypeNames)
     '''
 
-    def __init__(self, num = None, timeInterval = None, positions = None, geometry = None, userType = None):
+    def __init__(self, num = None, timeInterval = None, positions = None, velocities = None, geometry = None, userType = userType2Num['unknown']):
         super(MovingObject, self).__init__(num, timeInterval)
         self.positions = positions
+        self.velocities = velocities
         self.geometry = geometry
         self.userType = userType
+        self.features = []
         # compute bounding polygon from trajectory
         
+    @staticmethod
+    def generate(p, v, timeInterval):
+        positions, velocities = Trajectory.generate(p, v, int(timeInterval.length())) 
+        return MovingObject(timeInterval = timeInterval, positions = positions, velocities = velocities)
+
     def getObjectInTimeInterval(self, inter):
         '''Returns a new object extracted from self,
         restricted to time interval inter'''
@@ -565,6 +965,18 @@
     def getVelocities(self):
         return self.velocities
 
+    def getUserType(self):
+        return self.userType
+
+    def getCurvilinearPositions(self):
+        if hasattr(self, 'curvilinearPositions'):
+            return self.curvilinearPositions
+        else:
+            return None
+
+    def setUserType(self, userType):
+        self.userType = userType
+
     def setFeatures(self, features):
         self.features = [features[i] for i in self.featureNumbers]
 
@@ -589,15 +1001,73 @@
     def getYCoordinates(self):
         return self.positions.getYCoordinates()
     
-    def draw(self, options = '', withOrigin = False, timeStep = 1, **kwargs):
-        self.positions.draw(options, withOrigin, timeStep, **kwargs)
+    def plot(self, options = '', withOrigin = False, timeStep = 1, withFeatures = False, **kwargs):
+        if withFeatures:
+            for f in self.features:
+                f.positions.plot('r', True, timeStep, **kwargs)
+            self.positions.plot('bx-', True, timeStep, **kwargs)
+        else:
+            self.positions.plot(options, withOrigin, timeStep, **kwargs)
 
-    def drawOnWorldImage(self, nPixelsPerUnitDistance, imageHeight, options = '', withOrigin = False, timeStep = 1, **kwargs):
-        self.positions.drawOnWorldImage(nPixelsPerUnitDistance, imageHeight, options, withOrigin, timeStep, **kwargs)
+    def plotOnWorldImage(self, nPixelsPerUnitDistance, imageHeight, options = '', withOrigin = False, timeStep = 1, **kwargs):
+        self.positions.plotOnWorldImage(nPixelsPerUnitDistance, imageHeight, options, withOrigin, timeStep, **kwargs)
 
     def play(self, videoFilename, homography = None):
         cvutils.displayTrajectories(videoFilename, [self], homography, self.getFirstInstant(), self.getLastInstant())
 
+    def speedDiagnostics(self, framerate = 1., display = False):
+        from numpy import std
+        from scipy.stats import scoreatpercentile
+        speeds = framerate*self.getSpeeds()
+        coef = utils.linearRegression(range(len(speeds)), speeds)
+        print('min/5th perc speed: {} / {}\nspeed diff: {}\nspeed stdev: {}\nregression: {}'.format(min(speeds), scoreatpercentile(speeds, 5), speeds[-2]-speeds[1], std(speeds), coef[0]))
+        if display:
+            from matplotlib.pyplot import figure, plot, axis
+            figure(1)
+            self.plot()
+            axis('equal')
+            figure(2)
+            plot(list(self.getTimeInterval()), speeds)
+
+    @staticmethod
+    def distances(obj1, obj2, instant1, _instant2 = None):
+        from scipy.spatial.distance import cdist
+        if _instant2 == None:
+            instant2 = instant1
+        else:
+            instant2 = _instant2
+        positions1 = [f.getPositionAtInstant(instant1).astuple() for f in obj1.features if f.existsAtInstant(instant1)]
+        positions2 = [f.getPositionAtInstant(instant2).astuple() for f in obj2.features if f.existsAtInstant(instant2)]
+        return cdist(positions1, positions2, metric = 'euclidean')
+        
+    @staticmethod
+    def minDistance(obj1, obj2, instant1, instant2 = None):
+        return MovingObject.distances(obj1, obj2, instant1, instant2).min()
+
+    @staticmethod
+    def maxDistance(obj1, obj2, instant1, instant2 = None):
+        return MovingObject.distances(obj1, obj2, instant1, instant2).max()
+
+    def maxSize(self):
+        '''Returns the max distance between features
+        at the instant when the object has the most features'''
+        if hasattr(self, 'features'):
+            nFeatures = -1
+            tMaxFeatures = 0
+            for t in self.getTimeInterval():
+                n = len([f for f in self.features if f.existsAtInstant(t)])
+                if n > nFeatures:
+                    nFeatures = n
+                    tMaxFeatures = t
+            return MovingObject.maxDistance(self, self, tMaxFeatures)
+        else:
+            print('Load features to compute a maximum size')
+            return None
+			
+    def setRoutes(self, startRouteID, endRouteID):
+        self.startRouteID = startRouteID
+        self.endRouteID = endRouteID
+           
     def getInstantsCrossingLane(self, p1, p2):
         '''Returns the instant(s)
         at which the object passes from one side of the segment to the other
@@ -610,6 +1080,190 @@
         at constant speed'''
         return predictPositionNoLimit(nTimeSteps, self.getPositionAtInstant(instant), self.getVelocityAtInstant(instant), externalAcceleration)
 
+    def projectCurvilinear(self, alignments, ln_mv_av_win=3):
+        ''' Add, for every object position, the class 'moving.CurvilinearTrajectory()'
+            (curvilinearPositions instance) which holds information about the
+            curvilinear coordinates using alignment metadata.
+            From Paul St-Aubin's PVA tools
+
+            Input:
+            ======
+            alignments   = a list of alignments, where each alignment is a list of
+                           points (class Point).
+            ln_mv_av_win = moving average window (in points) in which to smooth
+                           lane changes. As per tools_math.cat_mvgavg(), this term
+                           is a search *radius* around the center of the window.
+
+            '''
+
+        self.curvilinearPositions = CurvilinearTrajectory()
+
+        #For each point
+        for i in xrange(int(self.length())):
+            result = getSYfromXY(self.getPositionAt(i), alignments)
+
+            # Error handling
+            if(result == None):
+                print('Warning: trajectory {} at point {} {} has alignment errors (spline snapping)\nCurvilinear trajectory could not be computed'.format(self.getNum(), i, self.getPositionAt(i)))
+            else:
+                [align, alignPoint, snappedPoint, subsegmentDistance, S, Y] = result
+                self.curvilinearPositions.addPositionSYL(S, Y, align)
+
+        ## Go back through points and correct lane  
+        #Run through objects looking for outlier point
+        smoothed_lanes = utils.cat_mvgavg(self.curvilinearPositions.getLanes(),ln_mv_av_win)
+        ## Recalculate projected point to new lane
+        lanes = self.curvilinearPositions.getLanes()
+        if(lanes != smoothed_lanes):
+            for i in xrange(len(lanes)):
+                if(lanes[i] != smoothed_lanes[i]):
+                    result = getSYfromXY(self.getPositionAt(i),[alignments[smoothed_lanes[i]]])
+
+                    # Error handling
+                    if(result == None):
+                        ## This can be triggered by tracking errors when the trajectory jumps around past another alignment.
+                        print('    Warning: trajectory {} at point {} {} has alignment errors during trajectory smoothing and will not be corrected.'.format(self.getNum(), i, self.getPositionAt(i)))
+                    else:
+                        [align, alignPoint, snappedPoint, subsegmentDistance, S, Y] = result
+                        self.curvilinearPositions.setPosition(i, S, Y, align)
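+
+    # Minimal usage sketch (illustrative only; obj is a hypothetical MovingObject with
+    # loaded positions, and the alignment follows the docstring format above):
+    # alignments = [[Point(0,0), Point(50,0), Point(100,0)]]  # one straight alignment
+    # obj.projectCurvilinear(alignments, ln_mv_av_win = 3)
+    # obj.curvilinearPositions then holds (S, Y, alignment index) for every position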
+
+    def computeSmoothTrajectory(self, minCommonIntervalLength):
+        '''Computes the trajectory as the mean of all features
+        if a feature exists at an instant, its position is used
+
+        Warning: work in progress
+        TODO? not use the first/last 1-.. positions'''
+        from numpy import array, median
+        nFeatures = len(self.features)
+        if nFeatures == 0:
+            print('Empty object features\nCannot compute smooth trajectory')
+        else:
+            # compute the relative position vectors
+            relativePositions = {} # relativePositions[(i,j)] is the position of j relative to i
+            for i in xrange(nFeatures):
+                for j in xrange(i):
+                    fi = self.features[i]
+                    fj = self.features[j]
+                    inter = fi.commonTimeInterval(fj)
+                    if inter.length() >= minCommonIntervalLength:
+                        xi = array(fi.getXCoordinates()[inter.first-fi.getFirstInstant():int(fi.length())-(fi.getLastInstant()-inter.last)])
+                        yi = array(fi.getYCoordinates()[inter.first-fi.getFirstInstant():int(fi.length())-(fi.getLastInstant()-inter.last)])
+                        xj = array(fj.getXCoordinates()[inter.first-fj.getFirstInstant():int(fj.length())-(fj.getLastInstant()-inter.last)])
+                        yj = array(fj.getYCoordinates()[inter.first-fj.getFirstInstant():int(fj.length())-(fj.getLastInstant()-inter.last)])
+                        relativePositions[(i,j)] = Point(median(xj-xi), median(yj-yi))
+                        relativePositions[(j,i)] = -relativePositions[(i,j)]
+
+    ###
+    # User Type Classification
+    ###
+    def classifyUserTypeSpeedMotorized(self, threshold, aggregationFunc = median, ignoreNInstantsAtEnds = 0):
+        '''Classifies slow and fast road users
+        slow: non-motorized -> pedestrians
+        fast: motorized -> cars'''
+        if ignoreNInstantsAtEnds > 0:
+            speeds = self.getSpeeds()[ignoreNInstantsAtEnds:-ignoreNInstantsAtEnds]
+        else:
+            speeds = self.getSpeeds()
+        if aggregationFunc(speeds) >= threshold:
+            self.setUserType(userType2Num['car'])
+        else:
+            self.setUserType(userType2Num['pedestrian'])
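+
+    # Minimal usage sketch (illustrative; the threshold value is hypothetical and
+    # expressed in the speed unit of the trajectories, e.g. world units per frame):
+    # obj.classifyUserTypeSpeedMotorized(threshold = 0.5, ignoreNInstantsAtEnds = 5)
+    # labels the object as a car if its median speed is at least 0.5, else as a pedestrian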
+
+    def classifyUserTypeSpeed(self, speedProbabilities, aggregationFunc = median):
+        '''Classifies the road user by user type
+        speedProbabilities are functions that return P(speed|class)
+        in a dictionary indexed by user type names
+        Returns the probabilities for each class
+
+        For simple threshold classification, simply pass non-overlapping indicator functions (membership)
+        e.g. def indic(x):
+        if abs(x-mu) < sigma:
+        return 1
+        else:
+        return 0'''
+        if not hasattr(self, 'aggregatedSpeed'):
+            self.aggregatedSpeed = aggregationFunc(self.getSpeeds())
+        userTypeProbabilities = {}
+        for userTypename in speedProbabilities:
+            userTypeProbabilities[userType2Num[userTypename]] = speedProbabilities[userTypename](self.aggregatedSpeed)
+        self.setUserType(utils.argmaxDict(userTypeProbabilities))
+        return userTypeProbabilities
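+
+    # Illustrative speedProbabilities argument (hypothetical speed cutoff of 0.2):
+    # non-overlapping indicator functions, one per user type name, as in the docstring
+    # speedProbabilities = {'pedestrian': lambda s: 1. if s < 0.2 else 0.,
+    #                       'car': lambda s: 1. if s >= 0.2 else 0.}
+    # probabilities = obj.classifyUserTypeSpeed(speedProbabilities)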
+
+    def initClassifyUserTypeHoGSVM(self, aggregationFunc = median):
+        '''Initializes the data structures for classification
+
+        TODO? compute speed for longest feature?
+        Skip beginning and end of feature for speed? Offer options instead of median'''
+        self.aggregatedSpeed = aggregationFunc(self.getSpeeds())
+        self.userTypes = {}
+
+    def classifyUserTypeHoGSVMAtInstant(self, img, pedBikeCarSVM, instant, homography, width, height, bikeCarSVM = None, pedBikeSpeedTreshold = float('Inf'), bikeCarSpeedThreshold = float('Inf'), px = 0.2, py = 0.2, pixelThreshold = 800):
+        '''Extracts the image box around the object and
+        applies the SVM model to it'''
+        from numpy import array
+        croppedImg, yCropMin, yCropMax, xCropMin, xCropMax = imageBox(img, self, instant, homography, width, height, px, py, pixelThreshold)
+        if len(croppedImg) > 0: # != []
+            hog = array([cvutils.HOG(croppedImg)], dtype = np.float32)
+            if self.aggregatedSpeed < pedBikeSpeedTreshold or bikeCarSVM == None:
+                self.userTypes[instant] = int(pedBikeCarSVM.predict(hog))
+            elif self.aggregatedSpeed < bikeCarSpeedThreshold:
+                self.userTypes[instant] = int(bikeCarSVM.predict(hog))
+            else:
+                self.userTypes[instant] = userType2Num['car']
+        else:
+            self.userTypes[instant] = userType2Num['unknown']
+
+    def classifyUserTypeHoGSVM(self, images, pedBikeCarSVM, homography, width, height, bikeCarSVM = None, pedBikeSpeedTreshold = float('Inf'), bikeCarSpeedThreshold = float('Inf'), speedProbabilities = None, aggregationFunc = median, px = 0.2, py = 0.2, pixelThreshold = 800):
+        '''Aggregates SVM detections in each image and returns the probability
+        (proportion of instants with a classification in each category)
+
+        images is a dictionary of images indexed by instant
+        With default parameters, the general (ped-bike-car) classifier will be used
+        TODO? consider all categories?'''
+        if not hasattr(self, 'aggregatedSpeed') or not hasattr(self, 'userTypes'):
+            print('Initializing the data structures for classification by HoG-SVM')
+            self.initClassifyUserTypeHoGSVM(aggregationFunc)
+
+        if len(self.userTypes) != self.length(): # if classification has not been done previously
+            for t in self.getTimeInterval():
+                if t not in self.userTypes:
+                    self.classifyUserTypeHoGSVMAtInstant(images[t], pedBikeCarSVM, t, homography, width, height, bikeCarSVM, pedBikeSpeedTreshold, bikeCarSpeedThreshold, px, py, pixelThreshold)
+        # compute P(Speed|Class)
+        if speedProbabilities == None: # equiprobable information from speed
+            userTypeProbabilities = {userType2Num['car']: 1., userType2Num['pedestrian']: 1., userType2Num['bicycle']: 1.}
+        else:
+            userTypeProbabilities = {userType2Num[userTypename]: speedProbabilities[userTypename](self.aggregatedSpeed) for userTypename in speedProbabilities}
+        # result is P(Class|Appearance) x P(Speed|Class)
+        nInstantsUserType = {userTypeNum: 0 for userTypeNum in userTypeProbabilities}# number of instants the object is classified as each user type
+        for t in self.userTypes:
+            if self.userTypes[t] in nInstantsUserType: # ignore instants classified as unknown
+                nInstantsUserType[self.userTypes[t]] += 1
+        for userTypename in userTypeProbabilities:
+            userTypeProbabilities[userTypename] *= nInstantsUserType[userTypename]
+        # class is the user type that maximizes usertype probabilities
+        self.setUserType(utils.argmaxDict(userTypeProbabilities))
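+
+    # Usage sketch (illustrative; images is a dictionary of frames indexed by instant,
+    # pedBikeCarSVM a trained HoG-SVM classifier, homography and the image width/height
+    # as used elsewhere in the project, speedProbabilities as in classifyUserTypeSpeed):
+    # obj.classifyUserTypeHoGSVM(images, pedBikeCarSVM, homography, width, height,
+    #                            speedProbabilities = speedProbabilities)
+    # obj.userType then holds the class maximizing appearance x speed probabilities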
+
+    def classifyUserTypeArea(self, areas, homography):
+        '''Classifies the object based on its location (projected to image space)
+        areas is a dictionary of matrices of the size of the image space,
+        one per road user type name, for the possible locations of that road user type
+
+        TODO: areas could be a wrapper object with a contains method that would work for polygons and images (with wrapper class)
+        skip frames at beginning/end?'''
+        print('not implemented/tested yet')
+        if not hasattr(self, 'projectedPositions'):
+            if homography != None:
+                self.projectedPositions = self.positions.project(homography)
+            else:
+                self.projectedPositions = self.positions
+        possibleUserTypes = {userType: 0 for userType in range(len(userTypenames))}
+        for p in self.projectedPositions:
+            for userTypename in areas:
+                if areas[userTypename][p.x, p.y] != 0:
+                    possibleUserTypes[userType2Num[userTypename]] += 1
+        # what to do: threshold for most common type? self.setUserType()
+        return possibleUserTypes
+
     @staticmethod
     def collisionCourseDotProduct(movingObject1, movingObject2, instant):
         'A positive result indicates that the road users are getting closer'
@@ -620,16 +1274,33 @@
     @staticmethod
     def collisionCourseCosine(movingObject1, movingObject2, instant):
         'A positive result indicates that the road users are getting closer'
-        deltap = movingObject1.getPositionAtInstant(instant)-movingObject2.getPositionAtInstant(instant)
-        deltav = movingObject2.getVelocityAtInstant(instant)-movingObject1.getVelocityAtInstant(instant)
-        return Point.dot(deltap, deltav)/(deltap.norm2()*deltav.norm2())
+        return Point.cosine(movingObject1.getPositionAtInstant(instant)-movingObject2.getPositionAtInstant(instant), #deltap
+                            movingObject2.getVelocityAtInstant(instant)-movingObject1.getVelocityAtInstant(instant)) #deltav
+
+
+##################
+# Annotations
+##################
+
+class BBAnnotation(MovingObject):
+    '''Class for a series of ground truth annotations using bounding boxes
+    Its center is the center of the containing shape
+    '''
+
+    def __init__(self, num = None, timeInterval = None, topPositions = None, bottomPositions = None, userType = userType2Num['unknown']):
+        super(BBAnnotation, self).__init__(num, timeInterval, Trajectory(), userType = userType)
+        self.topPositions = topPositions.getPositions()
+        self.bottomPositions = bottomPositions.getPositions()
+        for i in xrange(int(topPositions.length())):
+            self.positions.addPosition((topPositions.getPositionAt(i) + bottomPositions.getPositionAt(i)).multiply(0.5))
+        
 
 def plotRoadUsers(objects, colors):
     '''Colors is a PlottingPropertyValues instance'''
     from matplotlib.pyplot import figure, axis
     figure()
     for obj in objects:
-        obj.draw(colors.get(obj.userType))
+        obj.plot(colors.get(obj.userType))
     axis('equal')
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/python/objectsmoothing.py	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,131 @@
+import storage, moving, utils
+from math import * #atan2,asin,degrees,sin,cos,pi
+import numpy as np
+
+import matplotlib.pyplot as plt
+
+def findNearest(feat, featureSet,t,reverse=True):
+	dist={}
+	for f in featureSet:
+		if reverse:
+			dist[f]= moving.Point.distanceNorm2(feat.getPositionAtInstant(t+1),f.getPositionAtInstant(t))
+		else:
+			dist[f]= moving.Point.distanceNorm2(feat.getPositionAtInstant(t-1),f.getPositionAtInstant(t))
+	return min(dist, key=dist.get) # nearest feature: the key with the smallest distance
+	
+def getFeatures(obj):
+	longestFeature = utils.argmaxDict({f:f.length() for i,f in enumerate(obj.features)})
+	t1,t3 = longestFeature.getFirstInstant(), longestFeature.getLastInstant()
+	listFeatures=[[longestFeature,t1,t3,moving.Point(0,0)]]
+	# find the features to fill in the beginning of the object existence
+	currentFeature = longestFeature
+	while t1!=obj.getFirstInstant():
+		delta=listFeatures[-1][3]
+		featureSet = [f for f in obj.features if f.existsAtInstant(t1-1)]
+		feat = findNearest(currentFeature,featureSet,t1-1,reverse=True)
+		if feat.existsAtInstant(t1):
+			listFeatures.append([feat,feat.getFirstInstant(),t1-1,(currentFeature.getPositionAtInstant(t1)-feat.getPositionAtInstant(t1))+delta])
+		else:
+			listFeatures.append([feat,feat.getFirstInstant(),t1-1,(currentFeature.getPositionAtInstant(t1)-feat.getPositionAtInstant(t1-1))+delta])
+		currentFeature = feat
+		t1= feat.getFirstInstant()
+	# find the features to fill in the end of the object existence
+	delta=moving.Point(0,0)
+	currentFeature = longestFeature
+	while t3!= obj.getLastInstant():
+		featureSet = [f for f in obj.features if f.existsAtInstant(t3+1)]
+		feat = findNearest(currentFeature,featureSet,t3+1,reverse=False)
+		if feat.existsAtInstant(t3):
+			listFeatures.append([feat,t3+1,feat.getLastInstant(),(currentFeature.getPositionAtInstant(t3)-feat.getPositionAtInstant(t3))+delta])
+		else:
+			listFeatures.append([feat,t3+1,feat.getLastInstant(),(currentFeature.getPositionAtInstant(t3)-feat.getPositionAtInstant(t3+1))+delta])
+		currentFeature = feat
+		t3= feat.getLastInstant()
+		delta=listFeatures[-1][3]
+	return listFeatures
+	
+def buildFeature(obj,num=1):
+	listFeatures= getFeatures(obj)
+	tmp={}
+	delta={}
+	for i in listFeatures:
+		for t in xrange(i[1],i[2]+1):
+			tmp[t]=[i[0],i[3]]
+	newTraj = moving.Trajectory()
+	
+	for instant in obj.getTimeInterval():
+		newTraj.addPosition(tmp[instant][0].getPositionAtInstant(instant)+tmp[instant][1])
+	newFeature= moving.MovingObject(num,timeInterval=obj.getTimeInterval(),positions=newTraj)
+	return newFeature
+
+def getBearing(p1,p2,p3):
+	angle = degrees(atan2(p3.y -p1.y, p3.x -p1.x))
+	bearing1 = (90 - angle) % 360
+	angle2 = degrees(atan2(p2.y -p1.y, p2.x -p1.x))
+	bearing2 = (90 - angle2) % 360	
+	dist= moving.Point.distanceNorm2(p1, p2)
+	return [dist,bearing1,bearing2,bearing2-bearing1]
+	
+def smoothObjectTrajectory(obj,newNum,smoothing=False,plotResults=True,halfWidth=3):
+	results=[]	
+	bearing={}
+	feature= buildFeature(obj,1)
+	for t in feature.getTimeInterval():
+		p1= feature.getPositionAtInstant(t)
+		p2= obj.getPositionAtInstant(t)
+		if t!=feature.getLastInstant():
+			p3= feature.getPositionAtInstant(t+1)
+		else:
+			p1= feature.getPositionAtInstant(t-1)
+			p3= feature.getPositionAtInstant(t)
+		bearing[t]= getBearing(p1,p2,p3)[1]		
+		results.append(getBearing(p1,p2,p3))
+	
+	medianResults=np.median(results,0)
+	dist= medianResults[0]
+	angle= medianResults[3]
+	
+	for i in sorted(bearing.keys()):
+		bearing[i]= bearing[i]+angle
+
+	if smoothing:
+		bearingInput=[]
+		for i in sorted(bearing.keys()):
+			bearingInput.append(bearing[i])
+		bearingOut=utils.filterMovingWindow(bearingInput, halfWidth)
+		for t,i in enumerate(sorted(bearing.keys())):
+			bearing[i]=bearingOut[t]
+		
+		# handle the wrap-around (0/360) jumps in the computed bearing before smoothing
+		for t,i in enumerate(sorted(bearing.keys())):
+			if i!= max(bearing.keys()) and abs(bearingInput[t] - bearingInput[t+1])>=340:
+					for x in xrange(max(i-halfWidth,min(bearing.keys())),min(i+halfWidth,max(bearing.keys()))+1):
+						bearing[x]=bearingInput[t-i+x]
+
+	translated = moving.Trajectory()
+	for t in feature.getTimeInterval():
+		p1= feature.getPositionAtInstant(t)
+		p1.x = p1.x + dist*sin(bearing[t]*pi/180)
+		p1.y = p1.y + dist*cos(bearing[t]*pi/180)
+		translated.addPosition(p1)
+		
+	#modify first and last un-smoothed positions (half width)
+	if smoothing:
+		d1= translated[halfWidth]- feature.positions[halfWidth]
+		d2= translated[-halfWidth-1]- feature.positions[-halfWidth-1]
+		for i in xrange(halfWidth):
+			p1 = feature.getPositionAt(i)+d1
+			p2 = feature.getPositionAt(-i-1)+d2
+			translated.setPosition(i,p1)
+			translated.setPosition(-i-1,p2)
+		
+	newObj= moving.MovingObject(newNum,timeInterval=feature.getTimeInterval(),positions=translated,velocities=obj.getVelocities())
+	newObj.features=obj.features
+	newObj.userType=obj.userType
+	if plotResults:
+		plt.figure()
+		plt.title('objects_id = {}'.format(obj.num))
+		newObj.plot('gx-')
+		feature.plot('cx-')
+		obj.plot('rx-')
+	return newObj
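+
+# Minimal usage sketch (illustrative; obj is a hypothetical MovingObject loaded with its features):
+# smoothed = smoothObjectTrajectory(obj, newNum = obj.getNum(), smoothing = True, plotResults = False)
+# builds a continuous feature from the longest features, translates it by the median
+# distance/bearing to the object positions and optionally smooths the bearing over halfWidth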
--- a/python/offset-trajectories.py	Thu Apr 18 15:29:33 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,23 +0,0 @@
-#! /usr/bin/env python
-import sys
-
-import ubc_utils
-import utils
-
-if len(sys.argv) < 3:
-    print("Usage: %s filename offsetframes")
-    sys.exit()
-
-nFrames = int(sys.argv[2])
-
-def modifyLines(objectNum, lines):
-    result = lines
-    tmp = lines[0].split(" ")
-    firstInstant = int(tmp[1])+nFrames
-    lastInstant = int(tmp[2])+nFrames
-    tmp[1] = str(firstInstant)
-    tmp[2] = str(lastInstant)
-    result[0] = " ".join(tmp)
-    return result
-
-ubc_utils.modifyTrajectoryFile(modifyLines, sys.argv[1], sys.argv[1]+".new")
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/python/pavement.py	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,314 @@
+#! /usr/bin/env python
+'''Tools for processing and analyzing pavement marking data'''
+
+import utils
+
+import numpy as np
+
+__metaclass__ = type
+
+paintTypes = {0: "Non-existant",
+              1: "Eau",
+              2: "Epoxy",
+              3: "Alkyde",
+              4: "Autre"}
+
+durabilities = {1: 98, #96 to 100
+                2: 85, #75 to 96
+                3: 62, #50 to 75
+                4: 32, #15 to 50
+                5: 7 #0 to 15
+                }
+
+roadFunctionalClasses = {40: "Collectrice",
+                         20: "Nationale",
+                         30: "Regionale",
+                         10: "Autoroute",
+                         60: "Acces ressources",
+                         51: "Local 1",
+                         52: "Local 2",
+                         53: "Local 3",
+                         15: "Aut (PRN)",
+                         25: "Nat (PRN)",
+                         70: "Acces isolees",
+                         99: "Autres"}
+
+def caracteristiques(rtss, maintenanceLevel, rtssWeatherStation, fmr, paintType):
+    '''Computes characteristic data for the RTSS (class rtss) 
+    maintenanceLevel = pylab.csv2rec('C:\Users\Alexandre\Desktop\Projet_maitrise_recherche\BDD_access\\analyse_donnees_deneigement\\exigence_circuits.txt', delimiter = ';')
+    rtssWeatherStation = pylab.csv2rec('C:\Users\Alexandre\Desktop\Projet_maitrise_recherche\stations_environnement_canada\\rtssWeatherStation\juste_pour_rtss_avec_donnees_entretien_hiv\\rtssWeatherStation_EC3.txt', delimiter = ',')
+    fmr = pylab.csv2rec('C:\Users\Alexandre\Desktop\Projet_maitrise_recherche\BDD_access\\analyse_donnees_deneigement\\fmr.txt', delimiter = ';')
+    paintType = pylab.csv2rec('C:\Users\Alexandre\Desktop\Projet_maitrise_recherche\BDD_access\\analyse_donnees_deneigement\\type_peinture.txt', delimiter = ';')
+    '''
+    # determination exigence deneigement
+    if rtss.id in maintenanceLevel['rtss_debut']:
+        for i in range(len(maintenanceLevel)):
+            if maintenanceLevel['rtss_debut'][i] == rtss.id:
+                exigence = maintenanceLevel['exigence'][i]
+    else:
+        exigence = ''
+
+    # determination x/y
+    if rtss.id in rtssWeatherStation['rtss']:
+        for i in range(len(rtssWeatherStation)):		
+            if rtssWeatherStation['rtss'][i] == rtss.id:
+                x_moy = rtssWeatherStation['x_moy'][i]
+                y_moy = rtssWeatherStation['y_moy'][i]
+    else:
+        x_moy, y_moy = '',''	
+
+    # determination info fmr
+    age_revtm, classe_fonct, type_revtm, milieu, djma, pourc_camions, vit_max = [], [], [], [], [], [], []
+    if rtss.id in fmr['rtss_debut']:
+        for i in range(len(fmr)):
+            if fmr['rtss_debut'][i] == rtss.id:
+                age_revtm.append(fmr['age_revtm'][i])
+                classe_fonct.append(fmr['des_clasf_fonct'][i])
+                type_revtm.append(fmr['des_type_revtm'][i])
+                milieu.append(fmr['des_cod_mil'][i])
+                djma.append(fmr['val_djma'][i])
+                pourc_camions.append(fmr['val_pourc_camns'][i])
+                vit_max.append(fmr['val_limt_vitss'][i])
+        age_revtm = utils.mostCommon(age_revtm)
+        classe_fonct = utils.mostCommon(classe_fonct)
+        type_revtm = utils.mostCommon(type_revtm)
+        milieu = utils.mostCommon(milieu)
+        djma = utils.mostCommon(djma)
+        vit_max = utils.mostCommon(vit_max)
+        if vit_max < 0:
+            vit_max = ''
+        pourc_camions = utils.mostCommon(pourc_camions)
+        if pourc_camions == "" or pourc_camions < 0:
+            djma_camions = ""
+        else:
+            djma_camions = pourc_camions*djma/100
+    else:
+        age_revtm, classe_fonct, type_revtm, milieu, djma, djma_camions, vit_max  = '','','','','','',''
+
+    # determination type peinture
+    peinture_rd, peinture_rg, peinture_cl = [], [], []
+    peinture_lrd, peinture_lrg, peinture_lc = 0,0,0
+    if rtss.id in paintType['rtss_debut_orig']:
+        for i in range(len(paintType)):
+            if paintType['rtss_debut_orig'][i] == rtss.id:
+                peinture_rd.append((paintType['peinture_rd'][i]))
+                peinture_rg.append((paintType['peinture_rg'][i]))
+                peinture_cl.append((paintType['peinture_cl'][i]))
+        peinture_lrd = utils.mostCommon(peinture_rd)
+        peinture_lrg = utils.mostCommon(peinture_rg)
+        peinture_lc = utils.mostCommon(peinture_cl)
+    else:
+        peinture_lrd, peinture_lrg, peinture_lc = '','',''		
+
+    return (exigence, x_moy, y_moy, age_revtm, classe_fonct, type_revtm, milieu, djma, djma_camions, vit_max, peinture_lrd, peinture_lrg, peinture_lc)
+
+def winterMaintenanceIndicators(data, startDate, endDate, circuitReference, snowThreshold):
+    '''Computes several winter maintenance indicators
+    data = entretien_hivernal = pylab.csv2rec('C:\Users\Alexandre\Documents\Cours\Poly\Projet\mesures_entretien_hivernal\mesures_deneigement.txt', delimiter = ',')'''
+    import datetime
+    somme_eau, somme_neige, somme_abrasif, somme_sel, somme_lc, somme_lrg, somme_lrd, compteur_premiere_neige, compteur_somme_abrasif = 0,0,0,0,0,0,0,0,0
+
+    if circuitReference in data['ref_circuit']:
+        for i in range(len(data)):
+            if data['ref_circuit'][i] == circuitReference and (data['date'][i] + datetime.timedelta(days = 6)) <= endDate and (data['date'][i] + datetime.timedelta(days = 6)) > startDate:
+                compteur_premiere_neige += float(data['premiere_neige'][i])
+                somme_neige += float(data['neige'][i])
+                somme_eau += float(data['eau'][i])
+                somme_abrasif += float(data['abrasif'][i])
+                somme_sel += float(data['sel'][i])
+                somme_lc += float(data['lc'][i])
+                somme_lrg += float(data['lrg'][i])
+                somme_lrd += float(data['lrd'][i])
+                compteur_somme_abrasif += float(data['autre_abrasif_binaire'][i])
+        if compteur_premiere_neige >= 1:
+            premiere_neige = 1
+        else:
+            premiere_neige = 0
+        if compteur_somme_abrasif >= 1:
+            autres_abrasifs = 1
+        else:
+            autres_abrasifs = 0
+        if somme_neige < snowThreshold:
+            neigeMTQ_sup_seuil = 0
+        else:
+            neigeMTQ_sup_seuil = 1
+    else:
+        somme_eau, somme_neige, somme_abrasif, somme_sel, somme_lc, somme_lrg, somme_lrd, premiere_neige, autres_abrasifs, neigeMTQ_sup_seuil = '','','','','','','','','',''
+
+    return (somme_eau, somme_neige, neigeMTQ_sup_seuil, somme_abrasif, somme_sel, somme_lc, somme_lrg, somme_lrd, premiere_neige, autres_abrasifs)
+
+def weatherIndicators(data, startDate, endDate, snowThreshold, weatherDatatype, minProportionMeasures = 0.):
+    '''Computes the indicators from Environment Canada files
+    (loaded as a recarray using csv2rec in data),
+    between start and end dates (datetime.datetime objects)
+
+    weatherDataType is to indicate Environnement Canada data ('ec') or else MTQ
+    minProportionMeasures is proportion of measures necessary to consider the indicators'''
+    from matplotlib.mlab import find
+    nbre_jours_T_negatif,nbre_jours_gel_degel,pluie_tot,neige_tot,ecart_type_T = 0,0,0,0,0
+    compteur,nbre_jours_gel_consecutifs=0,0
+    tmoys = []
+    seuils_T = [20,15,10,5]
+    deltas_T = [0,0,0,0]
+    startIndex = find(data['date'] == startDate)
+    nDays = int((endDate - startDate).days)+1
+    if len(startIndex) > 0 and startIndex+nDays <= len(data):
+        startIndex = startIndex[0]
+        for i in range(startIndex, startIndex+nDays):
+            if not np.isnan(data['tmax'][i]):
+                tmax = data['tmax'][i]
+            else:
+                tmax = None
+            if not np.isnan(data['tmin'][i]):
+                tmin = data['tmin'][i]
+            else:
+                tmin = None
+            if weatherDatatype == 'ec':
+                if data['pluie_tot'][i] != None and not np.isnan(data['pluie_tot'][i]):
+                    pluie_tot  += data['pluie_tot'][i]
+                if data['neige_tot'][i] != None and not np.isnan(data['neige_tot'][i]):
+                    neige_tot  += data['neige_tot'][i]
+            if tmax != None:
+                if tmax < 0:
+                    nbre_jours_T_negatif += 1
+            if tmax != None and tmin != None:
+                if tmax > 0 and tmin < 0:
+                    nbre_jours_gel_degel += 1
+                for l in range(len(seuils_T)):
+                    if tmax - tmin >=seuils_T[l]:
+                        deltas_T[l] += 1
+            if not np.isnan(data['tmoy'][i]):
+                tmoys.append(data['tmoy'][i])
+            if tmax != None:
+                if tmax < 0:
+                    compteur += 1
+                elif tmax >= 0 and compteur >= nbre_jours_gel_consecutifs:
+                    nbre_jours_gel_consecutifs = compteur
+                    compteur = 0
+                else:
+                    compteur = 0
+            nbre_jours_gel_consecutifs = max(nbre_jours_gel_consecutifs,compteur)
+    if len(tmoys) > 0 and float(len(tmoys))/nDays >= minProportionMeasures:
+        if tmoys != []:
+            ecart_type_T = np.std(tmoys)
+        else:
+            ecart_type_T = None
+        if neige_tot < snowThreshold:
+            neigeEC_sup_seuil = 0
+        else:
+            neigeEC_sup_seuil = 1
+        return (nbre_jours_T_negatif,nbre_jours_gel_degel, deltas_T, nbre_jours_gel_consecutifs, pluie_tot, neige_tot, neigeEC_sup_seuil, ecart_type_T)
+    else:
+        return [None]*2+[[None]*len(seuils_T)]+[None]*5
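+
+# Usage sketch (illustrative; the file name is hypothetical and the data is loaded
+# as a recarray with csv2rec, as described in the docstring above):
+# data = pylab.csv2rec('donnees_ec.txt', delimiter = ',')
+# indicators = weatherIndicators(data, datetime.datetime(2010,11,1), datetime.datetime(2011,4,1),
+#                                snowThreshold = 50., weatherDatatype = 'ec')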
+
+def mtqWeatherIndicators(data, startDate, endDate,tmax,tmin,tmoy):
+    print("Deprecated, use weatherIndicators")
+    from matplotlib.mlab import find
+    nbre_jours_T_negatif,nbre_jours_gel_degel,ecart_type_T = 0,0,0
+    compteur,nbre_jours_gel_consecutifs=0,0
+    tmoys = []
+    seuils_T = [20,15,10,5]
+    deltas_T = [0,0,0,0]
+    startIndex = find(data['date'] == startDate)
+    nDays = (endDate - startDate).days+1
+    for i in range(startIndex, startIndex+nDays):
+        if tmax[i] < 0:
+            nbre_jours_T_negatif += 1
+        if tmax[i] > 0 and tmin[i] < 0:
+            nbre_jours_gel_degel += 1
+        for l in range(len(seuils_T)):
+            if tmax[i] - tmin[i] >=seuils_T[l]:
+                deltas_T[l] += 1
+        tmoys.append(tmoy[i])
+        if tmax[i] < 0:
+            compteur += 1
+        elif tmax[i] >= 0 and compteur >= nbre_jours_gel_consecutifs:
+            nbre_jours_gel_consecutifs = compteur
+            compteur = 0
+        else:
+            compteur = 0
+        nbre_jours_gel_consecutifs = max(nbre_jours_gel_consecutifs,compteur)
+        if tmoys != []:
+            ecart_type_T = np.std(tmoys)
+        else:
+            ecart_type_T = None
+
+    return (nbre_jours_T_negatif,nbre_jours_gel_degel, deltas_T, nbre_jours_gel_consecutifs, ecart_type_T)
+
+class RTSS:
+    '''class for data related to an RTSS:
+    - aggregating pavement marking measurements
+    - RTSS characteristics from FMR: pavement type, age, AADT, truck AADT
+    - winter maintenance level from V155
+
+    If divided highway, the RTSS ends with G or D and are distinct: there is no ambiguity
+    - retroreflectivity types: there are CB, RJ and RB
+    If undivided, ending with C
+    - durability is fine: ETAT_MARQG_RG ETAT_MARQG_CL ETAT_MARQG_RD (+SG/SD, but recent)
+    - retroreflectivity: CJ is center line, RB and SB are left/right if DEBUT-FIN>0 or <0
+    '''
+
+    def __init__(self, _id, name, data):
+        self.id = _id
+        self.name = name
+        self.data = data
+
+class MarkingTest:
+    '''class for a test site for a given product
+
+    including the series of measurements over the years'''
+
+    def __init__(self, _id, paintingDate, paintingType, color, data):
+        self.id = _id
+        self.paintingDate = paintingDate
+        self.paintingType = paintingType
+        self.color = color
+        self.data = data
+        self.nMeasures = len(data)
+
+    def getSite(self):
+        return int(self.id[:2])
+
+    def getTestAttributes(self):
+        return [self.paintingType, self.color, self.paintingDate.year]
+
+    def plot(self, measure, options = 'o', dayRatio = 1., **kwargs):
+        from matplotlib.pyplot import plot
+        plot(self.data['jours']/float(dayRatio), 
+             self.data[measure], options, **kwargs)
+
+    def getMarkingMeasures(self, dataLabel):
+        nonZeroIndices = ~np.isnan(self.data[dataLabel])
+        return self.data[nonZeroIndices]['jours'], self.data[nonZeroIndices][dataLabel]
+
+    def plotMarkingMeasures(self, measure, options = 'o', dayRatio = 1., **kwargs):
+        for i in range(1,7):
+            self.plot('{}_{}'.format(measure, i), options, dayRatio, **kwargs)
+
+    def computeMarkingMeasureVariations(self, dataLabel, lanePositions, weatherData, snowThreshold, weatherDataType = 'ec', minProportionMeasures = 0.):
+        '''Computes for each successive measurement
+        lanePositions = None
+        measure variation, initial measure, time duration, weather indicators
+        
+        TODO if measurements per lane, add a variable for lane position (position1 to 6)
+        lanePositions = list of integers (range(1,7))
+        measure variation, initial measure, time duration, lane position1, weather indicators
+        measure variation, initial measure, time duration, lane position2, weather indicators
+        ...'''
+        variationData = []
+        if lanePositions == None:
+            nonZeroIndices = ~np.isnan(self.data[dataLabel])
+            days = self.data[nonZeroIndices]['jours']
+            dates = self.data[nonZeroIndices]['date_mesure']
+            measures = self.data[nonZeroIndices][dataLabel]
+            for i in range(1, len(dates)):
+                nDaysTNegative, nDaysThawFreeze, deltaTemp, nConsecutiveFrozenDays, totalRain, totalSnow, snowAboveThreshold, stdevTemp = weatherIndicators(weatherData, dates[i-1], dates[i], snowThreshold, weatherDataType, minProportionMeasures)
+                if dates[i-1].year+1 == dates[i].year:
+                    winter = 1
+                    if days[i-1]<365:
+                        firstWinter = 1
+                    else:
+                        firstWinter = 0
+                else:
+                    winter = 0
+                    firstWinter = 0
+                variationData.append([measures[i-1]-measures[i], measures[i-1], days[i]-days[i-1], days[i-1], winter, firstWinter, nDaysTNegative, nDaysThawFreeze] + deltaTemp + [nConsecutiveFrozenDays, totalRain, totalSnow, snowAboveThreshold, stdevTemp])
+        return variationData
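+
+    # Usage sketch (illustrative; the data label 'retro_1' and the snow threshold are hypothetical,
+    # weatherData is loaded as for weatherIndicators):
+    # variations = test.computeMarkingMeasureVariations('retro_1', None, weatherData, 50.)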
--- a/python/play-video.py	Thu Apr 18 15:29:33 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,22 +0,0 @@
-#! /usr/bin/env python
-
-import sys,getopt
-import cvutils
-
-options, args = getopt.getopt(sys.argv[1:], 'hi:f:',['help', 'fps=']) 
-options = dict(options)
-print options
-
-if '--help' in options.keys() or '-h' in options.keys() or len(sys.argv) == 1:
-    print('Usage: '+sys.argv[0]+' --help|-h -i video-filename [-f first_frame] [--fps frame_rate]')
-    sys.exit()
-
-firstFrameNum = 0
-if '-f' in options.keys():
-    firstFrameNum = int(options['-f'])
-
-frameRate = -1
-if '--fps' in options.keys():
-    frameRate = int(options['--fps'])
-
-cvutils.playVideo(options['-i'], firstFrameNum, frameRate)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/python/poly-utils.py	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,126 @@
+#! /usr/bin/env python
+'''Various utilities to load data saved by the POLY new output(s)'''
+import sys
+import utils
+from moving import  TimeInterval
+import numpy as np
+
+__metaclass__ = type
+from indicators import SeverityIndicator
+
+
+def loadNewInteractions(videoFilename,interactionType,dirname, extension, indicatorsNames, roaduserNum1,roaduserNum2, selectedIndicators=[]):
+    '''Loads interactions from the POLY traffic event format'''
+    from events import Interaction 
+    filename= dirname + videoFilename + extension
+    #filename= dirname + interactionType+ '-' + videoFilename + extension # case of min distance todo: change the saving format to be matched with all outputs
+    file = utils.openCheck(filename)
+    if (not file):
+        return []
+    #interactions = []
+    interactionNum = 0
+    data= np.loadtxt(filename)
+    indicatorFrameNums= data[:,0]
+    inter = Interaction(interactionNum, TimeInterval(indicatorFrameNums[0],indicatorFrameNums[-1]), roaduserNum1, roaduserNum2) 
+    inter.addVideoFilename(videoFilename)
+    inter.addInteractionType(interactionType)
+    for key in indicatorsNames.keys():
+        values= {}
+        for i,t in enumerate(indicatorFrameNums):
+            values[t] = data[i,key]
+        inter.addIndicator(SeverityIndicator(indicatorsNames[key], values))
+    if selectedIndicators !=[]:
+        values= {}
+        for i,t in enumerate(indicatorFrameNums):
+            values[t] = [data[i,index] for index in selectedIndicators]
+        inter.addIndicator(SeverityIndicator('selectedIndicators', values))	
+		
+    #interactions.append(inter)
+    file.close()
+    #return interactions
+    return inter
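+
+# Usage sketch (illustrative; the indicator column indices are hypothetical and depend on
+# the POLY output format):
+# indicatorsNames = {5: 'TTC', 6: 'pPET'}
+# inter = loadNewInteractions('Miss/0404052336', 'collision-point', './', '.txt',
+#                             indicatorsNames, roaduserNum1 = 0, roaduserNum2 = 3)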
+
+# Plotting results
+
+frameRate = 15.
+
+# To run in directory that contains the directories that contain the results (Miss-xx and Incident-xx)
+#dirname = '/home/nicolas/Research/Data/kentucky-db/'
+
+interactingRoadUsers = {'Miss/0404052336': [(0,3)] # 0,2 and 1 vs 3
+                        #,
+                        #'Incident/0306022035': [(1,3)]
+                        #,
+                        #'Miss/0208030956': [(4,5),(5,7)]
+                        }
+
+
+def getIndicatorName(filename, withUnit = False):
+    if withUnit:
+        unit = ' (s)'
+    else:
+        unit = ''
+    if 'collision-point' in filename:
+        return 'TTC'+unit
+    elif 'crossing' in filename:
+        return 'pPET'+unit
+    elif 'probability' in filename:
+        return 'P(UEA)'
+
+def getMethodName(fileprefix):
+    if fileprefix == 'constant-velocity':
+        return 'Con. Vel.'
+    elif fileprefix == 'normal-adaptation':
+        return 'Norm. Ad.'
+    elif fileprefix == 'point-set':
+        return 'Pos. Set'
+    elif fileprefix == 'evasive-action':
+        return 'Ev. Act.'
+    elif fileprefix == 'point-set-evasive-action':
+        return 'Pos. Set'
+
+indicator2TimeIdx = {'TTC':2,'pPET':2, 'P(UEA)':3}
+
+def getDataAtInstant(data, i):
+    return data[data[:,2] == i]
+
+def getPointsAtInstant(data, i):
+    return getDataAtInstant(data, i)[3:5]
+
+def getIndicator(data, roadUserNumbers, indicatorName):
+    if data.ndim ==1:
+        data.shape = (1,data.shape[0])
+
+    # find the order for the roadUserNumbers
+    uniqueObj1 = np.unique(data[:,0])
+    uniqueObj2 = np.unique(data[:,1])
+    found = False
+    if roadUserNumbers[0] in uniqueObj1 and roadUserNumbers[1] in uniqueObj2:
+        objNum1 = roadUserNumbers[0]
+        objNum2 = roadUserNumbers[1]
+        found = True
+    if roadUserNumbers[1] in uniqueObj1 and roadUserNumbers[0] in uniqueObj2:
+        objNum1 = roadUserNumbers[1]
+        objNum2 = roadUserNumbers[0]
+        found = True
+
+    # get subset of data for road user numbers
+    if found:
+        roadUserData = data[np.logical_and(data[:,0] == objNum1, data[:,1] == objNum2),:]
+        if roadUserData.size > 0:
+            time = np.unique(roadUserData[:,indicator2TimeIdx[indicatorName]])
+            values = {}
+            if indicatorName == 'P(UEA)':
+                tmp = roadUserData[:,4]
+                for k,v in zip(time, tmp):
+                    values[k]=v
+                return SeverityIndicator(indicatorName, values, mostSevereIsMax = False, maxValue = 1.), roadUserData
+            else:
+                for i in xrange(int(time[0]), int(time[-1])+1):
+                    try:
+                        tmp = getDataAtInstant(roadUserData, i)
+                        values[i] = np.sum(tmp[:,5]*tmp[:,6])/np.sum(tmp[:,5])/frameRate
+                    except IOError:
+                        values[i] = np.inf
+                return SeverityIndicator(indicatorName, values, mostSevereIsMax = False), roadUserData
+    return None, None
\ No newline at end of file
--- a/python/poly_utils.py	Thu Apr 18 15:29:33 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,126 +0,0 @@
-#! /usr/bin/env python
-'''Various utilities to load data saved by the POLY new output(s)'''
-import sys
-import utils
-from moving import  TimeInterval
-import numpy as np
-
-__metaclass__ = type
-from indicators import SeverityIndicator
-
-
-def loadNewInteractions(videoFilename,interactionType,dirname, extension, indicatorsNames, roaduserNum1,roaduserNum2, selectedIndicators=[]):
-    '''Loads interactions from the POLY traffic event format'''
-    from events import Interaction 
-    filename= dirname + videoFilename + extension
-    #filename= dirname + interactionType+ '-' + videoFilename + extension # case of min distance todo: change the saving format to be matched with all outputs
-    file = utils.openCheck(filename)
-    if (not file):
-        return []
-    #interactions = []
-    interactionNum = 0
-    data= np.loadtxt(filename)
-    indicatorFrameNums= data[:,0]
-    inter = Interaction(interactionNum, TimeInterval(indicatorFrameNums[0],indicatorFrameNums[-1]), roaduserNum1, roaduserNum2) 
-    inter.addVideoFilename(videoFilename)
-    inter.addInteractionType(interactionType)
-    for key in indicatorsNames.keys():
-        values= {}
-        for i,t in enumerate(indicatorFrameNums):
-            values[t] = data[i,key]
-        inter.addIndicator(SeverityIndicator(indicatorsNames[key], values))
-    if selectedIndicators !=[]:
-        values= {}
-        for i,t in enumerate(indicatorFrameNums):
-            values[t] = [data[i,index] for index in selectedIndicators]
-        inter.addIndicator(SeverityIndicator('selectedIndicators', values))	
-		
-    #interactions.append(inter)
-    file.close()
-    #return interactions
-    return inter
-
-# Plotting results
-
-frameRate = 15.
-
-# To run in directory that contains the directories that contain the results (Miss-xx and Incident-xx)
-#dirname = '/home/nicolas/Research/Data/kentucky-db/'
-
-interactingRoadUsers = {'Miss/0404052336': [(0,3)] # 0,2 and 1 vs 3
-                        #,
-                        #'Incident/0306022035': [(1,3)]
-                        #,
-                        #'Miss/0208030956': [(4,5),(5,7)]
-                        }
-
-
-def getIndicatorName(filename, withUnit = False):
-    if withUnit:
-        unit = ' (s)'
-    else:
-        unit = ''
-    if 'collision-point' in filename:
-        return 'TTC'+unit
-    elif 'crossing' in filename:
-        return 'pPET'+unit
-    elif 'probability' in filename:
-        return 'P(UEA)'
-
-def getMethodName(fileprefix):
-    if fileprefix == 'constant-velocity':
-        return 'Con. Vel.'
-    elif fileprefix == 'normal-adaptation':
-        return 'Norm. Ad.'
-    elif fileprefix == 'point-set':
-        return 'Pos. Set'
-    elif fileprefix == 'evasive-action':
-        return 'Ev. Act.'
-    elif fileprefix == 'point-set-evasive-action':
-        return 'Pos. Set'
-
-indicator2TimeIdx = {'TTC':2,'pPET':2, 'P(UEA)':3}
-
-def getDataAtInstant(data, i):
-    return data[data[:,2] == i]
-
-def getPointsAtInstant(data, i):
-    return getDataAtInstant(i)[3:5]
-
-def getIndicator(data, roadUserNumbers, indicatorName):
-    if data.ndim ==1:
-        data.shape = (1,data.shape[0])
-
-    # find the order for the roadUserNumbers
-    uniqueObj1 = np.unique(data[:,0])
-    uniqueObj2 = np.unique(data[:,1])
-    found = False
-    if roadUserNumbers[0] in uniqueObj1 and roadUserNumbers[1] in uniqueObj2:
-        objNum1 = roadUserNumbers[0]
-        objNum2 = roadUserNumbers[1]
-        found = True
-    if roadUserNumbers[1] in uniqueObj1 and roadUserNumbers[0] in uniqueObj2:
-        objNum1 = roadUserNumbers[1]
-        objNum2 = roadUserNumbers[0]
-        found = True
-
-    # get subset of data for road user numbers
-    if found:
-        roadUserData = data[np.logical_and(data[:,0] == objNum1, data[:,1] == objNum2),:]
-        if roadUserData.size > 0:
-            time = np.unique(roadUserData[:,indicator2TimeIdx[indicatorName]])
-            values = {}
-            if indicatorName == 'P(UEA)':
-                tmp = roadUserData[:,4]
-                for k,v in zip(time, tmp):
-                    values[k]=v
-                return SeverityIndicator(indicatorName, values, mostSevereIsMax = False, maxValue = 1.), roadUserData
-            else:
-                for i in xrange(time[0],time[-1]+1):
-                    try:
-						tmp = getDataAtInstant(roadUserData, i)
-						values[i] = np.sum(tmp[:,5]*tmp[:,6])/np.sum(tmp[:,5])/frameRate
-                    except IOError:
-                    	values[i] = np.inf
-                return SeverityIndicator(indicatorName, values, mostSevereIsMax = False), roadUserData
-    return None, None
\ No newline at end of file
--- a/python/prediction.py	Thu Apr 18 15:29:33 2013 -0400
+++ b/python/prediction.py	Fri Dec 05 12:13:53 2014 -0500
@@ -30,14 +30,14 @@
     def getPredictedSpeeds(self):
         return [so.norm for so in self.predictedSpeedOrientations.values()]
 
-    def draw(self, options = '', withOrigin = False, timeStep = 1, **kwargs):
-        self.getPredictedTrajectory().draw(options, withOrigin, timeStep, **kwargs)
+    def plot(self, options = '', withOrigin = False, timeStep = 1, **kwargs):
+        self.getPredictedTrajectory().plot(options, withOrigin, timeStep, **kwargs)
 
 class PredictedTrajectoryConstant(PredictedTrajectory):
     '''Predicted trajectory at constant speed or acceleration
     TODO generalize by passing a series of velocities/accelerations'''
 
-    def __init__(self, initialPosition, initialVelocity, control = moving.NormAngle(0,0), probability = 1, maxSpeed = None):
+    def __init__(self, initialPosition, initialVelocity, control = moving.NormAngle(0,0), probability = 1., maxSpeed = None):
         self.control = control
         self.maxSpeed = maxSpeed
         self.probability = probability
@@ -47,9 +47,37 @@
     def getControl(self):
         return self.control
 
-class PredictedTrajectoryNormalAdaptation(PredictedTrajectory):
-    '''Random small adaptation of vehicle control '''
-    def __init__(self, initialPosition, initialVelocity, accelerationDistribution, steeringDistribution, probability = 1, maxSpeed = None):
+class PredictedTrajectoryPrototype(PredictedTrajectory):
+    '''Predicted trajectory that follows a prototype trajectory
+    The prototype is in the format of a moving.Trajectory: it could be
+    1. an observed trajectory (extracted from video)
+    2. a generic polyline (eg the road centerline) that a vehicle is supposed to follow
+
+    Prediction can be done
+    1. at constant speed (the instantaneous user speed)
+    2. following the trajectory path, at the speed of the user
+    (applying a constant ratio equal
+    to the ratio of the user's instantaneous speed and the closest speed on the prototype trajectory)'''
+
+    def __init__(self, initialPosition, initialVelocity, prototypeTrajectory, constantSpeed = True, probability = 1.):
+        self.prototypeTrajectory = prototypeTrajectory
+        self.constantSpeed = constantSpeed
+        self.probability = probability
+        self.predictedPositions = {0: initialPosition}
+        self.predictedSpeedOrientations = {0: moving.NormAngle.fromPoint(initialVelocity)}
+
+    def predictPosition(self, nTimeSteps):
+        if nTimeSteps > 0 and nTimeSteps not in self.predictedPositions.keys():
+            if self.constantSpeed:
+                # calculate cumulative distance
+                pass
+            else: # see c++ code, calculate ratio
+                pass
+        return self.predictedPositions[nTimeSteps]
+
+class PredictedTrajectoryRandomControl(PredictedTrajectory):
+    '''Random vehicle control: suitable for normal adaptation'''
+    def __init__(self, initialPosition, initialVelocity, accelerationDistribution, steeringDistribution, probability = 1., maxSpeed = None):
         '''Constructor
         accelerationDistribution and steeringDistribution are distributions 
         that return random numbers drawn from them'''
@@ -63,101 +91,6 @@
     def getControl(self):
         return moving.NormAngle(self.accelerationDistribution(),self.steeringDistribution())
 
-class PredictionParameters:
-    def __init__(self, name, maxSpeed):
-        self.name = name
-        self.maxSpeed = maxSpeed
-
-    def __str__(self):
-        return '{0} {1}'.format(self.name, self.maxSpeed)
-
-class ConstantPredictionParameters(PredictionParameters):
-    def __init__(self, maxSpeed):
-        PredictionParameters.__init__(self, 'constant velocity', maxSpeed)
-
-    def generatePredictedTrajectories(self, obj, instant):
-        return [PredictedTrajectoryConstant(obj.getPositionAtInstant(instant), obj.getVelocityAtInstant(instant), 
-                                               maxSpeed = self.maxSpeed)]
-
-class NormalAdaptationPredictionParameters(PredictionParameters):
-    def __init__(self, maxSpeed, nPredictedTrajectories, maxAcceleration, maxSteering):
-        PredictionParameters.__init__(self, 'normal adaptation', maxSpeed)
-        self.nPredictedTrajectories = nPredictedTrajectories
-        self.maxAcceleration = maxAcceleration
-        self.maxSteering = maxSteering
-        self.accelerationDistribution = lambda: random.triangular(-self.maxAcceleration, 
-                                                                   self.maxAcceleration, 0.)
-        self.steeringDistribution = lambda: random.triangular(-self.maxSteering, 
-                                                               self.maxSteering, 0.)
-        
-    def __str__(self):
-        return PredictionParameters.__str__(self)+' {0} {1} {2}'.format(self.nPredictedTrajectories, 
-                                                                           self.maxAcceleration, 
-                                                                           self.maxSteering)
-
-    def generatePredictedTrajectories(self, obj, instant):
-        predictedTrajectories = []
-        for i in xrange(self.nPredictedTrajectories):
-            predictedTrajectories.append(PredictedTrajectoryNormalAdaptation(obj.getPositionAtInstant(instant), 
-                                                                                   obj.getVelocityAtInstant(instant), 
-                                                                                   self.accelerationDistribution, 
-                                                                                   self.steeringDistribution, 
-                                                                                   maxSpeed = self.maxSpeed))
-        return predictedTrajectories
-
-class PointSetPredictionParameters(PredictionParameters):
-    # todo generate several trajectories with normal adaptatoins from each position (feature)
-    def __init__(self, maxSpeed):
-        PredictionParameters.__init__(self, 'point set', maxSpeed)
-    
-    def generatePredictedTrajectories(self, obj, instant):
-        predictedTrajectories = []        
-        features = [f for f in obj.features if f.existsAtInstant(instant)]
-        positions = [f.getPositionAtInstant(instant) for f in features]
-        velocities = [f.getVelocityAtInstant(instant) for f in features]
-        for initialPosition,initialVelocity in zip(positions, velocities):
-            predictedTrajectories.append(PredictedTrajectoryConstant(initialPosition, initialVelocity, 
-                                                                           maxSpeed = self.maxSpeed))
-        return predictedTrajectories
-
-class EvasiveActionPredictionParameters(PredictionParameters):
-    def __init__(self, maxSpeed, nPredictedTrajectories, minAcceleration, maxAcceleration, maxSteering, useFeatures = False):
-        if useFeatures:
-            name = 'point set evasive action'
-        else:
-            name = 'evasive action'
-        PredictionParameters.__init__(self, name, maxSpeed)
-        self.nPredictedTrajectories = nPredictedTrajectories
-        self.minAcceleration = minAcceleration
-        self.maxAcceleration = maxAcceleration
-        self.maxSteering = maxSteering
-        self.useFeatures = useFeatures
-        self.accelerationDistribution = lambda: random.triangular(self.minAcceleration, 
-                                                                  self.maxAcceleration, 0.)
-        self.steeringDistribution = lambda: random.triangular(-self.maxSteering, 
-                                                               self.maxSteering, 0.)
-
-    def __str__(self):
-        return PredictionParameters.__str__(self)+' {0} {1} {2} {3}'.format(self.nPredictedTrajectories, self.minAcceleration, self.maxAcceleration, self.maxSteering)
-
-    def generatePredictedTrajectories(self, obj, instant):
-        predictedTrajectories = []
-        if self.useFeatures:
-            features = [f for f in obj.features if f.existsAtInstant(instant)]
-            positions = [f.getPositionAtInstant(instant) for f in features]
-            velocities = [f.getVelocityAtInstant(instant) for f in features]
-        else:
-            positions = [obj.getPositionAtInstant(instant)]
-            velocities = [obj.getVelocityAtInstant(instant)]
-        for i in xrange(self.nPredictedTrajectories):
-            for initialPosition,initialVelocity in zip(positions, velocities):
-                predictedTrajectories.append(PredictedTrajectoryConstant(initialPosition, 
-                                                                               initialVelocity, 
-                                                                               moving.NormAngle(self.accelerationDistribution(), 
-                                                                                                self.steeringDistribution()), 
-                                                                               maxSpeed = self.maxSpeed))
-        return predictedTrajectories
-
 class SafetyPoint(moving.Point):
     '''Can represent a collision point or crossing zone 
     with respective safety indicator, TTC or pPET'''
@@ -175,9 +108,10 @@
         for p in points:
             out.write('{0} {1} {2} {3}\n'.format(objNum1, objNum2, predictionInstant, p))
 
-def computeExpectedIndicator(points):
-    from numpy import sum
-    return sum([p.indicator*p.probability for p in points])/sum([p.probability for p in points])
+    @staticmethod
+    def computeExpectedIndicator(points):
+        from numpy import sum
+        return sum([p.indicator*p.probability for p in points])/sum([p.probability for p in points])
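+
+    # For example (hypothetical values): two safety points with (indicator, probability)
+    # of (1.5, 0.5) and (3.0, 0.25) give (1.5*0.5 + 3.0*0.25)/(0.5 + 0.25) = 2.0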
 
 def computeCollisionTime(predictedTrajectory1, predictedTrajectory2, collisionDistanceThreshold, timeHorizon):
     '''Computes the first instant at which two predicted trajectories are within some distance threshold'''
@@ -190,19 +124,34 @@
         t += 1
     return t, p1, p2
 
-def computeCrossingsCollisionsAtInstant(currentInstant, obj1, obj2, predictionParameters, collisionDistanceThreshold, timeHorizon, computeCZ = False, debug = False):
-    '''returns the lists of collision points and crossing zones
-    
-    Check: Predicting all the points together, as if they represent the whole vehicle'''
-    predictedTrajectories1 = predictionParameters.generatePredictedTrajectories(obj1, currentInstant)
-    predictedTrajectories2 = predictionParameters.generatePredictedTrajectories(obj2, currentInstant)
+def savePredictedTrajectoriesFigure(currentInstant, obj1, obj2, predictedTrajectories1, predictedTrajectories2, timeHorizon):
+    from matplotlib.pyplot import figure, axis, title, close, savefig
+    figure()
+    for et in predictedTrajectories1:
+        et.predictPosition(timeHorizon)
+        et.plot('rx')
+
+    for et in predictedTrajectories2:
+        et.predictPosition(timeHorizon)
+        et.plot('bx')
+    obj1.plot('r')
+    obj2.plot('b')
+    title('instant {0}'.format(currentInstant))
+    axis('equal')
+    savefig('predicted-trajectories-t-{0}.png'.format(currentInstant))
+    close()
+
+def computeCrossingsCollisionsAtInstant(predictionParams, currentInstant, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ = False, debug = False):
+    '''returns the lists of collision points and crossing zones'''
+    predictedTrajectories1 = predictionParams.generatePredictedTrajectories(obj1, currentInstant)
+    predictedTrajectories2 = predictionParams.generatePredictedTrajectories(obj2, currentInstant)
 
     collisionPoints = []
     crossingZones = []
     for et1 in predictedTrajectories1:
         for et2 in predictedTrajectories2:
             t, p1, p2 = computeCollisionTime(et1, et2, collisionDistanceThreshold, timeHorizon)
-            
+
             if t <= timeHorizon:
                 collisionPoints.append(SafetyPoint((p1+p2).multiply(0.5), et1.probability*et2.probability, t))
             elif computeCZ: # check if there is a crossing zone
@@ -217,29 +166,17 @@
                         #if (et1.predictPosition(t1)-et2.predictPosition(t2)).norm2() < collisionDistanceThreshold:
                         #    cz = (et1.predictPosition(t1)+et2.predictPosition(t2)).multiply(0.5)
                         cz = moving.segmentIntersection(et1.predictPosition(t1), et1.predictPosition(t1+1), et2.predictPosition(t2), et2.predictPosition(t2+1))
-                        if cz:
+                        if cz != None:
                             crossingZones.append(SafetyPoint(cz, et1.probability*et2.probability, abs(t1-t2)))
                         t2 += 1
                     t1 += 1                        
 
     if debug:
-        from matplotlib.pyplot import figure, axis, title
-        figure()
-        for et in predictedTrajectories1:
-            et.predictPosition(timeHorizon)
-            et.draw('rx')
+        savePredictedTrajectoriesFigure(currentInstant, obj1, obj2, predictedTrajectories1, predictedTrajectories2, timeHorizon)
 
-        for et in predictedTrajectories2:
-            et.predictPosition(timeHorizon)
-            et.draw('bx')
-        obj1.draw('r')
-        obj2.draw('b')
-        title('instant {0}'.format(i))
-        axis('equal')
+    return currentInstant, collisionPoints, crossingZones
 
-    return collisionPoints, crossingZones
-    
-def computeCrossingsCollisions(obj1, obj2, predictionParameters, collisionDistanceThreshold, timeHorizon, computeCZ = False, debug = False, timeInterval = None):
+def computeCrossingsCollisions(predictionParams, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ = False, debug = False, timeInterval = None, nProcesses = 1):
     '''Computes all crossing and collision points at each common instant for two road users. '''
     collisionPoints={}
     crossingZones={}
@@ -247,49 +184,253 @@
         commonTimeInterval = timeInterval
     else:
         commonTimeInterval = obj1.commonTimeInterval(obj2)
-    for i in list(commonTimeInterval)[:-1]: # do not look at the 1 last position/velocities, often with errors
-        collisionPoints[i], crossingZones[i] = computeCrossingsCollisionsAtInstant(i, obj1, obj2, predictionParameters, collisionDistanceThreshold, timeHorizon, computeCZ, debug)
-
-    return collisionPoints, crossingZones
- 
-def computeCollisionProbability(obj1, obj2, predictionParameters, collisionDistanceThreshold, timeHorizon, debug = False, timeInterval = None):
-    '''Computes only collision probabilities
-    Returns for each instant the collision probability and number of samples drawn'''
-    collisionProbabilities = {}
-    if timeInterval:
-        commonTimeInterval = timeInterval
+    if nProcesses == 1:
+        for i in list(commonTimeInterval)[:-1]: # do not look at the last position/velocity, which often has errors
+            i, cp, cz = computeCrossingsCollisionsAtInstant(predictionParams, i, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ, debug)
+            if len(cp) != 0:
+                collisionPoints[i] = cp
+            if len(cz) != 0:
+                crossingZones[i] = cz
     else:
-        commonTimeInterval = obj1.commonTimeInterval(obj2)
-    for i in list(commonTimeInterval)[:-1]:
-        nCollisions = 0
-        print(obj1.num, obj2.num, i)
-        predictedTrajectories1 = predictionParameters.generatePredictedTrajectories(obj1, i)
-        predictedTrajectories2 = predictionParameters.generatePredictedTrajectories(obj2, i)
-        for et1 in predictedTrajectories1:
-            for et2 in predictedTrajectories2:
-                t, p1, p2 = computeCollisionTime(et1, et2, collisionDistanceThreshold, timeHorizon)
-                if t <= timeHorizon:
-                    nCollisions += 1
-        # take into account probabilities ??
-        nSamples = float(len(predictedTrajectories1)*len(predictedTrajectories2))
-        collisionProbabilities[i] = [nSamples, float(nCollisions)/nSamples]
+        from multiprocessing import Pool
+        pool = Pool(processes = nProcesses)
+        jobs = [pool.apply_async(computeCrossingsCollisionsAtInstant, args = (predictionParams, i, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ, debug)) for i in list(commonTimeInterval)[:-1]]
+        #results = [j.get() for j in jobs]
+        #results.sort()
+        for j in jobs:
+            i, cp, cz = j.get()
+            #if len(cp) != 0 or len(cz) != 0:
+            if len(cp) != 0:
+                collisionPoints[i] = cp
+            if len(cz) != 0:
+                crossingZones[i] = cz
+        pool.close()
+    return collisionPoints, crossingZones
+
+class PredictionParameters:
+    def __init__(self, name, maxSpeed):
+        self.name = name
+        self.maxSpeed = maxSpeed
+
+    def __str__(self):
+        return '{0} {1}'.format(self.name, self.maxSpeed)
+
+    def generatePredictedTrajectories(self, obj, instant):
+        return []
+
+    def computeCrossingsCollisionsAtInstant(self, currentInstant, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ = False, debug = False):
+        return computeCrossingsCollisionsAtInstant(self, currentInstant, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ, debug)
+
+    def computeCrossingsCollisions(self, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ = False, debug = False, timeInterval = None, nProcesses = 1):
+        return computeCrossingsCollisions(self, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ, debug, timeInterval, nProcesses)
+
+    def computeCollisionProbability(self, obj1, obj2, collisionDistanceThreshold, timeHorizon, debug = False, timeInterval = None):
+        '''Computes only collision probabilities
+        Returns for each instant the collision probability and number of samples drawn'''
+        collisionProbabilities = {}
+        if timeInterval:
+            commonTimeInterval = timeInterval
+        else:
+            commonTimeInterval = obj1.commonTimeInterval(obj2)
+        for i in list(commonTimeInterval)[:-1]:
+            nCollisions = 0
+            predictedTrajectories1 = self.generatePredictedTrajectories(obj1, i)
+            predictedTrajectories2 = self.generatePredictedTrajectories(obj2, i)
+            for et1 in predictedTrajectories1:
+                for et2 in predictedTrajectories2:
+                    t, p1, p2 = computeCollisionTime(et1, et2, collisionDistanceThreshold, timeHorizon)
+                    if t <= timeHorizon:
+                        nCollisions += 1
+            # take into account probabilities ??
+            nSamples = float(len(predictedTrajectories1)*len(predictedTrajectories2))
+            collisionProbabilities[i] = [nSamples, float(nCollisions)/nSamples]
+
+            if debug:
+                savePredictedTrajectoriesFigure(i, obj1, obj2, predictedTrajectories1, predictedTrajectories2, timeHorizon)
+
+        return collisionProbabilities
+
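The base class above acts as a template: concrete prediction methods only override generatePredictedTrajectories and inherit the crossing/collision and collision probability computations. A minimal, hypothetical sketch of such a subclass (the class name and its constant-position behaviour are illustrative only, not part of this changeset):

    class ConstantPositionPredictionParameters(PredictionParameters):
        '''Hypothetical example: assumes the road user stays at its current position'''
        def __init__(self):
            PredictionParameters.__init__(self, 'constant position', None)

        def generatePredictedTrajectories(self, obj, instant):
            # a single predicted trajectory with zero velocity
            return [PredictedTrajectoryConstant(obj.getPositionAtInstant(instant), moving.Point(0., 0.))]
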
+class ConstantPredictionParameters(PredictionParameters):
+    def __init__(self, maxSpeed):
+        PredictionParameters.__init__(self, 'constant velocity', maxSpeed)
+
+    def generatePredictedTrajectories(self, obj, instant):
+        return [PredictedTrajectoryConstant(obj.getPositionAtInstant(instant), obj.getVelocityAtInstant(instant), 
+                                               maxSpeed = self.maxSpeed)]
+
+class NormalAdaptationPredictionParameters(PredictionParameters):
+    def __init__(self, maxSpeed, nPredictedTrajectories, accelerationDistribution, steeringDistribution, useFeatures = False):
+        '''An example of acceleration and steering distributions is
+        lambda: random.triangular(-self.maxAcceleration, self.maxAcceleration, 0.)
+        '''
+        if useFeatures:
+            name = 'point set normal adaptation'
+        else:
+            name = 'normal adaptation'
+        PredictionParameters.__init__(self, name, maxSpeed)
+        self.nPredictedTrajectories = nPredictedTrajectories
+        self.useFeatures = useFeatures
+        self.accelerationDistribution = accelerationDistribution
+        self.steeringDistribution = steeringDistribution
+        
+    def __str__(self):
+        return PredictionParameters.__str__(self)+' {0}'.format(self.nPredictedTrajectories)
 
-        if debug:
-            from matplotlib.pyplot import figure, axis, title
+    def generatePredictedTrajectories(self, obj, instant):
+        predictedTrajectories = []
+        if self.useFeatures:
+            features = [f for f in obj.features if f.existsAtInstant(instant)]
+            positions = [f.getPositionAtInstant(instant) for f in features]
+            velocities = [f.getVelocityAtInstant(instant) for f in features]
+        else:
+            positions = [obj.getPositionAtInstant(instant)]
+            velocities = [obj.getVelocityAtInstant(instant)]
+        for i in xrange(self.nPredictedTrajectories):
+            for initialPosition,initialVelocity in zip(positions, velocities):
+                predictedTrajectories.append(PredictedTrajectoryRandomControl(initialPosition, 
+                                                                              initialVelocity, 
+                                                                              self.accelerationDistribution, 
+                                                                              self.steeringDistribution, 
+                                                                              maxSpeed = self.maxSpeed))
+        return predictedTrajectories
+
+class PointSetPredictionParameters(PredictionParameters):
+    # todo generate several trajectories with normal adaptations from each position (feature)
+    def __init__(self, maxSpeed):
+        PredictionParameters.__init__(self, 'point set', maxSpeed)
+        #self.nPredictedTrajectories = nPredictedTrajectories
+    
+    def generatePredictedTrajectories(self, obj, instant):
+        predictedTrajectories = []        
+        features = [f for f in obj.features if f.existsAtInstant(instant)]
+        positions = [f.getPositionAtInstant(instant) for f in features]
+        velocities = [f.getVelocityAtInstant(instant) for f in features]
+        #for i in xrange(self.nPredictedTrajectories):
+        for initialPosition,initialVelocity in zip(positions, velocities):
+            predictedTrajectories.append(PredictedTrajectoryConstant(initialPosition, initialVelocity, 
+                                                                     maxSpeed = self.maxSpeed))
+        return predictedTrajectories
+
+class EvasiveActionPredictionParameters(PredictionParameters):
+    def __init__(self, maxSpeed, nPredictedTrajectories, accelerationDistribution, steeringDistribution, useFeatures = False):
+        '''Suggested acceleration distribution may not be symmetric, eg
+        lambda: random.triangular(self.minAcceleration, self.maxAcceleration, 0.)'''
+
+        if useFeatures:
+            name = 'point set evasive action'
+        else:
+            name = 'evasive action'
+        PredictionParameters.__init__(self, name, maxSpeed)
+        self.nPredictedTrajectories = nPredictedTrajectories
+        self.useFeatures = useFeatures
+        self.accelerationDistribution = accelerationDistribution
+        self.steeringDistribution = steeringDistribution
+
+    def __str__(self):
+        return PredictionParameters.__str__(self)+' {0}'.format(self.nPredictedTrajectories)
+
+    def generatePredictedTrajectories(self, obj, instant):
+        predictedTrajectories = []
+        if self.useFeatures:
+            features = [f for f in obj.features if f.existsAtInstant(instant)]
+            positions = [f.getPositionAtInstant(instant) for f in features]
+            velocities = [f.getVelocityAtInstant(instant) for f in features]
+        else:
+            positions = [obj.getPositionAtInstant(instant)]
+            velocities = [obj.getVelocityAtInstant(instant)]
+        for i in xrange(self.nPredictedTrajectories):
+            for initialPosition,initialVelocity in zip(positions, velocities):
+                predictedTrajectories.append(PredictedTrajectoryConstant(initialPosition, 
+                                                                         initialVelocity, 
+                                                                         moving.NormAngle(self.accelerationDistribution(), 
+                                                                                          self.steeringDistribution()), 
+                                                                         maxSpeed = self.maxSpeed))
+        return predictedTrajectories
+
+
+class CVDirectPredictionParameters(PredictionParameters):
+    '''Prediction parameters for prediction at constant velocity,
+    using direct computation of the intersecting point'''
+    
+    def __init__(self):
+        PredictionParameters.__init__(self, 'constant velocity (direct computation)', None)
+
+    def computeCrossingsCollisionsAtInstant(self, currentInstant, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ = False, debug = False):
+        collisionPoints = []
+        crossingZones = []
+
+        p1 = obj1.getPositionAtInstant(currentInstant)
+        p2 = obj2.getPositionAtInstant(currentInstant)
+        if (p1-p2).norm2() <= collisionDistanceThreshold:
+            collisionPoints = [SafetyPoint((p1+p2).multiply(0.5), 1., 0.)]
+        else:
+            v1 = obj1.getVelocityAtInstant(currentInstant)
+            v2 = obj2.getVelocityAtInstant(currentInstant)
+            intersection = moving.intersection(p1, p2, v1, v2)
+
+            if intersection != None:
+                dp1 = intersection-p1
+                dp2 = intersection-p2
+                if moving.Point.dot(dp1, v1) > 0 and moving.Point.dot(dp2, v2) > 0: # if the road users are moving towards the intersection
+                    dist1 = dp1.norm2()
+                    dist2 = dp2.norm2()
+                    s1 = v1.norm2()
+                    s2 = v2.norm2()
+                    halfCollisionDistanceThreshold = collisionDistanceThreshold/2.
+                    timeInterval1 = moving.TimeInterval(max(0,dist1-halfCollisionDistanceThreshold)/s1, (dist1+halfCollisionDistanceThreshold)/s1)
+                    timeInterval2 = moving.TimeInterval(max(0,dist2-halfCollisionDistanceThreshold)/s2, (dist2+halfCollisionDistanceThreshold)/s2)
+                    collisionTimeInterval = moving.TimeInterval.intersection(timeInterval1, timeInterval2)
+                    if collisionTimeInterval.empty():
+                        if computeCZ:
+                            crossingZones = [SafetyPoint(intersection, 1., timeInterval1.distance(timeInterval2))]
+                    else:
+                        collisionPoints = [SafetyPoint(intersection, 1., collisionTimeInterval.center())]
+    
+        if debug and intersection != None:
+            from matplotlib.pyplot import plot, figure, axis, title
             figure()
-            for et in predictedTrajectories1:
-                et.predictPosition(timeHorizon)
-                et.draw('rx')
-                
-            for et in predictedTrajectories2:
-                et.predictPosition(timeHorizon)
-                et.draw('bx')
-            obj1.draw('r')
-            obj2.draw('b')
-            title('instant {0}'.format(i))
+            plot([p1.x, intersection.x], [p1.y, intersection.y], 'r')
+            plot([p2.x, intersection.x], [p2.y, intersection.y], 'b')
+            intersection.plot()            
+            obj1.plot('r')
+            obj2.plot('b')
+            title('instant {0}'.format(currentInstant))
             axis('equal')
 
-    return collisionProbabilities
+        return collisionPoints, crossingZones
+
+class CVExactPredictionParameters(PredictionParameters):
+    '''Prediction parameters for prediction at constant velocity,
+    using direct computation of the intersecting point (solving the equation)'''
+    
+    def __init__(self):
+        PredictionParameters.__init__(self, 'constant velocity (direct exact computation)', None)
+
+    def computeCrossingsCollisionsAtInstant(self, currentInstant, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ = False, debug = False):
+        'TODO add collision point coordinates, compute pPET'
+        #collisionPoints = []
+        #crossingZones = []
+
+        p1 = obj1.getPositionAtInstant(currentInstant)
+        p2 = obj2.getPositionAtInstant(currentInstant)
+        v1 = obj1.getVelocityAtInstant(currentInstant)
+        v2 = obj2.getVelocityAtInstant(currentInstant)
+        intersection = moving.intersection(p1, p2, v1, v2)
+
+        if intersection != None:
+            ttc = moving.Point.timeToCollision(p1, p2, v1, v2, collisionDistanceThreshold)
+            if ttc:
+                return [SafetyPoint(intersection, 1., ttc)], [] # (p1+v1.multiply(ttc)+p2+v2.multiply(ttc)).multiply(0.5)
+        return [],[]
+
+####
+# Other Methods
+####
+
+
+
 
 
 if __name__ == "__main__":
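For reference, a minimal usage sketch of the new class-based prediction interface (obj1 and obj2 stand for two MovingObject instances assumed to be in scope; the numeric thresholds, frame rate and distribution bounds are illustrative placeholders):

    import random

    # constant velocity prediction, e.g. speeds capped at 50 km/h for a 25 fps video (world units per frame)
    params = ConstantPredictionParameters(maxSpeed = 50/3.6/25.)
    collisionPoints, crossingZones = params.computeCrossingsCollisions(obj1, obj2,
                                                                       collisionDistanceThreshold = 1.8,
                                                                       timeHorizon = 125,
                                                                       computeCZ = True,
                                                                       nProcesses = 4) # instants processed in parallel
    # collisionPoints and crossingZones are dictionaries of SafetyPoint lists indexed by frame number

    # normal adaptation, passing acceleration and steering distributions as suggested in the docstring
    maxAcceleration, maxSteering = 2./25**2, 0.2/25 # assumed values, already converted to frame units
    adaptationParams = NormalAdaptationPredictionParameters(50/3.6/25., 10,
                                                            lambda: random.triangular(-maxAcceleration, maxAcceleration, 0.),
                                                            lambda: random.triangular(-maxSteering, maxSteering, 0.))
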
--- a/python/requirements.txt	Thu Apr 18 15:29:33 2013 -0400
+++ b/python/requirements.txt	Fri Dec 05 12:13:53 2014 -0500
@@ -4,7 +4,7 @@
 The following libraries are necessary for (sometimes very) specific classes/functions.
 
 CV functionalities (cvutils.py): opencv
-Image functionalities (cvutils.py): Python Image Library
+Image functionalities (cvutils.py): Python Image Library (new version is called Pillow)
 
 Machine learning (ml.py): scipy
 
--- a/python/run-tests.sh	Thu Apr 18 15:29:33 2013 -0400
+++ b/python/run-tests.sh	Fri Dec 05 12:13:53 2014 -0500
@@ -1,8 +1,7 @@
 #!/bin/sh
 # for file in tests/*... basename
-python moving.py
-python storage.py
+for f in ./*.py
+do
+    python $f
+done
 rm nonexistent
-python indicators.py
-python utils.py
-python prediction.py
\ No newline at end of file
--- a/python/storage.py	Thu Apr 18 15:29:33 2013 -0400
+++ b/python/storage.py	Fri Dec 05 12:13:53 2014 -0500
@@ -2,40 +2,52 @@
 # -*- coding: utf-8 -*-
 '''Various utilities to save and load data'''
 
-import utils
-import moving
+import utils, moving, events, indicators
+
+import sqlite3, logging
 
 __metaclass__ = type
 
 
+commentChar = '#'
+
+delimiterChar = '%';
+
 ngsimUserTypes = {'twowheels':1,
                   'car':2,
                   'truck':3}
 
 #########################
-# txt files
-#########################
-
-
-
-#########################
 # Sqlite
 #########################
 
-def writeTrajectoriesToSqlite(objects, outFile, trajectoryType, objectNumbers = -1):
+# utils
+def printDBError(error):
+    print('DB Error: {}'.format(error))
+
+def dropTables(connection, tableNames):
+    'Deletes the tables whose names are in tableNames'
+    try:
+        cursor = connection.cursor()
+        for tableName in tableNames:
+            cursor.execute('DROP TABLE IF EXISTS '+tableName)
+    except sqlite3.OperationalError as error:
+        printDBError(error)
+
+# TODO: add test if database connection is open
+# IO to sqlite
+def writeTrajectoriesToSqlite(objects, outputFilename, trajectoryType, objectNumbers = -1):
     """
     This function writes trajectories to a specified sqlite file
     @param[in] objects -> a list of trajectories
     @param[in] trajectoryType -
-    @param[out] outFile -> the .sqlite file containting the written objects
+    @param[out] outputFilename -> the .sqlite file containing the written objects
     @param[in] objectNumber : number of objects loaded
     """
-
-    import sqlite3
-    connection = sqlite3.connect(outFile)
+    connection = sqlite3.connect(outputFilename)
     cursor = connection.cursor()
 
-    schema = "CREATE TABLE \"positions\"(trajectory_id INTEGER,frame_number INTEGER, x_coordinate REAL, y_coordinate REAL, PRIMARY KEY(trajectory_id, frame_number))"
+    schema = "CREATE TABLE IF NOT EXISTS \"positions\"(trajectory_id INTEGER,frame_number INTEGER, x_coordinate REAL, y_coordinate REAL, PRIMARY KEY(trajectory_id, frame_number))"
     cursor.execute(schema)
 
     trajectory_id = 0
@@ -50,7 +62,166 @@
                     query = "insert into positions (trajectory_id, frame_number, x_coordinate, y_coordinate) values (?,?,?,?)"
                     cursor.execute(query,(trajectory_id,frame_number,position.x,position.y))
                     
-    connection.commit()            
+    connection.commit()
+    connection.close()
+	
+def writeFeaturesToSqlite(objects, outputFilename, trajectoryType, objectNumbers = -1):
+    '''Writes feature trajectories, maintaining the trajectory IDs, along with their velocities'''
+    connection = sqlite3.connect(outputFilename)
+    cursor = connection.cursor()
+
+    cursor.execute("CREATE TABLE IF NOT EXISTS \"positions\"(trajectory_id INTEGER,frame_number INTEGER, x_coordinate REAL, y_coordinate REAL, PRIMARY KEY(trajectory_id, frame_number))")
+    cursor.execute("CREATE TABLE IF NOT EXISTS \"velocities\"(trajectory_id INTEGER,frame_number INTEGER, x_coordinate REAL, y_coordinate REAL, PRIMARY KEY(trajectory_id, frame_number))")
+	
+    if trajectoryType == 'feature':
+        if type(objectNumbers) == int and objectNumbers == -1:
+            for trajectory in objects:
+                trajectory_id = trajectory.num
+                frame_number = trajectory.timeInterval.first
+                for position,velocity in zip(trajectory.getPositions(),trajectory.getVelocities()):
+                    cursor.execute("insert into positions (trajectory_id, frame_number, x_coordinate, y_coordinate) values (?,?,?,?)",(trajectory_id,frame_number,position.x,position.y))
+                    cursor.execute("insert into velocities (trajectory_id, frame_number, x_coordinate, y_coordinate) values (?,?,?,?)",(trajectory_id,frame_number,velocity.x,velocity.y))
+                    frame_number += 1
+                    
+    connection.commit()
+    connection.close()
+	
+def writePrototypesToSqlite(prototypes,nMatching, outputFilename):
+    """ prototype dataset is a dictionary with  keys== routes, values== prototypes Ids """
+    connection = sqlite3.connect(outputFilename)
+    cursor = connection.cursor()
+
+    cursor.execute("CREATE TABLE IF NOT EXISTS \"prototypes\"(prototype_id INTEGER,routeIDstart INTEGER,routeIDend INTEGER, nMatching INTEGER, PRIMARY KEY(prototype_id))")
+	
+    for route in prototypes.keys():
+        if prototypes[route]!=[]:
+            for i in prototypes[route]:
+                cursor.execute("insert into prototypes (prototype_id, routeIDstart,routeIDend, nMatching) values (?,?,?,?)",(i,route[0],route[1],nMatching[route][i]))
+                    
+    connection.commit()
+    connection.close()
+	
+def loadPrototypesFromSqlite(filename):
+    """
+    This function loads the prototype file in the database 
+    It returns a dictionary for prototypes for each route and nMatching
+    """
+    prototypes = {}
+    nMatching={}
+
+    connection = sqlite3.connect(filename)
+    cursor = connection.cursor()
+
+    try:
+        cursor.execute('SELECT * from prototypes order by prototype_id, routeIDstart,routeIDend, nMatching')
+    except sqlite3.OperationalError as error:
+        utils.printDBError(error)
+        return []
+
+    for row in cursor:
+        route=(row[1],row[2])
+        if route not in prototypes.keys():
+            prototypes[route]=[]
+        prototypes[route].append(row[0])
+        nMatching[row[0]]=row[3]
+
+    connection.close()
+    return prototypes,nMatching
+	
+def writeLabelsToSqlite(labels, outputFilename):
+    """ labels is a dictionary with  keys: routes, values: prototypes Ids
+    """
+    connection = sqlite3.connect(outputFilename)
+    cursor = connection.cursor()
+
+    cursor.execute("CREATE TABLE IF NOT EXISTS \"labels\"(object_id INTEGER,routeIDstart INTEGER,routeIDend INTEGER, prototype_id INTEGER, PRIMARY KEY(object_id))")
+	
+    for route in labels.keys():
+        if labels[route]!=[]:
+            for i in labels[route]:
+                for j in labels[route][i]:
+                    cursor.execute("insert into labels (object_id, routeIDstart,routeIDend, prototype_id) values (?,?,?,?)",(j,route[0],route[1],i))
+                    
+    connection.commit()
+    connection.close()
+	
+def loadLabelsFromSqlite(filename):
+    labels = {}
+
+    connection = sqlite3.connect(filename)
+    cursor = connection.cursor()
+
+    try:
+        cursor.execute('SELECT * from labels order by object_id, routeIDstart,routeIDend, prototype_id')
+    except sqlite3.OperationalError as error:
+        utils.printDBError(error)
+        return []
+
+    for row in cursor:
+        route=(row[1],row[2])
+        p=row[3]
+        if route not in labels.keys():
+            labels[route]={}
+        if p not in labels[route].keys():
+            labels[route][p]=[]
+        labels[route][p].append(row[0])
+
+    connection.close()
+    return labels
+
+def writeRoutesToSqlite(Routes, outputFilename):
+    """ This function writes the activity path define by start and end IDs"""
+    connection = sqlite3.connect(outputFilename)
+    cursor = connection.cursor()
+
+    cursor.execute("CREATE TABLE IF NOT EXISTS \"routes\"(object_id INTEGER,routeIDstart INTEGER,routeIDend INTEGER, PRIMARY KEY(object_id))")
+	
+    for route in Routes.keys():
+        if Routes[route]!=[]:
+            for i in Routes[route]:
+                cursor.execute("insert into routes (object_id, routeIDstart,routeIDend) values (?,?,?)",(i,route[0],route[1]))
+                    
+    connection.commit()
+    connection.close()
+	
+def loadRoutesFromSqlite(filename):
+    Routes = {}
+
+    connection = sqlite3.connect(filename)
+    cursor = connection.cursor()
+
+    try:
+        cursor.execute('SELECT * from routes order by object_id, routeIDstart,routeIDend')
+    except sqlite3.OperationalError as error:
+        utils.printDBError(error)
+        return []
+
+    for row in cursor:
+        route=(row[1],row[2])
+        if route not in Routes.keys():
+            Routes[route]=[]
+        Routes[route].append(row[0])
+
+    connection.close()
+    return Routes	
+
+def setRoutes(filename, objects):
+    connection = sqlite3.connect(filename)
+    cursor = connection.cursor()
+    for obj in objects:
+        cursor.execute('update objects set startRouteID = {} where object_id = {}'.format(obj.startRouteID, obj.getNum()))
+        cursor.execute('update objects set endRouteID = {} where object_id = {}'.format(obj.endRouteID, obj.getNum()))	        
+    connection.commit()
+    connection.close()
+
+def setRoadUserTypes(filename, objects):
+    '''Saves the user types of the objects in the sqlite database stored in filename
+    The objects should exist in the objects table'''
+    connection = sqlite3.connect(filename)
+    cursor = connection.cursor()
+    for obj in objects:
+        cursor.execute('update objects set road_user_type = {} where object_id = {}'.format(obj.getUserType(), obj.getNum()))
+    connection.commit()
     connection.close()
 
 def loadPrototypeMatchIndexesFromSqlite(filename):
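As an illustration of the data structures expected by the new prototype functions, a hedged round-trip sketch (the route identifiers, prototype ids and filename are made up):

    import storage
    # keys are (routeIDstart, routeIDend) tuples, values are lists of prototype trajectory ids
    prototypes = {(0, 1): [12, 34], (2, 3): [56]}
    # for each route, the number of trajectories matched to each prototype
    nMatching = {(0, 1): {12: 10, 34: 5}, (2, 3): {56: 8}}
    storage.writePrototypesToSqlite(prototypes, nMatching, 'prototypes.sqlite')
    loadedPrototypes, loadedNMatching = storage.loadPrototypesFromSqlite('prototypes.sqlite')
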
@@ -60,14 +231,13 @@
     """
     matched_indexes = []
 
-    import sqlite3    
     connection = sqlite3.connect(filename)
     cursor = connection.cursor()
 
     try:
         cursor.execute('SELECT * from prototypes order by prototype_id, trajectory_id_matched')
-    except sqlite3.OperationalError as err:
-        print('DB Error: {0}'.format(err))
+    except sqlite3.OperationalError as error:
+        printDBError(error)
         return []
 
     for row in cursor:
@@ -78,41 +248,51 @@
 
 def getTrajectoryIdQuery(objectNumbers, trajectoryType):
     if trajectoryType == 'feature':
-        statementBeginning = ' where trajectory_id'
+        statementBeginning = 'where trajectory_id '
     elif trajectoryType == 'object':
-        statementBeginning =  ' and OF.object_id'
+        statementBeginning = 'and OF.object_id '
+    elif trajectoryType in ['bbtop', 'bbbottom']:
+        statementBeginning = 'where object_id '
     else:
         print('no trajectory type was chosen')
 
-    if type(objectNumbers) == int:
-        if objectNumbers == -1:
-            query = ''
-        else:
-            query = statementBeginning+' between 0 and {0}'.format(objectNumbers)
+    if objectNumbers is None:
+        query = ''
+    elif type(objectNumbers) == int:
+        query = statementBeginning+'between 0 and {0} '.format(objectNumbers)
     elif type(objectNumbers) == list:
-        query = statementBeginning+' in ('+', '.join([str(n) for n in objectNumbers])+')'
+        query = statementBeginning+'in ('+', '.join([str(n) for n in objectNumbers])+') '
     return query
 
-def loadTrajectoriesFromTable(connection, tableName, trajectoryType, objectNumbers = -1):
+def loadTrajectoriesFromTable(connection, tableName, trajectoryType, objectNumbers = None):
     '''Loads trajectories (in the general sense) from the given table
     can be positions or velocities
 
     returns a moving object'''
-    import sqlite3
-
     cursor = connection.cursor()
 
     try:
+        idQuery = getTrajectoryIdQuery(objectNumbers, trajectoryType)
         if trajectoryType == 'feature':
-            trajectoryIdQuery = getTrajectoryIdQuery(objectNumbers, trajectoryType)
-            cursor.execute('SELECT * from '+tableName+trajectoryIdQuery+' order by trajectory_id, frame_number')
+            queryStatement = 'SELECT * from '+tableName+' '+idQuery+'ORDER BY trajectory_id, frame_number'
+            cursor.execute(queryStatement)
+            logging.debug(queryStatement)
         elif trajectoryType == 'object':
-            objectIdQuery = getTrajectoryIdQuery(objectNumbers, trajectoryType)
-            cursor.execute('SELECT OF.object_id, P.frame_number, avg(P.x_coordinate), avg(P.y_coordinate) from '+tableName+' P, objects_features OF where P.trajectory_id = OF.trajectory_id '+objectIdQuery+' group by object_id, frame_number')
+            queryStatement = 'SELECT OF.object_id, P.frame_number, avg(P.x_coordinate), avg(P.y_coordinate) from '+tableName+' P, objects_features OF where P.trajectory_id = OF.trajectory_id '+idQuery+'group by OF.object_id, P.frame_number ORDER BY OF.object_id, P.frame_number'
+            cursor.execute(queryStatement)
+            logging.debug(queryStatement)
+        elif trajectoryType in ['bbtop', 'bbbottom']:
+            if trajectoryType == 'bbtop':
+                corner = 'top_left'
+            elif trajectoryType == 'bbbottom':
+                corner = 'bottom_right'
+            queryStatement = 'SELECT object_id, frame_number, x_'+corner+', y_'+corner+' FROM '+tableName+' '+idQuery+'ORDER BY object_id, frame_number'
+            cursor.execute(queryStatement)
+            logging.debug(queryStatement)
         else:
             print('no trajectory type was chosen')
-    except sqlite3.OperationalError as err:
-        print('DB Error: {0}'.format(err))
+    except sqlite3.OperationalError as error:
+        printDBError(error)
         return []
 
     objId = -1
@@ -121,78 +301,365 @@
     for row in cursor:
         if row[0] != objId:
             objId = row[0]
-            if obj:
+            if obj != None and obj.length() == obj.positions.length():
                 objects.append(obj)
+            elif obj != None:
+                print('Object {} is missing {} positions'.format(obj.getNum(), int(obj.length())-obj.positions.length()))
             obj = moving.MovingObject(row[0], timeInterval = moving.TimeInterval(row[1], row[1]), positions = moving.Trajectory([[row[2]],[row[3]]]))
         else:
             obj.timeInterval.last = row[1]
             obj.positions.addPositionXY(row[2],row[3])
 
-    if obj:
+    if obj != None and obj.length() == obj.positions.length():
         objects.append(obj)
+    elif obj != None:
+        print('Object {} is missing {} positions'.format(obj.getNum(), int(obj.length())-obj.positions.length()))
 
     return objects
 
-def loadTrajectoriesFromSqlite(filename, trajectoryType, objectNumbers = -1):
-    '''Loads nObjects or the indices in objectNumbers from the database 
-    TODO: load feature numbers and not average feature trajectories
-    TODO: other ways of averaging trajectories (load all points, sorted by frame_number and leave the agregation to be done in python)
-    '''
-    import sqlite3
-    
-    connection = sqlite3.connect(filename) # add test if it open
+def loadUserTypesFromTable(cursor, trajectoryType, objectNumbers):
+    objectIdQuery = getTrajectoryIdQuery(objectNumbers, trajectoryType)
+    if objectIdQuery == '':
+        cursor.execute('SELECT object_id, road_user_type from objects')
+    else:
+        cursor.execute('SELECT object_id, road_user_type from objects where '+objectIdQuery[7:])
+    userTypes = {}
+    for row in cursor:
+        userTypes[row[0]] = row[1]
+    return userTypes
+
+def loadTrajectoriesFromSqlite(filename, trajectoryType, objectNumbers = None):
+    '''Loads the first objectNumbers objects or the indices in objectNumbers from the database'''
+    connection = sqlite3.connect(filename)
 
     objects = loadTrajectoriesFromTable(connection, 'positions', trajectoryType, objectNumbers)
     objectVelocities = loadTrajectoriesFromTable(connection, 'velocities', trajectoryType, objectNumbers)
 
     if len(objectVelocities) > 0:
         for o,v in zip(objects, objectVelocities):
-            if o.num == v.num:
+            if o.getNum() == v.getNum():
                 o.velocities = v.positions
+                o.velocities.duplicateLastPosition() # avoid having velocity shorter by one position than positions
             else:
-                print('Could not match positions {0} with velocities {1}'.format(o.num, v.num))
+                print('Could not match positions {0} with velocities {1}'.format(o.getNum(), v.getNum()))
 
     if trajectoryType == 'object':
         cursor = connection.cursor()
         try:
+            # attribute feature numbers to objects
             objectIdQuery = getTrajectoryIdQuery(objectNumbers, trajectoryType)
-            cursor.execute('SELECT P.trajectory_id, OF.object_id from positions P, objects_features OF where P.trajectory_id = OF.trajectory_id '+objectIdQuery+' group by P.trajectory_id order by OF.object_id')
+            queryStatement = 'SELECT P.trajectory_id, OF.object_id from positions P, objects_features OF where P.trajectory_id = OF.trajectory_id '+objectIdQuery+'group by P.trajectory_id order by OF.object_id' # order is important to group all features per object
+            cursor.execute(queryStatement) 
+            logging.debug(queryStatement)
 
-            # attribute feature numbers to objects
-            objId = -1
             featureNumbers = {}
             for row in cursor:
-                if row[1] != objId:
-                    objId = row[1]
+                objId = row[1]
+                if objId not in featureNumbers:
                     featureNumbers[objId] = [row[0]]
                 else:
                     featureNumbers[objId].append(row[0])
                     
             for obj in objects:
-                obj.featureNumbers = featureNumbers[obj.num]
-        except sqlite3.OperationalError as err:
-            print('DB Error: {0}'.format(err))
-            return []
+                obj.featureNumbers = featureNumbers[obj.getNum()]
+
+            # load userType
+            userTypes = loadUserTypesFromTable(cursor, trajectoryType, objectNumbers)
+            for obj in objects:
+                obj.userType = userTypes[obj.getNum()]
+             
+        except sqlite3.OperationalError as error:
+            printDBError(error)
+            objects = []
 
     connection.close()
     return objects
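A hedged usage sketch of the updated loading function (the database filename is a placeholder; objectNumbers now defaults to None, which loads everything):

    import storage
    # load all objects (road users), with user types and feature numbers attached
    objects = storage.loadTrajectoriesFromSqlite('tracking.sqlite', 'object')
    # load only the feature trajectories with ids between 0 and 20
    features = storage.loadTrajectoriesFromSqlite('tracking.sqlite', 'feature', 20)
    # load objects 3, 5 and 8
    someObjects = storage.loadTrajectoriesFromSqlite('tracking.sqlite', 'object', [3, 5, 8])
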
 
-def removeObjectsFromSqlite(filename):
-    'Removes the objects and object_features tables in the filename'
-    import sqlite3
+def loadGroundTruthFromSqlite(filename, gtType, gtNumbers = None):
+    'Loads bounding box annotations (ground truth) from an SQLite database'
     connection = sqlite3.connect(filename)
-    utils.dropTables(connection, ['objects', 'objects_features'])
+    gt = []
+
+    if gtType == 'bb':
+        topCorners = loadTrajectoriesFromTable(connection, 'bounding_boxes', 'bbtop', gtNumbers)
+        bottomCorners = loadTrajectoriesFromTable(connection, 'bounding_boxes', 'bbbottom', gtNumbers)
+        userTypes = loadUserTypesFromTable(connection.cursor(), 'object', gtNumbers) # string format is same as object
+        
+        for t, b in zip(topCorners, bottomCorners):
+            num = t.getNum()
+            if t.getNum() == b.getNum():
+                annotation = moving.BBAnnotation(num, t.getTimeInterval(), t, b, userTypes[num])
+                gt.append(annotation)
+    else:
+        print ('Unknown type of annotation {}'.format(gtType))
+
+    connection.close()
+    return gt
+
+def deleteFromSqlite(filename, dataType):
+    'Deletes (drops) some tables in the filename depending on type of data'
+    import os
+    if os.path.isfile(filename):
+        connection = sqlite3.connect(filename)
+        if dataType == 'object':
+            dropTables(connection, ['objects', 'objects_features'])
+        elif dataType == 'interaction':
+            dropTables(connection, ['interactions', 'indicators'])
+        elif dataType == 'bb':
+            dropTables(connection, ['bounding_boxes'])
+        else:
+            print('Unknown data type {} to delete from database'.format(dataType))
+        connection.close()
+    else:
+        print('{} does not exist'.format(filename))
+
+def createInteractionTable(cursor):
+    cursor.execute('CREATE TABLE IF NOT EXISTS interactions (id INTEGER PRIMARY KEY, object_id1 INTEGER, object_id2 INTEGER, first_frame_number INTEGER, last_frame_number INTEGER, FOREIGN KEY(object_id1) REFERENCES objects(id), FOREIGN KEY(object_id2) REFERENCES objects(id))')
+
+def createIndicatorTables(cursor):
+    # cursor.execute('CREATE TABLE IF NOT EXISTS indicators (id INTEGER PRIMARY KEY, interaction_id INTEGER, indicator_type INTEGER, FOREIGN KEY(interaction_id) REFERENCES interactions(id))')
+    # cursor.execute('CREATE TABLE IF NOT EXISTS indicator_values (indicator_id INTEGER, frame_number INTEGER, value REAL, FOREIGN KEY(indicator_id) REFERENCES indicators(id), PRIMARY KEY(indicator_id, frame_number))')
+    cursor.execute('CREATE TABLE IF NOT EXISTS indicators (interaction_id INTEGER, indicator_type INTEGER, frame_number INTEGER, value REAL, FOREIGN KEY(interaction_id) REFERENCES interactions(id), PRIMARY KEY(interaction_id, indicator_type, frame_number))')
+
+def saveInteraction(cursor, interaction):
+    roadUserNumbers = list(interaction.getRoadUserNumbers())
+    cursor.execute('INSERT INTO interactions VALUES({}, {}, {}, {}, {})'.format(interaction.getNum(), roadUserNumbers[0], roadUserNumbers[1], interaction.getFirstInstant(), interaction.getLastInstant()))
+
+def saveInteractions(filename, interactions):
+    'Saves the interactions in the table'
+    connection = sqlite3.connect(filename)
+    cursor = connection.cursor()
+    try:
+        createInteractionTable(cursor)
+        for inter in interactions:
+            saveInteraction(cursor, inter)
+    except sqlite3.OperationalError as error:
+        printDBError(error)
+    connection.commit()
+    connection.close()
+
+def saveIndicator(cursor, interactionNum, indicator):
+    for instant in indicator.getTimeInterval():
+        if indicator[instant]:
+            cursor.execute('INSERT INTO indicators VALUES({}, {}, {}, {})'.format(interactionNum, events.Interaction.indicatorNameToIndices[indicator.getName()], instant, indicator[instant]))
+
+def saveIndicators(filename, interactions, indicatorNames = events.Interaction.indicatorNames):
+    'Saves the indicator values in the table'
+    connection = sqlite3.connect(filename)
+    cursor = connection.cursor()
+    try:
+        createInteractionTable(cursor)
+        createIndicatorTables(cursor)
+        for inter in interactions:
+            saveInteraction(cursor, inter)
+            for indicatorName in indicatorNames:
+                indicator = inter.getIndicator(indicatorName)
+                if indicator != None:
+                    saveIndicator(cursor, inter.getNum(), indicator)
+    except sqlite3.OperationalError as error:
+        printDBError(error)
+    connection.commit()
     connection.close()
 
+def loadInteractions(filename):
+    '''Loads interactions and their indicators
+    
+    TODO choose the interactions to load'''
+    interactions = []
+    connection = sqlite3.connect(filename)
+    cursor = connection.cursor()
+    try:
+        cursor.execute('select INT.id, INT.object_id1, INT.object_id2, INT.first_frame_number, INT.last_frame_number, IND.indicator_type, IND.frame_number, IND.value from interactions INT, indicators IND where INT.id = IND.interaction_id ORDER BY INT.id, IND.indicator_type')
+        interactionNum = -1
+        indicatorTypeNum = -1
+        tmpIndicators = {}
+        for row in cursor:
+            if row[0] != interactionNum: # save interaction and create new interaction
+                if interactionNum >= 0:
+                    interactions.append(events.Interaction(interactionNum, moving.TimeInterval(row[3],row[4]), roadUserNumbers[0], roadUserNumbers[1]))
+                    interactions[-1].indicators = tmpIndicators
+                    tmpIndicators = {}
+                interactionNum = row[0]
+                roadUserNumbers = row[1:3]
+            if indicatorTypeNum != row[5]:
+                if indicatorTypeNum >= 0:
+                    indicatorName = events.Interaction.indicatorNames[indicatorTypeNum]
+                    tmpIndicators[indicatorName] = indicators.SeverityIndicator(indicatorName, indicatorValues)
+                indicatorTypeNum = row[5]
+                indicatorValues = {row[6]:row[7]}
+            else:
+                indicatorValues[row[6]] = row[7]
+        if interactionNum >= 0:
+            if indicatorTypeNum >= 0:
+                indicatorName = events.Interaction.indicatorNames[indicatorTypeNum]
+                tmpIndicators[indicatorName] = indicators.SeverityIndicator(indicatorName, indicatorValues)
+            interactions.append(events.Interaction(interactionNum, moving.TimeInterval(row[3],row[4]), roadUserNumbers[0], roadUserNumbers[1]))
+            interactions[-1].indicators = tmpIndicators
+    except sqlite3.OperationalError as error:
+        printDBError(error)
+        return []
+    connection.close()
+    return interactions
+# load first and last object instants
+# CREATE TEMP TABLE IF NOT EXISTS object_instants AS SELECT OF.object_id, min(frame_number) as first_instant, max(frame_number) as last_instant from positions P, objects_features OF where P.trajectory_id = OF.trajectory_id group by OF.object_id order by OF.object_id
+
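A hedged sketch of the interaction save/load round trip introduced above ('interactions.sqlite' is a placeholder; interactions is assumed to be a list of events.Interaction objects whose indicators have already been computed):

    import storage
    storage.saveIndicators('interactions.sqlite', interactions)
    loadedInteractions = storage.loadInteractions('interactions.sqlite')
    for inter in loadedInteractions:
        # inter.indicators maps indicator names to indicators.SeverityIndicator objects
        print('{}: {}'.format(inter.getNum(), sorted(inter.indicators.keys())))
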
+def createBoundingBoxTable(filename, invHomography = None):
+    '''Create the table to store the object bounding boxes in image space
+    '''
+    connection = sqlite3.connect(filename)
+    cursor = connection.cursor()
+    try:
+        cursor.execute('CREATE TABLE IF NOT EXISTS bounding_boxes (object_id INTEGER, frame_number INTEGER, x_top_left REAL, y_top_left REAL, x_bottom_right REAL, y_bottom_right REAL,  PRIMARY KEY(object_id, frame_number))')
+        cursor.execute('INSERT INTO bounding_boxes SELECT object_id, frame_number, min(x), min(y), max(x), max(y) from '
+              '(SELECT object_id, frame_number, (x*{}+y*{}+{})/w as x, (x*{}+y*{}+{})/w as y from '
+              '(SELECT OF.object_id, P.frame_number, P.x_coordinate as x, P.y_coordinate as y, P.x_coordinate*{}+P.y_coordinate*{}+{} as w from positions P, objects_features OF where P.trajectory_id = OF.trajectory_id)) '.format(invHomography[0,0], invHomography[0,1], invHomography[0,2], invHomography[1,0], invHomography[1,1], invHomography[1,2], invHomography[2,0], invHomography[2,1], invHomography[2,2])+
+              'GROUP BY object_id, frame_number')
+    except sqlite3.OperationalError as error:
+        printDBError(error)
+    connection.commit()
+    connection.close()
+
+def loadBoundingBoxTableForDisplay(filename):
+    connection = sqlite3.connect(filename)
+    cursor = connection.cursor()
+    boundingBoxes = {} # list of bounding boxes for each instant
+    try:
+        cursor.execute('SELECT name FROM sqlite_master WHERE type=\'table\' AND name=\'bounding_boxes\'')
+        result = [row for row in cursor]
+        if len(result) > 0:
+            cursor.execute('SELECT * FROM bounding_boxes')
+            for row in cursor:
+                boundingBoxes.setdefault(row[1], []).append([moving.Point(row[2], row[3]), moving.Point(row[4], row[5])])
+    except sqlite3.OperationalError as error:
+        printDBError(error)
+        return boundingBoxes
+    connection.close()
+    return boundingBoxes
+
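createBoundingBoxTable expects the inverse of the homography used for tracking (assumed to map image to world coordinates), so that the SQL above can project the stored world positions back to image space; a hedged usage sketch (the filenames are placeholders):

    import numpy as np
    import storage
    homography = np.loadtxt('homography.txt') # homography used for tracking
    storage.createBoundingBoxTable('tracking.sqlite', np.linalg.inv(homography))
    # per frame number, a list of [top left, bottom right] Points
    boundingBoxes = storage.loadBoundingBoxTableForDisplay('tracking.sqlite')
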
+def loadBoundingBoxTable(filename):
+    connection = sqlite3.connect(filename)
+    cursor = connection.cursor()
+    boundingBoxes = []
+    
+    try:
+        pass
+    except sqlite3.OperationalError as error:
+        printDBError(error)
+        return boundingBoxes
+    connection.close()
+    return boundingBoxes
+
+
+#########################
+# txt files
+#########################
+
+def openCheck(filename, option = 'r', quitting = False):
+    '''Opens the file filename (in read mode by default)
+    and checks that it could be opened'''
+    try:
+        return open(filename, option)
+    except IOError:
+        print 'File %s could not be opened.' % filename
+        if quitting:
+            from sys import exit
+            exit()
+        return None
+
+def readline(f, commentCharacters = commentChar):
+    '''Modified readline function to skip comments
+    commentCharacters can be a list of characters or a string (the in operator works with both)'''
+    s = f.readline()
+    while (len(s) > 0) and s[0] in commentCharacters:
+        s = f.readline()
+    return s.strip()
+
+def getLines(f, commentCharacters = commentChar):
+    '''Gets a complete entry (all the lines until a blank line or the end of file).'''
+    dataStrings = []
+    s = readline(f, commentCharacters)
+    while len(s) > 0:
+        dataStrings += [s.strip()]
+        s = readline(f, commentCharacters)
+    return dataStrings
+
+def writeList(filename, l):
+    f = openCheck(filename, 'w')
+    for x in l:
+        f.write('{}\n'.format(x))
+    f.close()
+
+def loadListStrings(filename, commentCharacters = commentChar):
+    f = openCheck(filename, 'r')
+    result = getLines(f, commentCharacters)
+    f.close()
+    return result
+
+def getValuesFromINIFile(filename, option, delimiterChar = '=', commentCharacters = commentChar):
+    values = []
+    for l in loadListStrings(filename, commentCharacters):
+        if l.startswith(option):
+            values.append(l.split(delimiterChar)[1].strip())
+    return values
+
+class FakeSecHead(object):
+    '''Add fake section header [asection]
+
+    from http://stackoverflow.com/questions/2819696/parsing-properties-file-in-python/2819788#2819788
+    use read_file in Python 3.2+
+    '''
+    def __init__(self, fp):
+        self.fp = fp
+        self.sechead = '[main]\n'
+
+    def readline(self):
+        if self.sechead:
+            try: return self.sechead
+            finally: self.sechead = None
+        else: return self.fp.readline()
+
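FakeSecHead lets ConfigParser parse a flat key=value tracking configuration file by injecting a dummy [main] section; a minimal sketch of its use, mirroring what ProcessParameters.loadConfigFile does below (the filename is a placeholder):

    from ConfigParser import ConfigParser
    config = ConfigParser()
    config.readfp(FakeSecHead(openCheck('tracking.cfg')))
    sectionHeader = config.sections()[0] # 'main'
    print(config.get(sectionHeader, 'video-filename'))
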
+def loadTrajectoriesFromVissimFile(filename, simulationStepsPerTimeUnit, nObjects = -1, warmUpLastInstant = None):
+    '''Reads data from VISSIM .fzp trajectory file
+    simulationStepsPerTimeUnit is the number of simulation steps per unit of time used by VISSIM
+    for example, VISSIM appears to use 5 simulation steps per simulated second,
+    so simulationStepsPerTimeUnit should then be 5,
+    and all times will correspond to simulation step numbers (and can be stored as integers)
+
+    Assumed to be sorted over time'''
+    objects = {} # dictionary of objects indexed by their id
+    firstInstants = {}
+
+    inputfile = openCheck(filename, quitting = True)
+
+    # data = pd.read_csv(filename, skiprows=15, delimiter=';')
+    # skip header: 15 lines + 1
+    line = readline(inputfile, '*$')
+    while len(line) > 0:#for line in inputfile:
+        data = line.strip().split(';')
+        objNum = int(data[1])
+        instant = int(float(data[0])*simulationStepsPerTimeUnit)
+        s = float(data[4])
+        y = float(data[5])
+        lane = data[2]+'_'+data[3]
+        if objNum not in firstInstants:
+            firstInstants[objNum] = instant
+            if warmUpLastInstant == None or firstInstants[objNum] >= warmUpLastInstant:
+                if nObjects < 0 or len(objects) < nObjects:
+                    objects[objNum] = moving.MovingObject(num = objNum, timeInterval = moving.TimeInterval(instant, instant))
+                    objects[objNum].curvilinearPositions = moving.CurvilinearTrajectory()
+        if (warmUpLastInstant == None or firstInstants[objNum] >= warmUpLastInstant) and objNum in objects:
+            objects[objNum].timeInterval.last = instant
+            objects[objNum].curvilinearPositions.addPositionSYL(s, y, lane)
+        line = readline(inputfile, '*$')
+
+    return objects.values()
+    
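A hedged usage sketch of the VISSIM loader (the .fzp filename and the warm-up period are placeholders; 5 steps per simulated second follows the comment in the docstring):

    objects = loadTrajectoriesFromVissimFile('simulation.fzp', simulationStepsPerTimeUnit = 5, warmUpLastInstant = 600)
    # curvilinear positions are stored as (S, Y, lane) triplets
    print('{} objects loaded'.format(len(objects)))
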
 def loadTrajectoriesFromNgsimFile(filename, nObjects = -1, sequenceNum = -1):
     '''Reads data from the trajectory data provided by NGSIM project 
     and returns the list of Feature objects'''
     objects = []
 
-    input = utils.openCheck(filename)
-    if not input:
-        import sys
-        sys.exit()
+    inputfile = openCheck(filename, quitting = True)
 
     def createObject(numbers):
         firstFrameNum = int(numbers[1])
@@ -211,21 +678,21 @@
         obj.followingVehicles = [int(numbers[15])] # following vehicle (after)
         obj.spaceHeadways = [float(numbers[16])] # feet
         obj.timeHeadways = [float(numbers[17])] # seconds
-        obj.curvilinearPositions = moving.Trajectory([[float(numbers[5])],[float(numbers[4])]]) # X is the longitudinal coordinate
+        obj.curvilinearPositions = moving.CurvilinearTrajectory([float(numbers[5])],[float(numbers[4])], obj.laneNums) # X is the longitudinal coordinate
         obj.speeds = [float(numbers[11])]
         obj.size = [float(numbers[8]), float(numbers[9])] # 8 lengh, 9 width # TODO: temporary, should use a geometry object
         return obj
 
-    numbers = input.readline().strip().split()
+    numbers = readline(inputfile).strip().split()
     if (len(numbers) > 0):
         obj = createObject(numbers)
 
-    for line in input:
+    for line in inputfile:
         numbers = line.strip().split()
-        if obj.num != int(numbers[0]):
+        if obj.getNum() != int(numbers[0]):
             # check and adapt the length to deal with issues in NGSIM data
             if (obj.length() != obj.positions.length()):
-                print 'length pb with object %s (%d,%d)' % (obj.num,obj.length(),obj.positions.length())
+                print 'length pb with object %s (%d,%d)' % (obj.getNum(),obj.length(),obj.positions.length())
                 obj.last = obj.getFirstInstant()+obj.positions.length()-1
                 #obj.velocities = utils.computeVelocities(f.positions) # compare norm to speeds ?
             objects.append(obj)
@@ -233,33 +700,33 @@
                 break
             obj = createObject(numbers)
         else:
+            obj.laneNums.append(int(numbers[13]))
             obj.positions.addPositionXY(float(numbers[6]), float(numbers[7]))
-            obj.curvilinearPositions.addPositionXY(float(numbers[5]), float(numbers[4]))
+            obj.curvilinearPositions.addPositionSYL(float(numbers[5]), float(numbers[4]), obj.laneNums[-1])
             obj.speeds.append(float(numbers[11]))
-            obj.laneNums.append(int(numbers[13]))
             obj.precedingVehicles.append(int(numbers[14]))
             obj.followingVehicles.append(int(numbers[15]))
             obj.spaceHeadways.append(float(numbers[16]))
             obj.timeHeadways.append(float(numbers[17]))
 
             if (obj.size[0] != float(numbers[8])):
-                print 'changed length obj %d' % (f.num)
+                print 'changed length obj %d' % (obj.getNum())
             if (obj.size[1] != float(numbers[9])):
-                print 'changed width obj %d' % (f.num)
+                print 'changed width obj %d' % (obj.getNum())
     
-    input.close()
+    inputfile.close()
     return objects
 
-def convertNgsimFile(inFile, outFile, append = False, nObjects = -1, sequenceNum = 0):
+def convertNgsimFile(inputfile, outputfile, append = False, nObjects = -1, sequenceNum = 0):
     '''Reads data from the trajectory data provided by NGSIM project
     and converts to our current format.'''
     if append:
-        out = open(outFile,'a')
+        out = openCheck(outputfile,'a')
     else:
-        out = open(outFile,'w')
+        out = openCheck(outputfile,'w')
     nObjectsPerType = [0,0,0]
 
-    features = loadNgsimFile(inFile, sequenceNum)
+    features = loadNgsimFile(inputfile, sequenceNum)
     for f in features:
         nObjectsPerType[f.userType-1] += 1
         f.write(out)
@@ -268,6 +735,114 @@
         
     out.close()
 
+def writePositionsToCsv(f, obj):
+    timeInterval = obj.getTimeInterval()
+    positions = obj.getPositions()
+    curvilinearPositions = obj.getCurvilinearPositions()
+    for i in xrange(int(obj.length())):
+        p1 = positions[i]
+        s = '{},{},{},{}'.format(obj.num,timeInterval[i],p1.x,p1.y)
+        if curvilinearPositions != None:
+            p2 = curvilinearPositions[i]
+            s += ',{},{}'.format(p2[0],p2[1])
+        f.write(s+'\n')
+
+def writeTrajectoriesToCsv(filename, objects):
+    f = openCheck(filename, 'w')
+    for i,obj in enumerate(objects):
+        writePositionsToCsv(f, obj)
+    f.close()
+
+
+#########################
+# Utils to read .ini type text files for configuration, meta data...
+#########################
+
+class ProcessParameters:
+    '''Class for all parameters controlling data processing: input,
+    method parameters, etc. for tracking, classification and safety
+
+    Note: framerate is already taken into account'''
+
+    def loadConfigFile(self, filename):
+        from ConfigParser import ConfigParser
+        from numpy import loadtxt
+        from os import path
+
+        config = ConfigParser()
+        config.readfp(FakeSecHead(openCheck(filename)))
+        self.sectionHeader = config.sections()[0]
+        # Tracking/display parameters
+        self.videoFilename = config.get(self.sectionHeader, 'video-filename')
+        self.databaseFilename = config.get(self.sectionHeader, 'database-filename')
+        self.homographyFilename = config.get(self.sectionHeader, 'homography-filename')
+        if (path.exists(self.homographyFilename)):
+            self.homography = loadtxt(self.homographyFilename)
+        else:
+            self.homography = None
+        self.intrinsicCameraFilename = config.get(self.sectionHeader, 'intrinsic-camera-filename')
+        if (path.exists(self.intrinsicCameraFilename)):
+            self.intrinsicCameraMatrix = loadtxt(self.intrinsicCameraFilename)
+        else:
+            self.intrinsicCameraMatrix = None
+        distortionCoefficients = getValuesFromINIFile(filename, 'distortion-coefficients', '=')        
+        self.distortionCoefficients = [float(x) for x in distortionCoefficients]
+        self.undistortedImageMultiplication  = config.getfloat(self.sectionHeader, 'undistorted-size-multiplication')
+        self.undistort = config.getboolean(self.sectionHeader, 'undistort')
+        self.firstFrameNum = config.getint(self.sectionHeader, 'frame1')
+        self.videoFrameRate = config.getfloat(self.sectionHeader, 'video-fps')
+
+        # Classification parameters
+        
+
+        # Safety parameters
+        self.maxPredictedSpeed = config.getfloat(self.sectionHeader, 'max-predicted-speed')/3.6/self.videoFrameRate
+        self.predictionTimeHorizon = config.getfloat(self.sectionHeader, 'prediction-time-horizon')*self.videoFrameRate
+        self.collisionDistance = config.getfloat(self.sectionHeader, 'collision-distance')
+        self.crossingZones = config.getboolean(self.sectionHeader, 'crossing-zones')
+        self.predictionMethod = config.get(self.sectionHeader, 'prediction-method')
+        self.nPredictedTrajectories = config.getint(self.sectionHeader, 'npredicted-trajectories')
+        self.maxNormalAcceleration = config.getfloat(self.sectionHeader, 'max-normal-acceleration')/self.videoFrameRate**2
+        self.maxNormalSteering = config.getfloat(self.sectionHeader, 'max-normal-steering')/self.videoFrameRate
+        self.minExtremeAcceleration = config.getfloat(self.sectionHeader, 'min-extreme-acceleration')/self.videoFrameRate**2
+        self.maxExtremeAcceleration = config.getfloat(self.sectionHeader, 'max-extreme-acceleration')/self.videoFrameRate**2
+        self.maxExtremeSteering = config.getfloat(self.sectionHeader, 'max-extreme-steering')/self.videoFrameRate
+        self.useFeaturesForPrediction = config.getboolean(self.sectionHeader, 'use-features-prediction')
+
+    def __init__(self, filename = None):
+        if filename != None:
+            self.loadConfigFile(filename)
+
+class SceneParameters:
+    def __init__(self, config, sectionName):
+        from ConfigParser import NoOptionError
+        from ast import literal_eval
+        try:
+            self.sitename = config.get(sectionName, 'sitename')
+            self.databaseFilename = config.get(sectionName, 'data-filename')
+            self.homographyFilename = config.get(sectionName, 'homography-filename')
+            self.calibrationFilename = config.get(sectionName, 'calibration-filename') 
+            self.videoFilename = config.get(sectionName, 'video-filename')
+            self.frameRate = config.getfloat(sectionName, 'framerate')
+            self.date = datetime.strptime(config.get(sectionName, 'date'), datetimeFormat) # 2011-06-22 11:00:39
+            self.translation = literal_eval(config.get(sectionName, 'translation')) #         = [0.0, 0.0]
+            self.rotation = config.getfloat(sectionName, 'rotation')
+            self.duration = config.getint(sectionName, 'duration')
+        except NoOptionError as e:
+            print(e)
+            print('Not a section for scene meta-data')
+
+    @staticmethod
+    def loadConfigFile(filename):
+        from ConfigParser import ConfigParser
+        config = ConfigParser()
+        config.readfp(openCheck(filename))
+        configDict = dict()
+        for sectionName in config.sections():
+            configDict[sectionName] = SceneParameters(config, sectionName) 
+        return configDict
+
+
 if __name__ == "__main__":
     import doctest
     import unittest
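For reference, a minimal sketch of loading the processing parameters from a tracking configuration file (the filename is a placeholder; as noted in the class docstring, the safety parameters are already converted to frame-based units by loadConfigFile):

    import storage
    params = storage.ProcessParameters('tracking.cfg')
    print('{} {}'.format(params.videoFilename, params.databaseFilename))
    print('{} {}'.format(params.maxPredictedSpeed, params.predictionTimeHorizon)) # world units per frame, frames
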
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/python/tests/events.txt	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,11 @@
+>>> from events import *
+>>> from moving import MovingObject, TimeInterval
+
+>>> objects = [MovingObject(num = i, timeInterval = TimeInterval(0,10)) for i in range(10)]
+>>> interactions = createInteractions(objects)
+>>> len([i for i in interactions if len(i.roadUserNumbers) == 1])
+0
+>>> objects2 = [MovingObject(num = i, timeInterval = TimeInterval(0,10)) for i in range(100, 110)]
+>>> interactions = createInteractions(objects, objects2)
+>>> len([i for i in interactions if len(i.roadUserNumbers) == 1])
+0
--- a/python/tests/moving.txt	Thu Apr 18 15:29:33 2013 -0400
+++ b/python/tests/moving.txt	Fri Dec 05 12:13:53 2014 -0500
@@ -42,6 +42,10 @@
 
 >>> Point(3,4)-Point(1,7)
 (2.000000,-3.000000)
+>>> -Point(1,2)
+(-1.000000,-2.000000)
+>>> Point(1,2).multiply(0.5)
+(0.500000,1.000000)
 
 >>> Point(3,2).norm2Squared()
 13
@@ -49,19 +53,29 @@
 >>> Point.distanceNorm2(Point(3,4),Point(1,7))
 3.605551275463989
 
->>> Point(3,2).inPolygon([Point(0,0),Point(1,0),Point(1,1),Point(0,1)])
+>>> Point(3,2).inPolygonNoShapely(np.array([[0,0],[1,0],[1,1],[0,1]]))
 False
->>> Point(3,2).inPolygon([Point(0,0),Point(4,0),Point(4,3),Point(0,3)])
+>>> Point(3,2).inPolygonNoShapely(np.array([[0,0],[4,0],[4,3],[0,3]]))
 True
 
 >>> predictPositionNoLimit(10, Point(0,0), Point(1,1)) # doctest:+ELLIPSIS
 ((1.0...,1.0...), (10.0...,10.0...))
 
->>> segmentIntersection(Point(0,0),Point(1,1), Point(0,1), Point(1,2))
->>> segmentIntersection(Point(0,1),Point(1,0), Point(0,2), Point(2,1))
->>> segmentIntersection(Point(0,0),Point(2,0), Point(1,-1),Point(1,1))
-(1.000000,0.000000)
->>> segmentIntersection(Point(0,1),Point(2,0),Point(1,1),Point(1,2))
+>>> segmentIntersection(Point(0,0), Point(0,1), Point(1,1), Point(2,3))
+>>> segmentIntersection(Point(0,1), Point(0,3), Point(1,0), Point(3,1))
+>>> segmentIntersection(Point(0.,0.), Point(2.,2.), Point(0.,2.), Point(2.,0.))
+(1.000000,1.000000)
+>>> segmentIntersection(Point(0,1), Point(1,2), Point(2,0), Point(3,2))
+
+>>> left = Trajectory.fromPointList([(92.291666666666686, 102.99239033124439), (56.774193548387103, 69.688898836168306)])
+>>> middle = Trajectory.fromPointList([(87.211021505376351, 93.390778871978512), (59.032258064516128, 67.540286481647257)])
+>>> right = Trajectory.fromPointList([(118.82392473118281, 115.68263205013426), (63.172043010752688, 66.600268576544309)])
+>>> alignments = [left, middle, right]
+>>> for a in alignments: a.computeCumulativeDistances()
+>>> getSYfromXY(Point(73, 82), alignments)
+[1, 0, (73.819977,81.106170), 18.172277808821125, 18.172277808821125, 1.2129694042343868]
+>>> getSYfromXY(Point(78, 83), alignments, 0.5)
+[1, 0, (77.033188,84.053889), 13.811799123113715, 13.811799123113715, -1.4301775140225983]
 
 >>> Trajectory().length()
 0
@@ -70,12 +84,79 @@
 True
 >>> t1[1]
 (1.500000,3.500000)
->>> t1.getTrajectoryInPolygon(np.array([[0,0],[4,0],[4,3],[0,3]]))
+>>> t1.getTrajectoryInPolygonNoShapely(np.array([[0,0],[4,0],[4,3],[0,3]]))
 (0.500000,0.500000)
->>> t1.getTrajectoryInPolygon(np.array([[10,10],[14,10],[14,13],[10,13]])).length()
+>>> t1.getTrajectoryInPolygonNoShapely(np.array([[10,10],[14,10],[14,13],[10,13]])).length()
 0
 
->>> Trajectory.norm2LCSS(t1, t1, 0.1)
+>>> t1.differentiate()
+(1.000000,3.000000) (1.000000,3.000000)
+>>> t1.differentiate(True)
+(1.000000,3.000000) (1.000000,3.000000) (1.000000,3.000000)
+>>> t1 = Trajectory([[0.5,1.5,3.5],[0.5,2.5,7.5]])
+>>> t1.differentiate()
+(1.000000,2.000000) (2.000000,5.000000)
+
+>>> t1.computeCumulativeDistances()
+>>> t1.getDistance(0)
+2.23606797749979
+>>> t1.getDistance(1)
+5.385164807134504
+>>> t1.getDistance(2)
+Index 2 beyond trajectory length 3-1
+>>> t1.getCumulativeDistance(0)
+0.0
+>>> t1.getCumulativeDistance(1)
+2.23606797749979
+>>> t1.getCumulativeDistance(2)
+7.6212327846342935
+>>> t1.getCumulativeDistance(3)
+Index 3 beyond trajectory length 3
+
+
+>>> from utils import LCSS
+>>> lcss = LCSS(lambda x,y: Point.distanceNorm2(x,y) <= 0.1)
+>>> Trajectory.lcss(t1, t1, lcss)
+3
+>>> lcss = LCSS(lambda p1, p2: (p1-p2).normMax() <= 0.1)
+>>> Trajectory.lcss(t1, t1, lcss)
 3
->>> Trajectory.normMaxLCSS(t1, t1, 0.1)
-3
+
+>>> p1=Point(0,0)
+>>> p2=Point(1,0)
+>>> v1 = Point(0.1,0.1)
+>>> v2 = Point(-0.1, 0.1)
+>>> abs(Point.timeToCollision(p1, p2, v1, v2, 0.)-5.0) < 0.00001
+True
+>>> abs(Point.timeToCollision(p1, p2, v1, v2, 0.1)-4.5) < 0.00001
+True
+>>> p1=Point(0,1)
+>>> p2=Point(1,0)
+>>> v1 = Point(0,0.1)
+>>> v2 = Point(0.1, 0)
+>>> Point.timeToCollision(p1, p2, v1, v2, 0.) == None
+True
+>>> Point.timeToCollision(p2, p1, v2, v1, 0.) == None
+True
+>>> Point.midPoint(p1, p2)
+(0.500000,0.500000)
+
+>>> t = CurvilinearTrajectory(S = [1., 2., 3., 5.], Y = [0.5, 0.5, 0.6, 0.7], lanes = ['1']*4)
+>>> t.differentiate() # doctest:+ELLIPSIS
+[1.0, 0.0, '1'] [1.0, 0.099..., '1'] [2.0, 0.099..., '1']
+>>> t.differentiate(True) # doctest:+ELLIPSIS
+[1.0, 0.0, '1'] [1.0, 0.099..., '1'] [2.0, 0.099..., '1'] [2.0, 0.099..., '1']
+>>> t = CurvilinearTrajectory(S = [1.], Y = [0.5], lanes = ['1'])
+>>> t.differentiate().empty()
+True
+
+>>> o1 = MovingObject(positions = Trajectory([[0]*3,[2]*3]), velocities = Trajectory([[0]*3,[1]*3]))
+>>> o1.classifyUserTypeSpeedMotorized(0.5, np.median)
+>>> userTypeNames[o1.getUserType()]
+'car'
+>>> o1.classifyUserTypeSpeedMotorized(0.5, np.mean)
+>>> userTypeNames[o1.getUserType()]
+'car'
+>>> o1.classifyUserTypeSpeedMotorized(1.5, np.median)
+>>> userTypeNames[o1.getUserType()]
+'pedestrian'
--- a/python/tests/prediction.txt	Thu Apr 18 15:29:33 2013 -0400
+++ b/python/tests/prediction.txt	Fri Dec 05 12:13:53 2014 -0500
@@ -18,7 +18,7 @@
 >>> import random
 >>> acceleration = lambda: random.uniform(-0.5,0.5)
 >>> steering = lambda: random.uniform(-0.1,0.1)
->>> et = prediction.PredictedTrajectoryNormalAdaptation(moving.Point(0,0),moving.Point(1,1), acceleration, steering, maxSpeed = 2)
+>>> et = prediction.PredictedTrajectoryRandomControl(moving.Point(0,0),moving.Point(1,1), acceleration, steering, maxSpeed = 2)
 >>> p = et.predictPosition(500)
 >>> from numpy import max
 >>> max(et.getPredictedSpeeds()) <= 2.
--- a/python/tests/storage.txt	Thu Apr 18 15:29:33 2013 -0400
+++ b/python/tests/storage.txt	Fri Dec 05 12:13:53 2014 -0500
@@ -1,9 +1,31 @@
 >>> from storage import *
+>>> from StringIO import StringIO
 
->>> loadPrototypeMatchIndexesFromSqlite("nonexistent")
+>>> f = openCheck('non_existant_file.txt')
+File non_existant_file.txt could not be opened.
+
+>>> nonexistentFilename = "nonexistent"
+>>> loadPrototypeMatchIndexesFromSqlite(nonexistentFilename)
 DB Error: no such table: prototypes
 []
->>> loadTrajectoriesFromSqlite("nonexistent", 'feature')
+>>> loadTrajectoriesFromSqlite(nonexistentFilename, 'feature')
 DB Error: no such table: positions
 DB Error: no such table: velocities
 []
+>>> from os import remove
+>>> remove(nonexistentFilename)
+
+>>> strio = StringIO('# asdlfjasdlkj0\nsadlkfjsdlakjf')
+>>> readline(strio)
+'sadlkfjsdlakjf'
+>>> strio = StringIO('# asdlfjasdlkj0\nsadlkfjsdlakjf')
+>>> readline(strio, ['#'])
+'sadlkfjsdlakjf'
+>>> strio = StringIO('# asdlfjasdlkj0\nsadlkfjsdlakjf')
+>>> readline(strio, ['%'])
+'# asdlfjasdlkj0'
+>>> strio = StringIO('# asdlfjasdlkj0\nsadlkfjsdlakjf')
+>>> readline(strio, '%*$')
+'# asdlfjasdlkj0'
+>>> readline(strio, '%#')
+'sadlkfjsdlakjf'
--- a/python/tests/utils.txt	Thu Apr 18 15:29:33 2013 -0400
+++ b/python/tests/utils.txt	Fri Dec 05 12:13:53 2014 -0500
@@ -20,9 +20,6 @@
 >>> inBetween(1,2,0)
 False
 
->>> f = openCheck('non_existant_file.txt')
-File non_existant_file.txt could not be opened.
-
 >>> removeExtension('test-adfasdf.asdfa.txt')
 'test-adfasdf.asdfa'
 >>> removeExtension('test-adfasdf')
@@ -44,10 +41,79 @@
 >>> stepPlot([3, 5, 7, 8], 1, 10, 0)
 ([1, 3, 3, 5, 5, 7, 7, 8, 8, 10], [0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
 
->>> LCSS(range(5), range(5), 0.1, lambda x,y:abs(x-y))
+>>> mostCommon(['a','b','c','b'])
+'b'
+>>> mostCommon(['a','b','c','b', 'c'])
+'b'
+>>> mostCommon(range(10)+[1])
+1
+>>> mostCommon([range(2), range(4), range(2)])
+[0, 1]
+
+>>> lcss = LCSS(lambda x,y: abs(x-y) <= 0.1)
+>>> lcss.compute(range(5), range(5))
 5
->>> LCSS(range(1,5), range(5), 0.1, lambda x,y:abs(x-y))
+>>> lcss.compute(range(1,5), range(5))
+4
+>>> lcss.compute(range(5,10), range(5))
+0
+>>> lcss.compute(range(5), range(10))
+5
+>>> lcss.compute(range(5), range(10), 2)
+5
+>>> lcss.similarityFunc = lambda x,y: x == y
+>>> lcss.compute(['a','b','c'], ['a','b','c', 'd'])
+3
+>>> lcss.computeNormalized(['a','b','c'], ['a','b','c', 'd']) #doctest: +ELLIPSIS
+1.0
+>>> lcss.computeNormalized(['a','b','c','x'], ['a','b','c', 'd']) #doctest: +ELLIPSIS
+0.75
+>>> lcss.compute(['a','b','c'], ['a','b','c', 'd'])
+3
+>>> lcss.compute(['a','x','b','c'], ['a','b','c','d','x'])
+3
+>>> lcss.compute(['a','b','c','x','d'], ['a','b','c','d','x'])
 4
->>> LCSS(range(5,10), range(5), 0.1, lambda x,y:abs(x-y))
+>>> lcss.delta = 1
+>>> lcss.compute(['a','b','c'], ['a','b','x','x','c'])
+2
+
+>>> lcss.delta = float('inf')
+>>> lcss.compute(['a','b','c'], ['a','b','c', 'd'], computeSubSequence = True)
+3
+>>> lcss.subSequenceIndices
+[(0, 0), (1, 1), (2, 2)]
+>>> lcss.compute(['a','b','c'], ['x','a','b','c'], computeSubSequence = True)
+3
+>>> lcss.subSequenceIndices
+[(0, 1), (1, 2), (2, 3)]
+>>> lcss.compute(['a','g','b','c'], ['a','b','c', 'd'], computeSubSequence = True)
+3
+>>> lcss.subSequenceIndices
+[(0, 0), (2, 1), (3, 2)]
+
+>>> alignedLcss = LCSS(lambda x,y:(abs(x-y) <= 0.1), delta = 2, aligned = True)
+>>> alignedLcss.compute(range(5), range(5))
+5
+>>> alignedLcss.compute(range(1,5), range(5))
+4
+
+>>> alignedLcss.compute(range(5,10), range(10))
+5
+
+>>> lcss.delta = 2
+>>> lcss.compute(range(5,10), range(10))
 0
-
+>>> alignedLcss.delta = 6
+>>> alignedLcss.compute(range(5), range(5))
+5
+>>> alignedLcss.compute(range(5), range(6))
+5
+>>> lcss.delta = 10
+>>> alignedLcss.compute(range(1,7), range(6))
+5
+>>> lcss = LCSS(lambda x,y: x == y, delta = 2, aligned = True)
+>>> lcss.compute(range(20), [2,4,6,7,8,9,11,13], True)
+8
+>>> lcss.subSequenceIndices
+[(2, 0), (4, 1), (6, 2), (7, 3), (8, 4), (9, 5), (11, 6), (13, 7)]
--- a/python/traffic_engineering.py	Thu Apr 18 15:29:33 2013 -0400
+++ b/python/traffic_engineering.py	Fri Dec 05 12:13:53 2014 -0500
@@ -12,6 +12,20 @@
 # Simulation
 #########################
 
+def generateTimeHeadways(meanTimeHeadway, simulationTime):
+    '''Generates the time headways between arrivals 
+    given the meanTimeHeadway and the negative exponential distribution
+    over a time interval of length simulationTime (assumed to be in the same time unit as the headways)'''
+    from random import expovariate
+    headways = []
+    totalTime = 0
+    flow = 1/meanTimeHeadway
+    while totalTime < simulationTime:
+        h = expovariate(flow)
+        headways.append(h)
+        totalTime += h
+    return headways
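+
+# A minimal usage sketch (the numbers are illustrative): one hour of arrivals with a
+# mean headway of 5 s; the loop overshoots the horizon, so the headways sum to at least 3600 s
+# headways = generateTimeHeadways(5., 3600.)
+# sum(headways) >= 3600.  # True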
+
 class Vehicle:
     '''Generic vehicle class
     1D coordinates for now'''
@@ -159,11 +173,16 @@
             self.types = types
             self.proportions = proportions
             self.equivalents = equivalents
-            self.nLanes = nLanes # unused
+            self.nLanes = nLanes
         else:
             print('Proportions do not sum to 1')
             pass
 
+    def checkProtected(self, opposedThroughMvt):
+        '''Checks if this left movement should be protected,
+        ie if one of the two main conditions for a protected left turn is verified'''
+        return self.volume >= 200 or self.volume*opposedThroughMvt.volume/opposedThroughMvt.nLanes > 50000
+
     def getPCUVolume(self):
         '''Returns the passenger-car equivalent for the input volume'''
         v = 0
@@ -182,15 +201,6 @@
     def getTVUVolume(self):
         return self.mvtEquivalent*self.volume.getPCUVolume()    
 
-class IntersectionApproach: # should probably not be used
-    def __init__(self, leftTurnVolume, throughVolume, rightTurnVolume):
-        self.leftTurnVolume = leftTurnVolume
-        self.throughVolume = throughVolume
-        self.rightTurnVolume = rightTurnVolume
-
-    def getTVUVolume(self, leftTurnEquivalent = 1, throughEquivalent = 1, rightTurnEquivalent = 1):
-        return self.leftTurnVolume.getPCUVolume()*leftTurnEquivalent+self.throughVolume.getPCUVolume()*throughEquivalent+self.rightTurnVolume.getPCUVolume()*rightTurnEquivalent
-
 class LaneGroup:
     '''Class that represents a group of mouvements'''
 
@@ -204,11 +214,6 @@
     def getCharge(self, saturationVolume):
         return self.getTVUVolume()/(self.nLanes*saturationVolume)
 
-def checkProtectedLeftTurn(leftMvt, opposedThroughMvt):
-    '''Checks if one of the main two conditions on left turn is verified
-    The lane groups should contain left and through movement'''
-    return leftMvt.volume >= 200 or leftMvt.volume*opposedThroughMvt.volume/opposedThroughMvt.nLanes > 50000
-
 def optimalCycle(lostTime, criticalCharge):
     return (1.5*lostTime+5)/(1-criticalCharge)
 
@@ -226,9 +231,7 @@
         self.saturationVolume = saturationVolume
 
     def computeCriticalCharges(self):
-        self.criticalCharges = []
-        for phase in self.phases:
-            self.criticalCharges.append(max([lg.getCharge(self.saturationVolume) for lg in phase]))
+        self.criticalCharges = [max([lg.getCharge(self.saturationVolume) for lg in phase]) for phase in self.phases]
         self.criticalCharge = sum(self.criticalCharges)
         
     def computeOptimalCycle(self):
@@ -242,7 +245,7 @@
         return self.C
 
     def computeEffectiveGreen(self):
-        from numpy import round
+        #from numpy import round
         #self.computeCycle() # in case it was not done before
         effectiveGreenTime = self.C-self.lostTime
         self.effectiveGreens = [round(c*effectiveGreenTime/self.criticalCharge,1) for c in self.criticalCharges]
@@ -263,6 +266,15 @@
     '''Computes the uniform delay'''
     return 0.5*cycleLength*(1-float(effectiveGreen)/cycleLength)/(1-float(effectiveGreen*saturationDegree)/cycleLength)
 
+def overflowDelay(T, X, c, k=0.5, I=1):
+    '''Computes the overflow delay (HCM)
+    T in hours
+    c capacity of the lane group
+    k default for fixed time signal
+    I=1 for isolated intersection (Poisson arrival)'''
+    from math import sqrt
+    return 900*T*(X - 1 + sqrt((X - 1)**2 + 8*k*I*X/(c*T)))
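+
+# Worked example (illustrative numbers): for a lane group of capacity c = 1800 veh/h
+# operating at X = 0.95 over T = 0.25 h with the default k and I,
+# overflowDelay(0.25, 0.95, 1800) = 225*(-0.05+sqrt(0.0025+3.8/450)), about 12.3 s/veh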
+
 #########################
 # misc
 #########################
--- a/python/ubc_utils.py	Thu Apr 18 15:29:33 2013 -0400
+++ b/python/ubc_utils.py	Fri Dec 05 12:13:53 2014 -0500
@@ -1,7 +1,7 @@
 #! /usr/bin/env python
 '''Various utilities to load data saved by the UBC tool(s)'''
 
-import utils
+import utils, events
 from moving import MovingObject, TimeInterval, Trajectory
 
 __metaclass__ = type
@@ -12,12 +12,18 @@
                  'contoursequence']
 
 severityIndicatorNames = ['Distance',
-                          'Cosine',
+                          'Collision Course Cosine',
                           'Velocity Cosine',
                           'Speed Differential',
                           'Collision Probability',
                           'Severity Index',
-                          'TTC']
+                          'Time to Collision']
+
+userTypeNames = ['car',
+                 'pedestrian',
+                 'twowheels',
+                 'bus',
+                 'truck']
 
 # severityIndicator = {'Distance': 0,
 #                      'Cosine': 1,
--- a/python/utils.py	Thu Apr 18 15:29:33 2013 -0400
+++ b/python/utils.py	Fri Dec 05 12:13:53 2014 -0500
@@ -3,12 +3,12 @@
 
 #from numpy import *
 #from pylab import *
+from datetime import time, datetime
+from math import sqrt
 
 __metaclass__ = type
 
-commentChar = '#'
-
-delimiterChar = '%';
+datetimeFormat = "%Y-%m-%d %H:%M:%S"
 
 #########################
 # Enumerations
@@ -22,46 +22,29 @@
     return result
 
 #########################
-# CLI utils
-#########################
-
-def parseCLIOptions(helpMessage, options, cliArgs, optionalOptions=[]):
-    ''' Simple function to handle similar argument parsing
-    Returns the dictionary of options and their values
-
-    * cliArgs are most likely directly sys.argv 
-    (only the elements after the first one are considered)
-    
-    * options should be a list of strings for getopt options, 
-    eg ['frame=','correspondences=','video=']
-    A value must be provided for each option, or the program quits'''
-    import sys, getopt
-    from numpy.core.fromnumeric import all
-    optionValues, args = getopt.getopt(cliArgs[1:], 'h', ['help']+options+optionalOptions)
-    optionValues = dict(optionValues)
-
-    if '--help' in optionValues.keys() or '-h' in optionValues.keys():
-        print(helpMessage+
-              '\n - Compulsory options: '+' '.join([opt.replace('=','') for opt in options])+
-              '\n - Non-compulsory options: '+' '.join([opt.replace('=','') for opt in optionalOptions]))
-        sys.exit()
-
-    missingArgument = [('--'+opt.replace('=','') in optionValues.keys()) for opt in options]
-    if not all(missingArgument):
-        print('Missing argument')
-        print(optionValues)
-        sys.exit()
-
-    return optionValues
-
-#########################
 # simple statistics
 #########################
 
-def confidenceInterval(mean, stdev, nSamples, percentConfidence, printLatex = False):
-    from math import sqrt
+def sampleSize(stdev, tolerance, percentConfidence, printLatex = False):
     from scipy.stats.distributions import norm
     k = round(norm.ppf(0.5+percentConfidence/200., 0, 1)*100)/100. # 1.-(100-percentConfidence)/200.
+    if printLatex:
+        print('${0}^2\\frac{{{1}^2}}{{{2}^2}}$'.format(k, stdev, tolerance))
+    return (k*stdev/tolerance)**2
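+
+# e.g. (illustrative numbers) sampleSize(2.3, 0.5, 95) is (1.96*2.3/0.5)**2, about 81.3,
+# ie roughly 82 observations for a tolerance of 0.5 unit at 95% confidence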
+
+def confidenceInterval(mean, stdev, nSamples, percentConfidence, trueStd = True, printLatex = False):
+    '''if trueStd, use normal distribution, otherwise, Student
+
+    Use otherwise t.interval or norm.interval
+    ex: norm.interval(0.95, loc = 0., scale = 2.3/sqrt(11))
+    t.interval(0.95, 10, loc=1.2, scale = 2.3/sqrt(nSamples))
+    loc is mean, scale is sigma/sqrt(n) (for Student, 10 is df)'''
+    from math import sqrt
+    from scipy.stats.distributions import norm, t
+    if trueStd:
+        k = round(norm.ppf(0.5+percentConfidence/200., 0, 1)*100)/100. # 1.-(100-percentConfidence)/200.
+    else: # use Student
+        k = round(t.ppf(0.5+percentConfidence/200., nSamples-1)*100)/100.
     e = k*stdev/sqrt(nSamples)
     if printLatex:
         print('${0} \pm {1}\\frac{{{2}}}{{\sqrt{{{3}}}}}$'.format(mean, k, stdev, nSamples))
@@ -78,15 +61,13 @@
     def nSamples(self):
         return sum(self.counts)
 
-def cumulativeDensityFunction(sample):
+def cumulativeDensityFunction(sample, normalized = False):
     '''Returns the cumulative density function of the sample of a random variable'''
-    from numpy.core.multiarray import array
-    from numpy.lib.function_base import unique
-    from numpy.core.fromnumeric import sum
-    a = array(sample)
-    a.sort()
-    xaxis = unique(a)
-    counts = [sum(a <= x) for x in xaxis]
+    from numpy import arange, cumsum
+    xaxis = sorted(sample)
+    counts = arange(1, len(sample)+1, dtype = float) # float so that the normalization below does not truncate to integers
+    if normalized:
+        counts /= float(len(sample))
     return xaxis, counts
 
 class EmpiricalDiscreteDistribution(EmpiricalDistribution):
@@ -174,33 +155,75 @@
 # maths section
 #########################
 
-def LCSS(l1, l2, threshold, distance, delta = float('inf')):
-    '''returns the longest common subsequence similarity
-    based on the threshold on distance between two elements of lists l1, l2
-    '''
-    from numpy import zeros, int as npint
-    m = len(l1)
-    n = len(l2)
-    similarity = zeros((m+1,n+1), dtype = npint)
-    for i in xrange(1,m+1):
-        for j in xrange(max(1,i-delta),min(n+1,i+delta)):
-            if distance(l1[i-1], l2[j-1])<=threshold:
-                similarity[i][j] = similarity[i-1][j-1]+1
-            else:
-                similarity[i][j] = max(similarity[i-1][j], similarity[i][j-1])
-    return similarity[-1][-1]
+# def kernelSmoothing(sampleX, X, Y, weightFunc, halfwidth):
+#     '''Returns a smoothed weighted version of Y at the predefined values of sampleX
+#     Sum_x weight(sample_x,x) * y(x)'''
+#     from numpy import zeros, array
+#     smoothed = zeros(len(sampleX))
+#     for i,x in enumerate(sampleX):
+#         weights = array([weightFunc(x,xx, halfwidth) for xx in X])
+#         if sum(weights)>0:
+#             smoothed[i] = sum(weights*Y)/sum(weights)
+#         else:
+#             smoothed[i] = 0
+#     return smoothed
+
+def kernelSmoothing(x, X, Y, weightFunc, halfwidth):
+    '''Returns the smoothed estimate of (X,Y) at x
+    Sum_x weight(sample_x,x) * y(x)'''
+    from numpy import zeros, array
+    weights = array([weightFunc(x,observedx, halfwidth) for observedx in X])
+    if sum(weights)>0:
+        return sum(weights*Y)/sum(weights)
+    else:
+        return 0
+
+def uniform(center, x, halfwidth):
+    if abs(center-x)<halfwidth:
+        return 1.
+    else:
+        return 0.
 
-def framesToTime(nFrames, frameRate, initialTime = (0.,0.,0.)):
-    'returns hour, minutes and seconds'
+def gaussian(center, x, halfwidth):
+    from numpy import exp
+    return exp(-((center-x)/halfwidth)**2/2)
+
+def epanechnikov(center, x, halfwidth):
+    diff = abs(center-x)
+    if diff<halfwidth:
+        return 1.-(diff/halfwidth)**2
+    else:
+        return 0.
+    
+def triangular(center, x, halfwidth):
+    diff = abs(center-x)
+    if diff<halfwidth:
+        return 1.-abs(diff/halfwidth)
+    else:
+        return 0.
+
+def medianSmoothing(x, X, Y, halfwidth):
+    '''Returns the median of the Y's corresponding to the X's in the interval [x-halfwidth, x+halfwidth]'''
+    from numpy import median
+    return median([y for observedx, y in zip(X,Y) if abs(x-observedx)<halfwidth])
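+
+# Illustration (values chosen for the example): smoothing y = x**2 at x = 2 with a gaussian
+# kernel of halfwidth 1 weights the neighbors by exp(-0.5), about 0.61, so
+# kernelSmoothing(2., [1, 2, 3], [1, 4, 9], gaussian, 1.) is approximately 4.55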
+
+def argmaxDict(d):
+    return max(d, key=d.get)
+
+def framesToTime(nFrames, frameRate, initialTime = time()):
+    '''returns a datetime.time for the time in hour, minutes and seconds
+    initialTime is a datetime.time'''
     from math import floor
-    from datetime import time
-    seconds = int(floor(float(nFrames)/float(frameRate))+initialTime[0]*3600+initialTime[1]*60+initialTime[2])
+    seconds = int(floor(float(nFrames)/float(frameRate))+initialTime.hour*3600+initialTime.minute*60+initialTime.second)
     h = int(floor(seconds/3600.))
     seconds = seconds - h*3600
     m = int(floor(seconds/60))
     seconds = seconds - m*60
     return time(h, m, seconds)
 
+def timeToFrames(t, frameRate):
+    return frameRate*(t.hour*3600+t.minute*60+t.second)
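+
+# Round trip (illustrative): framesToTime(450, 30.) is time(0, 0, 15)
+# and timeToFrames(time(0, 0, 15), 30.) gives back 450.0 (exact up to whole seconds)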
+
 def sortXY(X,Y):
     'returns the sorted (x, Y(x)) sorted on X'
     D = {}
@@ -217,14 +240,32 @@
     return ceil(v*tens)/tens
 
 def inBetween(bound1, bound2, x):
-    return bound1 <= x <= bound2 or bound2 <= x<= bound1
+    return bound1 <= x <= bound2 or bound2 <= x <= bound1
+
+def pointDistanceL2(x1,y1,x2,y2):
+    ''' Compute point-to-point distance (L2 norm, ie Euclidean distance)'''
+    return sqrt((x2-x1)**2+(y2-y1)**2)
 
 def crossProduct(l1, l2):
     return l1[0]*l2[1]-l1[1]*l2[0]
 
-def filterMovingWindow(input, halfWidth):
+def cat_mvgavg(cat_list, halfWidth):
+    ''' Return a list of categories/values smoothed according to a window. 
+        halfWidth is the search radius on either side'''
+    from copy import deepcopy
+    categories = deepcopy(cat_list)
+    smoothed = deepcopy(cat_list)
+    for point in range(len(categories)):
+        lower_bound_check = max(0, point-halfWidth)
+        upper_bound_check = min(len(categories), point+halfWidth+1)
+        window_values = categories[lower_bound_check:upper_bound_check]
+        smoothed[point] = max(set(window_values), key=window_values.count)
+    return smoothed
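+
+# For instance, cat_mvgavg(['car', 'car', 'pedestrian', 'car', 'car'], 1) replaces the
+# isolated 'pedestrian' by the locally dominant 'car' (majority vote in each window)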
+
+def filterMovingWindow(inputSignal, halfWidth):
     '''Returns an array obtained after the smoothing of the input by a moving average
     The first and last points are copied from the original.'''
+    from numpy import ones,convolve,array
     width = float(halfWidth*2+1)
     win = ones(width,'d')
     result = convolve(win/width,array(inputSignal),'same')
@@ -251,20 +292,162 @@
     return coef
 
 #########################
+# iterable section
+#########################
+
+def mostCommon(L):
+    '''Returns the most frequent element in an iterable
+
+    taken from http://stackoverflow.com/questions/1518522/python-most-common-element-in-a-list'''
+    from itertools import groupby
+    from operator import itemgetter
+    # get an iterable of (item, iterable) pairs
+    SL = sorted((x, i) for i, x in enumerate(L))
+    # print 'SL:', SL
+    groups = groupby(SL, key=itemgetter(0))
+    # auxiliary function to get "quality" for an item
+    def _auxfun(g):
+        item, iterable = g
+        count = 0
+        min_index = len(L)
+        for _, where in iterable:
+            count += 1
+            min_index = min(min_index, where)
+            # print 'item %r, count %r, minind %r' % (item, count, min_index)
+        return count, -min_index
+    # pick the highest-count/earliest item
+    return max(groups, key=_auxfun)[0]
+
+#########################
+# sequence section
+#########################
+
+class LCSS:
+    '''Class that keeps the LCSS parameters
+    and puts together the various computations'''
+    def __init__(self, similarityFunc, delta = float('inf'), aligned = False, lengthFunc = min):
+        self.similarityFunc = similarityFunc
+        self.aligned = aligned
+        self.delta = delta
+        self.lengthFunc = lengthFunc
+        self.subSequenceIndices = [(0,0)]
+
+    def similarities(self, l1, l2, jshift=0):
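+        '''Fills in self.similarityTable, the LCSS dynamic programming table,
+        visiting only the cells within delta of the diagonal shifted by jshift'''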
+        from numpy import zeros, int as npint
+        n1 = len(l1)
+        n2 = len(l2)
+        self.similarityTable = zeros((n1+1,n2+1), dtype = npint)
+        for i in xrange(1,n1+1):
+            for j in xrange(max(1,i-jshift-self.delta),min(n2,i-jshift+self.delta)+1):
+                if self.similarityFunc(l1[i-1], l2[j-1]):
+                    self.similarityTable[i,j] = self.similarityTable[i-1,j-1]+1
+                else:
+                    self.similarityTable[i,j] = max(self.similarityTable[i-1,j], self.similarityTable[i,j-1])
+
+    def subSequence(self, i, j):
+        '''Returns the subsequence of two sequences
+        http://en.wikipedia.org/wiki/Longest_common_subsequence_problem'''
+        if i == 0 or j == 0:
+            return []
+        elif self.similarityTable[i][j] == self.similarityTable[i][j-1]:
+            return self.subSequence(i, j-1)
+        elif self.similarityTable[i][j] == self.similarityTable[i-1][j]:
+            return self.subSequence(i-1, j)
+        else:
+            return self.subSequence(i-1, j-1) + [(i-1,j-1)]
+
+    def _compute(self, _l1, _l2, computeSubSequence = False):
+        '''returns the longest common subsequence similarity
+        based on the threshold on distance between two elements of lists l1, l2
+        similarityFunc returns True or False depending on whether the two points are considered similar
+
+        if aligned, returns the best matching when using a finite delta, obtained by shifting the series alignments
+
+        eg distance(p1, p2) < epsilon
+        '''
+        if len(_l2) < len(_l1): # l1 is the shortest
+            l1 = _l2
+            l2 = _l1
+            revertIndices = True
+        else:
+            l1 = _l1
+            l2 = _l2
+            revertIndices = False
+        n1 = len(l1)
+        n2 = len(l2)
+
+        if self.aligned:
+            lcssValues = {}
+            similarityTables = {}
+            for i in xrange(-n2-self.delta+1, n1+self.delta): # interval such that [i-shift-delta, i-shift+delta] is never empty (it would be empty if i-shift+delta < 1 or i-shift-delta > n2)
+                self.similarities(l1, l2, i)
+                lcssValues[i] = self.similarityTable.max()
+                similarityTables[i] = self.similarityTable
+                #print self.similarityTable
+            alignmentShift = argmaxDict(lcssValues) # ideally get the medium alignment shift, the one that minimizes distance
+            self.similarityTable = similarityTables[alignmentShift]
+        else:
+            alignmentShift = 0
+            self.similarities(l1, l2)
+
+        # threshold values for the useful part of the similarity table are n2-n1-delta and n1-n2-delta
+        self.similarityTable = self.similarityTable[:min(n1, n2+alignmentShift+self.delta)+1, :min(n2, n1-alignmentShift+self.delta)+1]
+
+        if computeSubSequence:
+            self.subSequenceIndices = self.subSequence(self.similarityTable.shape[0]-1, self.similarityTable.shape[1]-1)
+            if revertIndices:
+                self.subSequenceIndices = [(j,i) for i,j in self.subSequenceIndices]
+        return self.similarityTable[-1,-1]
+
+    def compute(self, l1, l2, computeSubSequence = False):
+        '''get methods are to be shadowed in child classes '''
+        return self._compute(l1, l2, computeSubSequence)
+
+    def computeAlignment(self):
+        from numpy import mean
+        return mean([j-i for i,j in self.subSequenceIndices])
+
+    def _computeNormalized(self, l1, l2, computeSubSequence = False):
+        ''' compute the normalized LCSS
+        ie, the LCSS divided by the min or mean of the indicator lengths (using lengthFunc)
+        eg lengthFunc = lambda x,y: float(x+y)/2 to normalize by the mean length'''
+        return float(self._compute(l1, l2, computeSubSequence))/self.lengthFunc(len(l1), len(l2))
+
+    def computeNormalized(self, l1, l2, computeSubSequence = False):
+        return self._computeNormalized(l1, l2, computeSubSequence)
+
+    def _computeDistance(self, l1, l2, computeSubSequence = False):
+        ''' compute the LCSS distance'''
+        return 1-self._computeNormalized(l1, l2, computeSubSequence)
+
+    def computeDistance(self, l1, l2, computeSubSequence = False):
+        return self._computeDistance(l1, l2, computeSubSequence)
+    
+#########################
 # plotting section
 #########################
 
-def stepPlot(X, firstX, lastX, initialCount = 0):
-    '''for each value in x, increment by one the initial count
+def plotPolygon(poly, options = ''):
+    'Plots shapely polygon poly'
+    from numpy.core.multiarray import array
+    from matplotlib.pyplot import plot
+    from shapely.geometry import Polygon
+
+    tmp = array(poly.exterior)
+    plot(tmp[:,0], tmp[:,1], options)
+
+def stepPlot(X, firstX, lastX, initialCount = 0, increment = 1):
+    '''for each value in X, increment by increment the initial count
     returns the lists that can be plotted 
-    to obtain a step plot increasing by one for each value in x, from first to last value'''
+    to obtain a step plot increasing by increment for each value in X, from the first to the last value
+    firstX and lastX should be respectively smaller and larger than all elements in X'''
     
     sortedX = []
     counts = [initialCount]
     for x in sorted(X):
         sortedX += [x,x]
         counts.append(counts[-1])
-        counts.append(counts[-1]+1)
+        counts.append(counts[-1]+increment)
     counts.append(counts[-1])
     return [firstX]+sortedX+[lastX], counts
 
@@ -302,50 +485,6 @@
 # file I/O section
 #########################
 
-def openCheck(filename, option = 'r', quit = False):
-    '''Open file filename in read mode by default
-    and checks it is open'''
-    try:
-        return open(filename, option)
-    except IOError:
-        print 'File %s could not be opened.' % filename
-        if quit:
-            from sys import exit
-            exit()
-        return None
-
-def readline(f, commentCharacter = commentChar):
-    '''Modified readline function to skip comments.'''
-    s = f.readline()
-    while (len(s) > 0) and s.startswith(commentCharacter):
-        s = f.readline()
-    return s.strip()
-
-def getLines(f, delimiterCharacter = delimiterChar):
-    '''Gets a complete entry (all the lines) in between delimiterChar.'''
-    dataStrings = []
-    s = readline(f)
-    while (len(s) > 0) and (not s.startswith(delimiterCharacter)):
-        dataStrings += [s.strip()]
-        s = readline(f)
-    return dataStrings
-
-class FakeSecHead(object):
-    '''Add fake section header [asection]
-
-    from http://stackoverflow.com/questions/2819696/parsing-properties-file-in-python/2819788#2819788
-    use read_file in Python 3.2+
-    '''
-    def __init__(self, fp):
-        self.fp = fp
-        self.sechead = '[main]\n'
-
-    def readline(self):
-        if self.sechead:
-            try: return self.sechead
-            finally: self.sechead = None
-        else: return self.fp.readline()
-
 def removeExtension(filename, delimiter = '.'):
     '''Returns the filename minus the extension (all characters after last .)'''
     i = filename.rfind(delimiter)
@@ -386,14 +525,6 @@
     else:
         print(filename+' does not exist')
 
-def plotPolygon(poly, options = ''):
-    from numpy.core.multiarray import array
-    from matplotlib.pyplot import plot
-    from shapely.geometry import Polygon
-
-    tmp = array(poly.exterior)
-    plot(tmp[:,0], tmp[:,1], options)
-
 def line2Floats(l, separator=' '):
     '''Returns the list of floats corresponding to the string'''
     return [float(x) for x in l.split(separator)]
@@ -403,14 +534,62 @@
     return [int(x) for x in l.split(separator)]
 
 #########################
-# sqlite
+# CLI utils
 #########################
 
-def dropTables(connection, tableNames):
-    'deletes the table with names in tableNames'
-    cursor = connection.cursor()
-    for tableName in tableNames:
-        cursor.execute('DROP TABLE '+tableName)
+def parseCLIOptions(helpMessage, options, cliArgs, optionalOptions=[]):
+    ''' Simple function to handle similar argument parsing
+    Returns the dictionary of options and their values
+
+    * cliArgs are most likely directly sys.argv 
+    (only the elements after the first one are considered)
+    
+    * options should be a list of strings for getopt options, 
+    eg ['frame=','correspondences=','video=']
+    A value must be provided for each option, or the program quits'''
+    import sys, getopt
+    from numpy.core.fromnumeric import all
+    optionValues, args = getopt.getopt(cliArgs[1:], 'h', ['help']+options+optionalOptions)
+    optionValues = dict(optionValues)
+
+    if '--help' in optionValues.keys() or '-h' in optionValues.keys():
+        print(helpMessage+
+              '\n - Compulsory options: '+' '.join([opt.replace('=','') for opt in options])+
+              '\n - Non-compulsory options: '+' '.join([opt.replace('=','') for opt in optionalOptions]))
+        sys.exit()
+
+    missingArgument = [('--'+opt.replace('=','') in optionValues.keys()) for opt in options]
+    if not all(missingArgument):
+        print('Missing argument')
+        print(optionValues)
+        sys.exit()
+
+    return optionValues
+
+
+#########################
+# Profiling
+#########################
+
+def analyzeProfile(profileFilename, stripDirs = True):
+    '''Analyze the file produced by cProfile 
+
+    obtained by for example: 
+    - call in script (for main() function in script)
+    import cProfile, os
+    cProfile.run('main()', os.path.join(os.getcwd(),'main.profile'))
+
+    - or on the command line:
+    python -m cProfile [-o profile.bin] [-s sort] scriptfile [arg]'''
+    import pstats, os
+    p = pstats.Stats(os.path.join(os.pardir, profileFilename))
+    if stripDirs:
+        p.strip_dirs()
+    p.sort_stats('time')
+    p.print_stats(.2)
+    #p.sort_stats('time')
+    # p.print_callees(.1, 'int_prediction.py:')
+    return p
 
 #########################
 # running tests
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/run-tests.sh	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,17 @@
+#!/bin/sh
+echo "------------"
+echo "Python tests"
+cd python
+./run-tests.sh
+cd ..
+echo "------------"
+echo "C++ tests"
+if [ -f ./bin/tests ]
+then
+    ./bin/tests
+else
+    echo "The test executable has not been compiled"
+fi
+echo "------------"
+echo "Script tests"
+./scripts/run-tests.sh
Binary file samples/TTC/Video.png has changed
Binary file samples/TTC/bikeDes.png has changed
Binary file samples/TTC/bikeOri.png has changed
Binary file samples/TTC/carDes.png has changed
Binary file samples/TTC/carOri.png has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/samples/TTC/computeTTC.py	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,61 @@
+def computeTTC(databaseFilename,homography,framePerSecond,videoX,videoY,collisionDistanceThreshold,bikAreaOri,bikAreaDes,carAreaOri,carAreaDes):
+
+    import numpy as np
+    import sys
+    sys.path.append('/home/sohail/trafficintelligence/python/')
+    import moving, cvutils, storage
+    import timeToCollision
+
+    print 'Loading database...'
+    objects = storage.loadTrajectoriesFromSqlite(databaseFilename, 'object')
+
+    bikCount=0
+    carCount=0
+    bik=[]
+    car=[]
+    bikSpeed=[]
+    carSpeed=[]
+
+    for obj in objects:
+	    inCarAreaOri = False
+	    inBikAreaOri = False
+	    for time in obj.getTimeInterval():
+		x=int(obj.getPositionAtInstant(time).project(homography).x)
+		y=int(obj.getPositionAtInstant(time).project(homography).y)
+		x=min(videoX-1,x)
+		y=min(videoY-1,y)
+		if bikAreaOri[y,x] == 1 and obj.userType == moving.userType2Num['bicycle']:
+		    inBikAreaOri = True
+		if bikAreaDes[y,x] == 1 and inBikAreaOri == True:
+		    bikCount += 1
+		    bik.append(obj)
+		    bikSpeed.append(framePerSecond*3.6*np.median(obj.getSpeeds()))
+		    break
+		if carAreaOri[y,x] == 1 and obj.userType == moving.userType2Num['car']:
+		    inCarAreaOri = True
+		if carAreaDes[y,x] == 1 and inCarAreaOri == True:
+		    carCount += 1
+		    car.append(obj)
+		    carSpeed.append(framePerSecond*3.6*np.median(obj.getSpeeds()))
+		    break
+
+    print 'Computing TTC...'
+    TTC=[]
+    potCollision=0
+    for obj1 in bik:
+	for obj2 in car:
+	    ti1=obj1.getTimeInterval()
+	    ti2=obj2.getTimeInterval()
+	    if ti1.first < ti2.last and ti2.first < ti1.last:
+		potCollision += 1
+		ttc=[]
+		for frameNum in range(max(ti1.first,ti2.first),min(ti1.last,ti2.last)):
+		    ttcp=timeToCollision.timeToCollision(obj1,obj2,collisionDistanceThreshold,frameNum,framePerSecond)
+		    if ttcp < 100:
+			ttc.append(ttcp)
+		if ttc != []:
+		    ttc.sort()
+	            TTC.append(ttc[int(.15*len(ttc))])
+
+    return bikCount,carCount,bikSpeed,carSpeed,TTC,potCollision
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/samples/TTC/ttcCompare.py	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,51 @@
+import matplotlib.image as mpimg
+from numpy.linalg.linalg import inv
+from numpy import loadtxt
+import computeTTC
+import numpy as np
+import glob
+
+#databaseName = ['11.sqlite']
+databaseName = glob.glob("*.sqlite")
+
+bikOri = mpimg.imread('bikeOri.png')
+bikAreaOri = bikOri[:,:,0]
+bikDes = mpimg.imread('bikeDes.png')
+bikAreaDes = bikDes[:,:,0]
+carOri = mpimg.imread('carOri.png')
+carAreaOri = carOri[:,:,0]
+carDes = mpimg.imread('carDes.png')
+carAreaDes = carDes[:,:,0]
+
+videoY = bikOri.shape[0]
+videoX = bikOri.shape[1]
+
+homography=inv(loadtxt('homography.txt'))
+framePerSecond=15
+collisionDistanceThreshold=10
+
+bikCount=0
+carCount=0
+bikSpeed=[]
+carSpeed=[]
+ttcDist=[]
+potentialCollision=0
+
+for d in databaseName:
+    print d
+    bc,cc,bs,cs,ttcdist,potCollision = computeTTC.computeTTC(d,homography,framePerSecond,videoX,videoY,collisionDistanceThreshold,bikAreaOri,bikAreaDes,carAreaOri,carAreaDes)
+    bikCount += bc
+    carCount += cc
+    bikSpeed.extend(bs)
+    carSpeed.extend(cs)
+    ttcDist.extend(ttcdist)
+    potentialCollision += potCollision
+
+print 'bikCount :', bikCount
+print 'carCount :', carCount
+print 'Number of Potential Collisions :', potentialCollision
+np.savetxt('ttc.txt',ttcDist,fmt='%1.2f')
+np.savetxt('freq.txt',[bikCount,carCount,potentialCollision],fmt='%i')
+np.savetxt('bikSpeed.txt',bikSpeed,fmt='%1.2f')
+np.savetxt('carSpeed.txt',carSpeed,fmt='%1.2f')
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/classify-objects.py	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,23 @@
+#! /usr/bin/env python
+
+import numpy as np
+import sys, argparse
+from cv2 import SVM_RBF, SVM_C_SVC
+
+import cvutils, moving, ml, storage
+
+parser = argparse.ArgumentParser(description='The program processes indicators for all pairs of road users in the scene')
+parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the configuration file', required = True)
+parser.add_argument('-d', dest = 'directoryName', help = 'name of the parent directory containing the videos and extracted trajectories to process', required = True)
+# parser.add_argument('-o', dest = 'homographyFilename', help = 'name of the image to world homography file')
+# need a classification config file for speed distribution parameters, svm models, frequency parameters, area parameters etc
+#parser.add_argument('--cfg', dest = 'svmType', help = 'SVM type', default = SVM_C_SVC, type = long)
+
+
+#parser.add_argument('-s', dest = 'rescaleSize', help = 'rescale size of image samples', default = 64, type = int)
+#parser.add_argument('-o', dest = 'nOrientations', help = 'number of orientations in HoG', default = 9, type = int)
+#parser.add_argument('-p', dest = 'nPixelsPerCell', help = 'number of pixels per cell', default = 8, type = int)
+#parser.add_argument('-c', dest = 'nCellsPerBlock', help = 'number of cells per block', default = 2, type = int)
+
+args = parser.parse_args()
+params = storage.ProcessParameters(args.configFilename)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/compute-homography.py	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,129 @@
+#! /usr/bin/env python
+
+import sys, argparse
+
+import matplotlib.pyplot as plt
+import numpy as np
+import cv2
+
+import cvutils
+import utils
+
+parser = argparse.ArgumentParser(description='The program computes the homography matrix from at least 4 non-collinear point correspondences input in the same order in a video frame and an aerial photo/ground map, or from the list of corresponding points in the two planes.', epilog = '''The point correspondence file contains at least 4 non-collinear point coordinates 
+with the following format:
+ - the first two lines are the x and y coordinates in the projected space (usually world space)
+ - the last two lines are the x and y coordinates in the origin space (usually image space)
+
+If providing video and world images, with a number of points to input
+and a ratio to convert pixels to world distance units (eg meters per pixel), 
+the images will be shown in turn and the user should click 
+in the same order the corresponding points in world and image spaces.''', formatter_class=argparse.RawDescriptionHelpFormatter,)
+
+parser.add_argument('-p', dest = 'pointCorrespondencesFilename', help = 'name of the text file containing the point correspondences')
+parser.add_argument('-i', dest = 'videoFrameFilename', help = 'filename of the video frame')
+parser.add_argument('-w', dest = 'worldFilename', help = 'filename of the aerial photo/ground map')
+parser.add_argument('-n', dest = 'nPoints', help = 'number of corresponding points to input', default = 4, type = int)
+parser.add_argument('-u', dest = 'unitsPerPixel', help = 'number of units per pixel', default = 1., type = float)
+parser.add_argument('--display', dest = 'displayPoints', help = 'display original and projected points on both images', action = 'store_true')
+parser.add_argument('--intrinsic', dest = 'intrinsicCameraMatrixFilename', help = 'name of the intrinsic camera file')
+parser.add_argument('--distortion-coefficients', dest = 'distortionCoefficients', help = 'distortion coefficients', nargs = '*', type = float)
+parser.add_argument('--undistorted-multiplication', dest = 'undistortedImageMultiplication', help = 'undistorted image multiplication', type = float)
+parser.add_argument('--undistort', dest = 'undistort', help = 'undistort the video (because features have been extracted that way)', action = 'store_true')
+parser.add_argument('--save', dest = 'saveImages', help = 'save the undistorted video frame (display option must be chosen)', action = 'store_true')
+
+args = parser.parse_args()
+
+# TODO process camera intrinsic and extrinsic parameters to obtain image to world homography, taking example from Work/src/python/generate-homography.py script
+# cameraMat = load(videoFilenamePrefix+'-camera.txt');
+# T1 = cameraMat[3:6,:].copy();
+# A = cameraMat[0:3,0:3].copy();
+
+# # pay attention, rotation may be the transpose
+# # R = T1[:,0:3].T;
+# R = T1[:,0:3];
+# rT = dot(R, T1[:,3]/1000);
+# T = zeros((3,4),'f');
+# T[:,0:3] = R[:];
+# T[:,3] = rT;
+
+# AT = dot(A,T);
+
+# nPoints = 4;
+# worldPoints = cvCreateMat(nPoints, 3, CV_64FC1);
+# imagePoints = cvCreateMat(nPoints, 3, CV_64FC1);
+
+# # extract homography from the camera calibration
+# worldPoints = cvCreateMat(4, 3, CV_64FC1);
+# imagePoints = cvCreateMat(4, 3, CV_64FC1);
+
+# worldPoints[0,:] = [[1, 1, 0]];
+# worldPoints[1,:] = [[1, 2, 0]];
+# worldPoints[2,:] = [[2, 1, 0]];
+# worldPoints[3,:] = [[2, 2, 0]];
+
+# wPoints = [[1,1,2,2],
+#            [1,2,1,2],
+#            [0,0,0,0]];
+# iPoints = utils.worldToImage(AT, wPoints);
+
+# for i in range(nPoints):
+#     imagePoints[i,:] = [iPoints[:,i].tolist()];
+
+# H = cvCreateMat(3, 3, CV_64FC1);
+
+# cvFindHomography(imagePoints, worldPoints, H);
+
+
+homography = np.array([])
+if args.pointCorrespondencesFilename != None:
+    worldPts, videoPts = cvutils.loadPointCorrespondences(args.pointCorrespondencesFilename)
+    homography, mask = cv2.findHomography(videoPts, worldPts) # method=0, ransacReprojThreshold=3
+elif args.videoFrameFilename != None and args.worldFilename != None:
+    worldImg = plt.imread(args.worldFilename)
+    videoImg = plt.imread(args.videoFrameFilename)
+    if args.undistort:        
+        [map1, map2] = cvutils.computeUndistortMaps(videoImg.shape[1], videoImg.shape[0], args.undistortedImageMultiplication, np.loadtxt(args.intrinsicCameraMatrixFilename), args.distortionCoefficients)
+        videoImg = cv2.remap(videoImg, map1, map2, interpolation=cv2.INTER_LINEAR)
+    print('Click on {0} points in the video frame'.format(args.nPoints))
+    plt.figure()
+    plt.imshow(videoImg)
+    videoPts = np.array(plt.ginput(args.nPoints, timeout=3000))
+    print('Click on {0} points in the world image'.format(args.nPoints))
+    plt.figure()
+    plt.imshow(worldImg)
+    worldPts = args.unitsPerPixel*np.array(plt.ginput(args.nPoints, timeout=3000))
+    plt.close('all')
+    homography, mask = cv2.findHomography(videoPts, worldPts)
+    # save the points in file
+    f = open('point-correspondences.txt', 'a')
+    np.savetxt(f, worldPts.T)
+    np.savetxt(f, videoPts.T)
+    f.close()
+
+if homography.size>0:
+    np.savetxt('homography.txt',homography)
+
+if args.displayPoints and args.videoFrameFilename != None and args.worldFilename != None and homography.size>0:
+    worldImg = cv2.imread(args.worldFilename)
+    videoImg = cv2.imread(args.videoFrameFilename)
+    if args.undistort:
+        [map1, map2] = cvutils.computeUndistortMaps(videoImg.shape[1], videoImg.shape[0], args.undistortedImageMultiplication, np.loadtxt(args.intrinsicCameraMatrixFilename), args.distortionCoefficients)
+        videoImg = cv2.remap(videoImg, map1, map2, interpolation=cv2.INTER_LINEAR)
+        if args.saveImages:
+            cv2.imwrite(utils.removeExtension(args.videoFrameFilename)+'-undistorted.png', videoImg)
+    invHomography = np.linalg.inv(homography)
+    projectedWorldPts = cvutils.projectArray(invHomography, worldPts.T).T
+    projectedVideoPts = cvutils.projectArray(homography, videoPts.T).T
+    for i in range(worldPts.shape[0]):
+        # world image
+        cv2.circle(worldImg,tuple(np.int32(np.round(worldPts[i]/args.unitsPerPixel))),2,cvutils.cvBlue)
+        cv2.circle(worldImg,tuple(np.int32(np.round(projectedVideoPts[i]/args.unitsPerPixel))),2,cvutils.cvRed)
+        cv2.putText(worldImg, str(i+1), tuple(np.int32(np.round(worldPts[i]/args.unitsPerPixel))+5), cv2.FONT_HERSHEY_PLAIN, 2., cvutils.cvBlue, 2)
+        # video image
+        cv2.circle(videoImg,tuple(np.int32(np.round(videoPts[i]))),2,cvutils.cvBlue)
+        cv2.circle(videoImg,tuple(np.int32(np.round(projectedWorldPts[i]))),2,cvutils.cvRed)
+        cv2.putText(videoImg, str(i+1), tuple(np.int32(np.round(videoPts[i])+5)), cv2.FONT_HERSHEY_PLAIN, 2., cvutils.cvBlue, 2)
+    cv2.imshow('video frame',videoImg)
+    cv2.imshow('world image',worldImg)
+    cv2.waitKey()
+    cv2.destroyAllWindows()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/create-bounding-boxes.py	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,21 @@
+#! /usr/bin/env python
+
+import argparse
+
+import storage
+
+from numpy.linalg.linalg import inv
+from numpy import loadtxt
+
+parser = argparse.ArgumentParser(description='The program creates bounding boxes in image space around all features (for display and for comparison to ground truth in the form of bounding boxes).')
+parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file', required = True)
+parser.add_argument('-o', dest = 'homography', help = 'name of the image to world homography')
+
+args = parser.parse_args()
+
+homography = None
+if args.homography != None:
+    homography = inv(loadtxt(args.homography))
+
+storage.createBoundingBoxTable(args.databaseFilename, homography)
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/delete-tables.py	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,14 @@
+#! /usr/bin/env python
+
+import sys, argparse
+
+import utils
+import storage
+
+parser = argparse.ArgumentParser(description='The program deletes (drops) the tables in the database before saving new results (for objects, tables object_features and objects are dropped; for interactions, the tables interactions and indicators are dropped)')
+#parser.add_argument('configFilename', help = 'name of the configuration file')
+parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database', required = True)
+parser.add_argument('-t', dest = 'dataType', help = 'type of the data to remove', required = True, choices = ['object','interaction', 'bb'])
+args = parser.parse_args()
+
+storage.deleteFromSqlite(args.databaseFilename, args.dataType)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/display-trajectories.py	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,67 @@
+#! /usr/bin/env python
+
+import sys, argparse
+
+import storage, cvutils, utils
+
+from numpy.linalg.linalg import inv
+from numpy import loadtxt
+
+parser = argparse.ArgumentParser(description='The program displays feature or object trajectories overlaid over the video frames.', epilog = 'Either the configuration filename or the other parameters (at least video and database filenames) need to be provided.')
+parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the configuration file')
+parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file')
+parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file')
+parser.add_argument('-t', dest = 'trajectoryType', help = 'type of trajectories to display', choices = ['feature', 'object'], default = 'feature')
+parser.add_argument('-o', dest = 'homographyFilename', help = 'name of the image to world homography file')
+parser.add_argument('--intrinsic', dest = 'intrinsicCameraMatrixFilename', help = 'name of the intrinsic camera file')
+parser.add_argument('--distortion-coefficients', dest = 'distortionCoefficients', help = 'distortion coefficients', nargs = '*', type = float)
+parser.add_argument('--undistorted-multiplication', dest = 'undistortedImageMultiplication', help = 'undistorted image multiplication', type = float)
+parser.add_argument('-u', dest = 'undistort', help = 'undistort the video (because features have been extracted that way)', action = 'store_true')
+parser.add_argument('-f', dest = 'firstFrameNum', help = 'number of the first frame to display', type = int)
+parser.add_argument('-r', dest = 'rescale', help = 'rescaling factor for the displayed image', default = 1., type = float)
+parser.add_argument('-s', dest = 'nFramesStep', help = 'number of frames between each display', default = 1, type = int)
+parser.add_argument('--save-images', dest = 'saveAllImages', help = 'save all images', action = 'store_true')
+parser.add_argument('--last-frame', dest = 'lastFrameNum', help = 'number of the last frame to save (for image saving, no display is made)', type = int)
+
+args = parser.parse_args()
+
+if args.configFilename: # consider there is a configuration file
+    params = storage.ProcessParameters(args.configFilename)
+    videoFilename = params.videoFilename
+    databaseFilename = params.databaseFilename
+    if params.homography != None:
+        homography = inv(params.homography)
+    else:
+        homography = None
+    intrinsicCameraMatrix = params.intrinsicCameraMatrix
+    distortionCoefficients = params.distortionCoefficients
+    undistortedImageMultiplication = params.undistortedImageMultiplication
+    undistort = params.undistort
+    firstFrameNum = params.firstFrameNum
+else:
+    homography = None
+    undistort = False
+    intrinsicCameraMatrix = None
+    distortionCoefficients = []
+    undistortedImageMultiplication = None
+    firstFrameNum = 0
+
+if not args.configFilename and args.videoFilename != None:
+    videoFilename = args.videoFilename
+if not args.configFilename and args.databaseFilename != None:
+    databaseFilename = args.databaseFilename
+if not args.configFilename and args.homographyFilename != None:
+    homography = inv(loadtxt(args.homographyFilename))            
+if not args.configFilename and args.intrinsicCameraMatrixFilename != None:
+    intrinsicCameraMatrix = loadtxt(args.intrinsicCameraMatrixFilename)
+if not args.configFilename and args.distortionCoefficients != None:
+    distortionCoefficients = args.distortionCoefficients
+if not args.configFilename and args.undistortedImageMultiplication != None:
+    undistortedImageMultiplication = args.undistortedImageMultiplication
+if args.firstFrameNum != None:
+    firstFrameNum = args.firstFrameNum
+
+
+objects = storage.loadTrajectoriesFromSqlite(databaseFilename, args.trajectoryType)
+boundingBoxes = storage.loadBoundingBoxTableForDisplay(databaseFilename)
+cvutils.displayTrajectories(videoFilename, objects, boundingBoxes, homography, firstFrameNum, args.lastFrameNum, rescale = args.rescale, nFramesStep = args.nFramesStep, saveAllImages = args.saveAllImages, undistort = (undistort or args.undistort), intrinsicCameraMatrix = intrinsicCameraMatrix, distortionCoefficients = distortionCoefficients, undistortedImageMultiplication = undistortedImageMultiplication)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/examples.sql	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,4 @@
+
+
+# get object id, first and last frame numbers, length and number of features
+select OF.object_id, min(P.frame_number) as frame1, max(P.frame_number) as frame2, max(P.frame_number)-min(P.frame_number) as length, count(OF.trajectory_id) as nfeatures from positions P, objects_features OF where P.trajectory_id = OF.trajectory_id group by OF.object_id order by nfeatures asc;
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/play-video.py	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,23 @@
+#! /usr/bin/env python
+
+import sys, argparse
+import cvutils
+
+
+parser = argparse.ArgumentParser(description='The program displays the video.')
+parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file', required = True)
+parser.add_argument('-f', dest = 'firstFrameNum', help = 'number of the first frame to display', default = 0, type = int)
+parser.add_argument('--fps', dest = 'frameRate', help = 'approximate frame rate to replay', type = float)
+parser.add_argument('-r', dest = 'rescale', help = 'rescaling factor for the displayed image', default = 1., type = float)
+
+args = parser.parse_args()
+
+firstFrameNum = 0
+if args.firstFrameNum != None:
+    firstFrameNum = args.firstFrameNum
+
+frameRate = -1
+if args.frameRate != None:
+    frameRate = args.frameRate
+
+cvutils.playVideo(args.videoFilename, firstFrameNum, frameRate, rescale = args.rescale)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/polytracktopdtv.py	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,231 @@
+#! /usr/bin/env python
+
+from pdtv import TsaiCamera, ZipVideo, SyncedVideos, TrackSet, Track, State
+import sys, os, datetime, argparse
+import shutil
+import sqlite3
+import zipfile
+import utils
+import cvutils
+import cv2
+
+
+def zipFolder(inputFolder, outputFile):
+    '''Method to compress the content of the inputFolder in the outputFile'''
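+    # note that the files are stored flat at the root of the archive (the arcname is the bare file name)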
+    zip = zipfile.ZipFile(outputFile, 'w')
+    for root, dirs, files in os.walk(inputFolder):
+        for file in files:
+            zip.write(os.path.join(root, file), file)
+    zip.close()
+
+
+
+
+def getTypeDict(cursor):
+    '''Return a dictionary with integer key and associated type string
+    i.e.: "0"  -> "unknown"
+          "1"  -> "car"
+          "2"  -> "pedestrians"
+          "3"  -> "motorcycle"
+          "4"  -> "bicycle"
+          "5"  -> "bus"
+          "6"  -> "truck"
+          ... and other type if the objects_type table is defined in SQLite'''
+    typeDict = dict()
+    cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='objects_type'")
+    data = cursor.fetchone()
+
+    if(data == None):
+        typeDict["0"] = "unknown"
+        typeDict["1"] = "car"
+        typeDict["2"] = "pedestrians"
+        typeDict["3"] = "motorcycle"
+        typeDict["4"] = "bicycle"
+        typeDict["5"] = "bus"
+        typeDict["6"] = "truck"
+        
+    else:
+        cursor.execute("SELECT road_user_type, type_string FROM objects_type")
+        for row in cursor:
+            typeDict[row[0]]= row[1]
+
+    return typeDict
+
+def extractFrames(videoFile, framePath, fps, time, firstFrameNum = 0, lastFrameNum = None):
+    '''Method to extract all the frames of the video'''
+    print('Extracting frames')
+    deltaTimestamp = 1000.0/float(fps)
+    time+=datetime.timedelta(microseconds=firstFrameNum*deltaTimestamp*1000)
+    
+    inc = 1000 # number of frames to fetch from the video at a time
+
+    if lastFrameNum != None:
+        delta = lastFrameNum-firstFrameNum
+        if delta < inc:
+            inc = delta
+    
+    currentIdx = firstFrameNum
+    frameList = cvutils.getImagesFromVideo(videoFile, firstFrameNum = currentIdx, nFrames = inc)
+    
+    while len(frameList) == inc and inc > 0:
+        
+        for f in frameList:
+            cv2.imwrite(os.path.join(framePath,time.strftime("%Y%m%d-%H%M%S.%f")[:-3]+'.jpg'), f)
+            time += datetime.timedelta(microseconds=deltaTimestamp*1000)
+        currentIdx = currentIdx + inc
+
+        if lastFrameNum != None:            
+            delta = lastFrameNum-currentIdx
+            if delta < inc:
+                inc = delta        
+        if inc:
+            frameList = cvutils.getImagesFromVideo(videoFile, firstFrameNum = currentIdx, nFrames = inc)
+        print('Extracting frame ' + str(currentIdx))
+    return len(frameList) > 0
+
+    
+
+def convertDatabase(workDirname, sectionName, sceneFilename = None, databaseFilename = None, videoFilename = None, videoFolderExist = False, firstFrameNum = 0, lastFrameNum = None, cameraCalibrationFilename = None, outputFileName = 'roaduser.json'):
+    '''
+    Method to convert a database from polytrack to PDTV:
+    workDirname is the current working directory
+    sceneFilename is the path to the .cfg file
+    sectionName is the name of the section to process in this file
+    videoFolderExist specifies whether the video frames already exist at workdir/videoframes/ (otherwise they are re-extracted)
+    firstFrameNum is the first frame we want to extract
+    lastFrameNum is the last frame we want to extract (or None to extract everything)
+    '''
+    error = False
+    if sceneFilename != None:
+        scene = utils.SceneParameters.loadConfigFile(os.path.join(workDirname, sceneFilename))
+        time = scene[sectionName].date
+        inputDb = os.path.join(workDirname, scene[sectionName].databaseFilename)
+        videoFile = os.path.join(workDirname, scene[sectionName].videoFilename)
+
+    if databaseFilename != None:
+        inputDb = os.path.join(workDirname, databaseFilename)
+    if videoFilename != None:
+        videoFile = os.path.join(workDirname, videoFilename)
+    # elif videoFolderExist == False:            
+    #     print('No video path specified')
+    #     error = True
+    
+    videoFolderPath = os.path.join(workDirname, "videoframes/")
+    fileName = sectionName
+    
+    if videoFile != None:
+        fps = cvutils.getFPS(videoFile)
+        print('Video should run at ' + str(fps) + ' fps')
+        deltaTimestamp = 1000.0/float(fps);
+        if videoFolderExist == False:
+            if os.path.exists(videoFolderPath):
+                shutil.rmtree(videoFolderPath)
+            utils.mkdir(videoFolderPath)
+            if extractFrames(videoFile, videoFolderPath, fps, time, firstFrameNum, lastFrameNum) == 0:
+                print("Error: frames were not extracted")
+                error = True
+
+    
+    if not error:
+        masterTimestamp = 0.0
+        masterTimestampList = list()
+        video_timestringsList = list()
+        frameNumberToMasterTimestamp = dict()
+        for r,d,f in os.walk(videoFolderPath):
+            i = firstFrameNum
+            for files in f:
+                name, ext = os.path.splitext(files)           
+                video_timestringsList.append(name)
+                masterTimestampList.append(masterTimestamp)            
+                frameNumberToMasterTimestamp[i] = masterTimestamp
+                i = i +1
+                masterTimestamp = masterTimestamp+deltaTimestamp
+        
+        inputZipVideoName = fileName+".zip"
+        print('Zipping files...')
+        if not os.path.exists(inputZipVideoName) or not videoFolderExist:
+            zipFolder(videoFolderPath, inputZipVideoName)
+        print('Zipping files...Done.')
+        #We generate the structure for ZipVideo
+        if cameraCalibrationFilename != None:
+            calibrationFile = cameraCalibrationFilename
+        else:
+            calibrationFile = 'calib.json'
+        zipVideo = ZipVideo(video_zip_file=inputZipVideoName,
+                        time_offset=0.0, time_scale=1.0, master_timestamps=masterTimestampList, calibration_file=calibrationFile)
+        zipVideo.save(os.path.join(workDirname, 'video.json'))
+
+        print('ZipVideo saved')
+        obj = SyncedVideos(master_timestamps = [masterTimestamp],
+                               video_timestrings = [video_timestringsList],
+                               video_files = ['video.json'],
+                               fps=fps)
+        obj.save(os.path.join(workDirname, 'syncedvideo.json'))
+
+        print('SyncedVideos saved')
+
+        print('Opening db')
+        connection = sqlite3.connect(inputDb)
+        cursor = connection.cursor()
+      
+
+        # TrackSet database
+        trackset = TrackSet(synced_file = ['syncedvideo.json'])
+
+        
+        #1) We build the type index dictionary
+        typeDict = getTypeDict(cursor)
+        
+        idToTrackDict = dict()
+        #2) We read the object database    
+        cursor.execute("SELECT object_id, road_user_type FROM objects")
+        for row in cursor:
+            objectId = row[0]
+            objectType = row[1]
+            t = Track(type=typeDict.get(objectType, "unknown"))
+            idToTrackDict[objectId] = t;
+            trackset.append(t)
+        print('Reading boundingbox table')
+        #3) We read the bounding box table    
+        cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='bounding_boxes'")
+        data = cursor.fetchone()
+        if data == None:
+            print('No bounding box table. Maybe it was not generated?')
+        else:
+            cursor.execute("SELECT object_id, frame_number, x_top_left, y_top_left, x_bottom_right, y_bottom_right FROM bounding_boxes")
+            for row in cursor:
+                objectId = row[0]
+                frameNumber = row[1]
+                x_top_left = row[2]
+                y_top_left = row[3]
+                x_bottom_right = row[4]
+                y_bottom_right = row[5]
+                
+                idToTrackDict[objectId].append(State(frame=int(frameNumber), world_position = [0.,0.], 
+                                                 master_timestamp=frameNumberToMasterTimestamp[int(frameNumber)],
+                                                 bounding_boxes=[[(x_top_left, x_bottom_right), (y_top_left, y_bottom_right)]]))
+        print('Saving db in json')
+        trackset.save(os.path.join(workDirname, outputFileName))
+        connection.close()
+        print('Done.')
+
+    
+    
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description='The program converts a polytrack sqlite database to pdtv bounding boxes', epilog = 'Either the configuration filename or the other parameters (at least video and database filenames) need to be provided.')
+    parser.add_argument('-w', dest = 'workDirname', help = 'use a different work directory', default = "./",type = str)
+    parser.add_argument('--scene', dest = 'sceneFilename', help = 'name of the configuration file', type = str, default = None)
+    parser.add_argument('--section', dest = 'sectionName', help = 'name of the section', type = str, default = None)
+    parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file', type = str, default = None)
+    parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file', type = str, default = None) 
+    parser.add_argument('-c', dest = 'cameraCalibrationFilename', help = 'name of the camera json file', type = str, default = None)
+    parser.add_argument('-o', dest = 'outputFilename', help = 'name of the output json file', type = str, default = 'roaduser.json')
+    parser.add_argument('-s', dest = 'firstFrameNum', help = 'forced start frame', type = int, default = 0)
+    parser.add_argument('-e', dest = 'lastFrameNum', help = 'forced end frame', type = int, default = None)
+    #parser.add_argument('-t', dest = 'useDatabaseTimestamp', help = 'use the timestamp of the database', default= False, type = bool) 
+    parser.add_argument('-u', dest = 'useCurrentVideoFile', help = 'use the previously generated video file', action = 'store_true')
+    args = parser.parse_args()
+    #convertDatabase(args)
+
+    convertDatabase(args.workDirname, args.sectionName, args.sceneFilename, videoFilename = args.videoFilename, databaseFilename = args.databaseFilename, videoFolderExist = args.useCurrentVideoFile, firstFrameNum = args.firstFrameNum, lastFrameNum = args.lastFrameNum, cameraCalibrationFilename = args.cameraCalibrationFilename, outputFileName=args.outputFilename )
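A possible invocation without a scene configuration file, using the options defined above (all filenames and the section name are illustrative):
$ scripts/polytracktopdtv.py -w ./ --section intersection-1 -d laurier.sqlite -i laurier.avi -c calib.json -o roaduser.json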
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/replay-event-annotation.py	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,26 @@
+#! /usr/bin/env python
+
+import sys, argparse, datetime
+
+import storage, cvutils, utils
+
+import matplotlib.pylab as pylab
+import matplotlib.pyplot as plt
+import numpy as np
+
+
+annotations = pylab.csv2rec(sys.argv[1])
+
+frameRate = 30
+dirname = "/home/nicolas/Research/Data/montreal/infractions-pietons/"
+videoDirnames = {'amherst': '2011-06-22-sherbrooke-amherst/',
+                 'iberville': '2011-06-28-sherbrooke-iberville/'}
+
+# for amherst, subtract 40 seconds: add a delta
+
+for annotation in annotations:
+    video = annotation['video_name'].lower()
+    print('{} {}'.format(annotation['conflict_start_time'], annotation['conflict_end_time']))
+    print(annotation['road_user_1']+' '+annotation['road_user_2']+' '+annotation['conflict_quality'])
+    print(annotation['comments'])
+    cvutils.playVideo(dirname+videoDirnames[video]+video+'-{}.avi'.format(annotation['video_start_time']), utils.timeToFrames(annotation['conflict_start_time']+datetime.timedelta(seconds=-40), frameRate), frameRate, True, False, annotation['road_user_1']+' '+annotation['road_user_2']+' '+annotation['conflict_quality'])
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/rescale-homography.py	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,32 @@
+#! /usr/bin/env python
+
+import sys
+
+import matplotlib.pyplot as plt
+import numpy as np
+import cv2
+
+import cvutils
+import utils
+
+if len(sys.argv) < 4:
+   print('Usage: {} homography_filename original_size new_size (size can be width or height)'.format(sys.argv[0]))
+   sys.exit()
+
+homography = np.loadtxt(sys.argv[1])
+
+imgPoints = np.array([[10,10],
+                      [10,20],
+                      [20,20],
+                      [20,10]])
+
+wldPoints = cvutils.projectArray(homography, imgPoints.T).T
+
+newSize = float(sys.argv[3])
+originalSize = float(sys.argv[2])
+imgPoints = imgPoints*newSize/originalSize
+
+newHomography, mask = cv2.findHomography(imgPoints, wldPoints)
+
+np.savetxt(sys.argv[1]+'.new', newHomography)
+
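For example, if the homography was computed on 1280-pixel-wide frames and the video is now processed at 640 pixels, the rescaled matrix could be obtained as follows (filename and sizes are illustrative); the result is written next to the input with a .new suffix:
$ scripts/rescale-homography.py laurier-homography.txt 1280 640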
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/run-tests.sh	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,2 @@
+#!/bin/sh
+echo 'no tests'
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/safety-analysis.py	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,75 @@
+#! /usr/bin/env python
+
+import storage, prediction, events, moving
+
+import sys, argparse, random
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+# todo: very slow if too many predicted trajectories
+# add computation of probability of unsuccessful evasive action
+
+parser = argparse.ArgumentParser(description='The program processes indicators for all pairs of road users in the scene')
+parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the configuration file', required = True)
+parser.add_argument('--prediction-method', dest = 'predictionMethod', help = 'prediction method (constant velocity (vector computation), constant velocity, normal adaptation, point set prediction)', choices = ['cvd', 'cv', 'na', 'ps'])
+parser.add_argument('--display-cp', dest = 'displayCollisionPoints', help = 'display collision points', action = 'store_true')
+parser.add_argument('-n', dest = 'nProcesses', help = 'number of processes to run in parallel', type = int, default = 1)
+args = parser.parse_args()
+
+params = storage.ProcessParameters(args.configFilename)
+
+# parameters for prediction methods
+if args.predictionMethod:
+    predictionMethod = args.predictionMethod
+else:
+    predictionMethod = params.predictionMethod
+
+def accelerationDistribution(): 
+    return random.triangular(-params.maxNormalAcceleration, params.maxNormalAcceleration, 0.)
+def steeringDistribution():
+    return random.triangular(-params.maxNormalSteering, params.maxNormalSteering, 0.)
+
+if predictionMethod == 'cvd': # TODO add cve: constant velocity exact (Sohail's)
+    predictionParameters = prediction.CVDirectPredictionParameters()
+elif predictionMethod == 'cv':
+    predictionParameters = prediction.ConstantPredictionParameters(params.maxPredictedSpeed)
+elif predictionMethod == 'na':
+    predictionParameters = prediction.NormalAdaptationPredictionParameters(params.maxPredictedSpeed,
+                                                                           params.nPredictedTrajectories, 
+                                                                           accelerationDistribution,
+                                                                           steeringDistribution,
+                                                                           params.useFeaturesForPrediction)
+elif predictionMethod == 'ps':
+    predictionParameters = prediction.PointSetPredictionParameters(params.maxPredictedSpeed)
+# no else needed: the prediction method comes either from the command line or from the (required) configuration file
+
+# evasiveActionPredictionParameters = prediction.EvasiveActionPredictionParameters(params.maxPredictedSpeed, 
+#                                                                                  params.nPredictedTrajectories, 
+#                                                                                  params.minExtremeAcceleration,
+#                                                                                  params.maxExtremeAcceleration,
+#                                                                                  params.maxExtremeSteering,
+#                                                                                  params.useFeaturesForPrediction)
+
+objects = storage.loadTrajectoriesFromSqlite(params.databaseFilename,'object')
+if params.useFeaturesForPrediction:
+    features = storage.loadTrajectoriesFromSqlite(params.databaseFilename,'feature') # needed if normal adaptation
+    for obj in objects:
+        obj.setFeatures(features)
+
+interactions = events.createInteractions(objects)
+for inter in interactions:
+    inter.computeIndicators()
+    inter.computeCrossingsCollisions(predictionParameters, params.collisionDistance, params.predictionTimeHorizon, params.crossingZones, nProcesses = args.nProcesses)
+
+storage.saveIndicators(params.databaseFilename, interactions)
+
+if args.displayCollisionPoints:
+    plt.figure()
+    allCollisionPoints = []
+    for inter in interactions:
+        for collisionPoints in inter.collisionPoints.values():
+            allCollisionPoints += collisionPoints
+    moving.Point.plotAll(allCollisionPoints)
+    plt.axis('equal')
+    
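A possible invocation, reusing the sample configuration file at the root of the repository (prediction method and number of processes are illustrative):
$ scripts/safety-analysis.py --cfg tracking.cfg --prediction-method na -n 4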
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/setup-tracking.sh	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,39 @@
+version="$(wget -q -O - http://sourceforge.net/projects/opencvlibrary/files/opencv-unix | egrep -m1 -o '\"[0-9](\.[0-9])+' | cut -c2-)"
+echo "Removing any pre-installed ffmpeg and x264"
+sudo apt-get -qq remove ffmpeg x264 libx264-dev
+echo "Installing Dependencies"
+sudo apt-get -qq install libopencv-dev build-essential checkinstall cmake pkg-config yasm libtiff4-dev libjpeg-dev libjasper-dev libavcodec-dev libavformat-dev libswscale-dev libdc1394-22-dev libxine-dev libgstreamer0.10-dev libgstreamer-plugins-base0.10-dev libv4l-dev python-dev python-numpy libtbb-dev libqt4-dev libgtk2.0-dev libfaac-dev libmp3lame-dev libopencore-amrnb-dev libopencore-amrwb-dev libtheora-dev libvorbis-dev libxvidcore-dev x264 v4l-utils ffmpeg
+sudo apt-get -qq install libavfilter-dev libboost-dev libboost-program-options-dev libboost-graph-dev python-setuptools python-dev libcppunit-dev sqlite3 libsqlite3-dev cmake-qt-gui libboost-all-dev
+sudo easy_install -U mercurial
+echo "Installing OpenCV" $version
+cd
+mkdir OpenCV
+cd OpenCV
+echo "Downloading OpenCV" $version
+wget -O OpenCV-$version.tar.gz http://sourceforge.net/projects/opencvlibrary/files/opencv-unix/$version/opencv-"$version".tar.gz/download
+echo "Installing OpenCV" $version
+tar -xvf OpenCV-$version.tar.gz
+cd opencv-$version
+mkdir release
+cd release
+cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local ..
+make
+sudo make install
+echo "OpenCV" $version "ready to be used"
+
+echo "Installing Traffic Intelligence..."
+cd
+mkdir Research
+cd Research
+mkdir Code
+cd Code
+hg clone https://Nicolas@bitbucket.org/trajectories/trajectorymanagementandanalysis
+hg clone https://Nicolas@bitbucket.org/Nicolas/trafficintelligence
+cd trajectorymanagementandanalysis/trunk/src/TrajectoryManagementAndAnalysis/
+cmake .
+make TrajectoryManagementAndAnalysis
+cd
+cd trafficintelligence/c/
+make feature-based-tracking
+cd
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/test-compute-object-position-from-features.py	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,141 @@
+#!/usr/bin/env python
+
+import sys
+
+import matplotlib.mlab as pylab
+import matplotlib.pyplot as plt
+import numpy as np
+
+import cv
+import utils
+import cvutils
+import ubc_utils
+import moving
+
+# use something like getopt to manage arguments if necessary
+
+if len(sys.argv) < 3:
+    print('Usage: {0} <video-filename> <n-objects>'.format(sys.argv[0]))
+    sys.exit()
+
+if sys.argv[1].endswith('.avi'):
+    videoFilenamePrefix = utils.removeExtension(sys.argv[1],'.')
+else:
+    videoFilenamePrefix = sys.argv[1]
+
+objectNum = int(sys.argv[2])
+
+objects = ubc_utils.loadTrajectories(videoFilenamePrefix+'-objects.txt', objectNum+1)
+obj = objects[objectNum]
+features = ubc_utils.loadTrajectories(videoFilenamePrefix+'-features.txt', max(obj.featureNumbers)+1)
+h = np.loadtxt(videoFilenamePrefix+'-homography.txt')
+
+invh = cvutils.invertHomography(h)
+
+def computeGroundTrajectory(features, homography, timeInterval = None):
+    '''Computes a trajectory for the set of features as the closest point to the ground
+    using the homography in image space'''
+    if not timeInterval:
+        raise Exception('not implemented') # compute from the features
+    
+    yCoordinates = -np.ones((len(features),int(timeInterval.length())))
+    for i,f in enumerate(features):
+        traj = f.getPositions().asArray()
+        imgTraj = cvutils.projectArray(homography, traj)
+        yCoordinates[i,f.getFirstInstant()-timeInterval.first:f.getLastInstant()+1-timeInterval.first] = imgTraj[1,:]
+
+    indices = np.argmax(yCoordinates,0)
+    newTraj = moving.Trajectory()
+    for j,idx in enumerate(indices):
+        newTraj.addPosition(features[idx].getPositionAtInstant(j+timeInterval.first))
+        #newVelocities.addPosition(features[obj.featureNumbers[idx]].getVelocityAtInstant(j+obj.getFirstInstant()))
+
+    return newTraj
+
+def computeMedianTrajectory(features, timeInterval = None):
+    if not timeInterval:
+        raise Exception('not implemented') # compute from the features
+    
+    newTraj = moving.Trajectory()
+    for t in timeInterval:
+        points = []
+        for f in features:
+            if f.existsAtInstant(t):
+                points.append(f.getPositionAtInstant(t).aslist())
+        med = np.median(np.array(points), 0)
+        newTraj.addPositionXY(med[0], med[1])
+
+    return newTraj
+
+# TODO version median: conversion to large matrix will not work, have to do it frame by frame
+
+def kalmanFilter(positions, velocities, processNoiseCov, measurementNoiseCov):
+    kalman=cv.CreateKalman(6, 4)
+    kalman.transition_matrix[0,2]=1
+    kalman.transition_matrix[0,4]=1./2
+    kalman.transition_matrix[1,3]=1
+    kalman.transition_matrix[1,5]=1./2
+    kalman.transition_matrix[2,4]=1
+    kalman.transition_matrix[3,5]=1
+
+    cv.SetIdentity(kalman.measurement_matrix, 1.)
+    cv.SetIdentity(kalman.process_noise_cov, processNoiseCov)
+    cv.SetIdentity(kalman.measurement_noise_cov, measurementNoiseCov)
+    cv.SetIdentity(kalman.error_cov_post, 1.)
+
+    p = positions[0]
+    v = velocities[0]
+    v2 = velocities[2]
+    a = (v2-v).multiply(0.5)
+    kalman.state_post[0,0]=p.x
+    kalman.state_post[1,0]=p.y
+    kalman.state_post[2,0]=v.x
+    kalman.state_post[3,0]=v.y
+    kalman.state_post[4,0]=a.x
+    kalman.state_post[5,0]=a.y
+    
+    filteredPositions = moving.Trajectory()
+    filteredVelocities = moving.Trajectory()
+    measurement = cv.CreateMat(4,1,cv.CV_32FC1)
+    for i in xrange(positions.length()):
+        cv.KalmanPredict(kalman) # no control
+        p = positions[i]
+        v = velocities[i]
+        measurement[0,0] = p.x
+        measurement[1,0] = p.y
+        measurement[2,0] = v.x
+        measurement[3,0] = v.y
+        cv.KalmanCorrect(kalman, measurement)
+        filteredPositions.addPositionXY(kalman.state_post[0,0], kalman.state_post[1,0])
+        filteredVelocities.addPositionXY(kalman.state_post[2,0], kalman.state_post[3,0])
+
+    return (filteredPositions, filteredVelocities)
+
+groundTrajectory = computeGroundTrajectory([features[i] for i in obj.featureNumbers], invh, obj.getTimeInterval())
+(filteredPositions, filteredVelocities) = kalmanFilter(groundTrajectory, obj.getVelocities(), 0.1, 0.1)
+
+#medianTrajectory = computeMedianTrajectory([features[i] for i in obj.featureNumbers], obj.getTimeInterval())
+
+delta = []
+for t in obj.getTimeInterval():
+    p1 = obj.getPositionAtInstant(t)
+    p2 = groundTrajectory[t-obj.getFirstInstant()]
+    delta.append((p1-p2).aslist())
+
+delta = np.median(delta, 0)
+
+translated = moving.Trajectory()
+for t in obj.getTimeInterval():
+    p1 = obj.getPositionAtInstant(t)
+    p1.x -= delta[0]
+    p1.y -= delta[1]
+    translated.addPosition(p1)
+
+plt.clf()
+obj.draw('rx-')
+for fnum in obj.featureNumbers: features[fnum].draw()
+groundTrajectory.draw('bx-')
+filteredPositions.draw('gx-')
+translated.draw('kx-')
+#medianTrajectory.draw('kx-')
+plt.axis('equal')
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/train-object-classification.py	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,66 @@
+#! /usr/bin/env python
+
+import numpy as np
+import sys, argparse
+from cv2 import SVM_RBF, SVM_C_SVC
+
+import cvutils, moving, ml
+
+parser = argparse.ArgumentParser(description='The program trains SVM models to classify road users (pedestrian, cyclist, vehicle) from image samples')
+parser.add_argument('-d', dest = 'directoryName', help = 'parent directory name for the directories containing the samples for the different road users', required = True)
+parser.add_argument('--kernel', dest = 'kernelType', help = 'kernel type for the support vector machine (SVM)', default = SVM_RBF, type = long)
+parser.add_argument('--svm', dest = 'svmType', help = 'SVM type', default = SVM_C_SVC, type = long)
+parser.add_argument('-s', dest = 'rescaleSize', help = 'rescale size of image samples', default = 64, type = int)
+parser.add_argument('-o', dest = 'nOrientations', help = 'number of orientations in HoG', default = 9, type = int)
+parser.add_argument('-p', dest = 'nPixelsPerCell', help = 'number of pixels per cell', default = 8, type = int)
+parser.add_argument('-c', dest = 'nCellsPerBlock', help = 'number of cells per block', default = 2, type = int)
+args = parser.parse_args()
+
+rescaleSize = (args.rescaleSize, args.rescaleSize)
+nPixelsPerCell = (args.nPixelsPerCell, args.nPixelsPerCell)
+nCellsPerBlock = (args.nCellsPerBlock, args.nCellsPerBlock)
+
+imageDirectories = {'pedestrian': args.directoryName + "/Pedestrians/",
+                    'bicycle': args.directoryName + "/Cyclists/",
+                    'car': args.directoryName + "/Vehicles/"}
+
+#directory_model = args.directoryName
+trainingSamplesPBV = {}
+trainingLabelsPBV = {}
+trainingSamplesBV = {}
+trainingLabelsBV = {}
+trainingSamplesPB = {}
+trainingLabelsPB = {}
+trainingSamplesPV = {}
+trainingLabelsPV = {}
+
+for k, v in imageDirectories.iteritems():
+    print('Loading {} samples'.format(k))
+    trainingSamplesPBV[k], trainingLabelsPBV[k] = cvutils.createHOGTrainingSet(v, moving.userType2Num[k], rescaleSize, args.nOrientations, nPixelsPerCell, nCellsPerBlock)
+    if k != 'pedestrian':
+        trainingSamplesBV[k], trainingLabelsBV[k] = cvutils.createHOGTrainingSet(v, moving.userType2Num[k], rescaleSize, args.nOrientations, nPixelsPerCell, nCellsPerBlock)
+    if k != 'car':
+        trainingSamplesPB[k], trainingLabelsPB[k] = cvutils.createHOGTrainingSet(v, moving.userType2Num[k], rescaleSize, args.nOrientations, nPixelsPerCell, nCellsPerBlock)
+    if k != 'bicycle':
+        trainingSamplesPV[k], trainingLabelsPV[k] = cvutils.createHOGTrainingSet(v, moving.userType2Num[k], rescaleSize, args.nOrientations, nPixelsPerCell, nCellsPerBlock)
+
+# Training the Support Vector Machine
+print "Training Pedestrian-Cyclist-Vehicle Model"
+model = ml.SVM(args.svmType, args.kernelType)
+model.train(np.concatenate(trainingSamplesPBV.values()), np.concatenate(trainingLabelsPBV.values()))
+model.save(args.directoryName + "/modelPBV.xml")
+
+print "Training Cyclist-Vehicle Model"
+model = ml.SVM(args.svmType, args.kernelType)
+model.train(np.concatenate(trainingSamplesBV.values()), np.concatenate(trainingLabelsBV.values()))
+model.save(args.directoryName + "/modelBV.xml")
+
+print "Training Pedestrian-Cyclist Model"
+model = ml.SVM(args.svmType, args.kernelType)
+model.train(np.concatenate(trainingSamplesPB.values()), np.concatenate(trainingLabelsPB.values()))
+model.save(args.directoryName + "/modelPB.xml")
+
+print "Training Pedestrian-Vehicle Model"
+model = ml.SVM(args.svmType, args.kernelType)
+model.train(np.concatenate(trainingSamplesPV.values()), np.concatenate(trainingLabelsPV.values()))
+model.save(args.directoryName + "/modelPV.xml")
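As the directory dictionary above suggests, the parent directory passed with -d is expected to contain Pedestrians/, Cyclists/ and Vehicles/ subdirectories of image samples; a hypothetical run with the default HOG and SVM parameters:
$ scripts/train-object-classification.py -d /path/to/classifier-samples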
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/undistort-video.py	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,47 @@
+#! /usr/bin/env python
+
+import sys, argparse
+
+import numpy as np
+import cv2
+
+import cvutils
+from math import ceil, log10
+
+parser = argparse.ArgumentParser(description='''The program converts a video into a series of images corrected for distortion. One can then use mencoder to generate a movie, e.g.
+$ mencoder 'mf://./*.png' -mf fps=[framerate]:type=png -ovc xvid -xvidencopts bitrate=[bitrate] -nosound -o [output.avi]''')
+
+parser.add_argument('-i', dest = 'videoFilename', help = 'filename of the video sequence')
+parser.add_argument('--intrinsic', dest = 'intrinsicCameraMatrixFilename', help = 'name of the intrinsic camera file')
+parser.add_argument('--distortion-coefficients', dest = 'distortionCoefficients', help = 'distortion coefficients', nargs = '*', type = float)
+parser.add_argument('--undistorted-multiplication', dest = 'undistortedImageMultiplication', help = 'undistorted image multiplication', type = float)
+parser.add_argument('-f', dest = 'firstFrameNum', help = 'number of the first frame to process', type = int)
+parser.add_argument('-l', dest = 'lastFrameNum', help = 'number of the last frame to save', type = int)
+
+args = parser.parse_args()
+
+intrinsicCameraMatrix = np.loadtxt(args.intrinsicCameraMatrixFilename)
+#distortionCoefficients = args.distortionCoefficients
+#undistortedImageMultiplication = args.undistortedImageMultiplication
+#firstFrameNum = params.firstFrameNum
+
+capture = cv2.VideoCapture(args.videoFilename)
+width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
+height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
+[map1, map2] = cvutils.computeUndistortMaps(width, height, args.undistortedImageMultiplication, intrinsicCameraMatrix, args.distortionCoefficients)
+if capture.isOpened():
+    ret = True
+    frameNum = args.firstFrameNum
+    capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, args.firstFrameNum)
+    if args.lastFrameNum == None:
+        from sys import maxint
+        lastFrameNum = maxint
+    else:
+        lastFrameNum = args.lastFrameNum
+    nZerosFilename = int(ceil(log10(lastFrameNum)))
+    while ret and frameNum < lastFrameNum:
+        ret, img = capture.read()
+        if ret:
+            img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
+            cv2.imwrite('undistorted-{{:0{}}}.png'.format(nZerosFilename).format(frameNum), img)
+        frameNum += 1
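A plausible invocation, reusing the intrinsic camera file and distortion parameters listed in the sample tracking.cfg (frame numbers are illustrative):
$ scripts/undistort-video.py -i laurier.avi --intrinsic intrinsic-camera.txt --distortion-coefficients -0.11759321 0.0148536 0.00030756 -0.00020578 -0.00091816 --undistorted-multiplication 1.31 -f 0 -l 1000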
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/uninstall-scripts.sh	Fri Dec 05 12:13:53 2014 -0500
@@ -0,0 +1,7 @@
+#!/bin/sh
+installDirname='/usr/local/bin'
+for filename in *
+do
+    echo 'rm '$installDirname/$filename
+    rm $installDirname/$filename
+done
\ No newline at end of file
--- a/tracking.cfg	Thu Apr 18 15:29:33 2013 -0400
+++ b/tracking.cfg	Fri Dec 05 12:13:53 2014 -0500
@@ -1,16 +1,30 @@
 # filename of the video to process
-video-filename = ~/Research/Data/minnesota/Rice-and-University-12_50.avi
+video-filename = laurier.avi
 # filename of the database where results are saved
-database-filename = ~/Research/Data/minnesota/results.sqlite
+database-filename = laurier.sqlite
+# filename of the homography matrix
+homography-filename = laurier-homography.txt
 # filename of the homography matrix
-homography-filename = ~/Research/Data/minnesota/Rice-and-University-12_50-homography.txt
+intrinsic-camera-filename = intrinsic-camera.txt
+# -0.11759321 0.0148536 0.00030756 -0.00020578 -0.00091816
+distortion-coefficients = -0.11759321
+distortion-coefficients = 0.0148536
+distortion-coefficients = 0.00030756 
+distortion-coefficients = -0.00020578 
+distortion-coefficients = -0.00091816
+# undistorted image multiplication
+undistorted-size-multiplication = 1.31
+# Interpolation method for remapping image when correcting for distortion: 0 for INTER_NEAREST - a nearest-neighbor interpolation; 1 for INTER_LINEAR - a bilinear interpolation (used by default); 2 for INTER_CUBIC - a bicubic interpolation over 4x4 pixel neighborhood; 3 for INTER_LANCZOS4
+interpolation-method = 1
 # filename of the mask image (where features are detected)
-mask-filename = ~/Research/Data/minnesota/Rice-and-University-12_50-mask.png
+mask-filename = none
+# undistort the video for feature tracking
+undistort = false
 # load features from database
 load-features = false
 # display trajectories on the video
 display = false
-# original video frame rate
+# original video frame rate (number of frames/s)
 video-fps = 29.97
 # number of digits of precision for all measurements derived from video
 # measurement-precision = 3
@@ -25,12 +39,14 @@
 feature-quality = 0.1
 # minimum distance between features
 min-feature-distanceklt = 5
-# size of the search window at each pyramid level
-window-size = 7
+# size of the block for feature characteristics
+block-size = 7
 # use of Harris corner detector
 use-harris-detector = false
 # k parameter to detect good features to track (OpenCV)
 k = 0.4
+# size of the search window at each pyramid level
+window-size = 7
 # maximal pyramid level in the feature tracking algorithm
 pyramid-level = 5
 # number of displacement to test minimum feature motion
@@ -46,9 +62,11 @@
 # number of frames to compute velocities
 #nframes-velocity = 5
 # maximum number of iterations to stop feature tracking
-max-number-iterations = 20
+max-number-iterations = 30
 # minimum error to reach to stop feature tracking
-min-tracking-error = 0.3
+min-tracking-error = 0.01
+# minimum eigen value of a 2x2 normal matrix of optical flow equations
+min-feature-eig-threshold = 1e-4
 # minimum length of a feature (number of frames) to consider a feature for grouping
 min-feature-time = 20
 # Min Max similarity parameters (Beymer et al. method)
@@ -62,3 +80,46 @@
 min-velocity-cosine = 0.8
 # minimum average number of features per frame to create a vehicle hypothesis
 min-nfeatures-group = 3
+# Road user classification
+# filename of the general ped/cyc/veh SVM classifier
+pbv-svm-filename = modelPBV.xml
+# filename of the cyc/veh SVM classifier
+bv-svm-filename = modelBV.xml
+# maximum pedestrian speed (aggregate: mean, median, 85th centile, etc.) 3.6 m/s
+max-ped-speed = 0.12
+# maximum cyclist speed (aggregate: mean, median, 85th centile, etc.) 10.8 m/s (3xped)
+max-cyc-speed = 0.36
+# mean pedestrian speed and standard deviation (in a normal distribution) 1.36+-0.24 m/s
+mean-ped-speed = 0.45
+std-ped-speed = 0.008
+# mean cyclist speed and standard deviation (in a log-normal distribution) 1.36+-0.24 m/s
+mean-cyc-speed = 0.45
+std-cyc-speed = 0.008
+# mean vehicle speed and standard deviation (in a normal distribution) 5.12+-2.11 m/s
+mean-veh-speed = 0.17
+std-veh-speed = 0.07
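Note on units (an inference from the values, not a documented convention): these classification speeds appear to be expressed in world units (metres) per frame rather than per second; with the 29.97 frames/s configured above, 3.6 m/s / 29.97 ≈ 0.12 m/frame matches max-ped-speed and 10.8 / 29.97 ≈ 0.36 matches max-cyc-speed.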
+# Safety analysis
+# maximum speed when predicting future motion (km/h)
+max-predicted-speed = 50
+# time horizon for collision prediction (s)
+prediction-time-horizon = 5
+# collision distance threshold (m)
+collision-distance = 1.8
+# option to compute crossing zones and predicted PET
+crossing-zones = false
+# prediction method: cv, na, ps
+prediction-method = na
+# number of predicted trajectories (use depends on prediction method)
+npredicted-trajectories = 10
+# maximum acceleration for normal adaptation input symmetric distribution (m/s2)
+max-normal-acceleration = 2
+# maximum steering for normal adaptation input symmetric distribution (rad/s)
+max-normal-steering = 0.2
+# minimum acceleration for input distribution (m/s2) (extreme values used for evasive action distributions)
+min-extreme-acceleration = -9.1
+# maximum acceleration for input distribution (m/s2) (extreme values used for evasive action distributions)
+max-extreme-acceleration = 4.3
+# maximum steering for input distribution (rad/s) (extreme values used for evasive action distributions)
+max-extreme-steering = 0.5
+# use feature positions and velocities for prediction
+use-features-prediction = true
--- a/trafficintelligence.vcxproj	Thu Apr 18 15:29:33 2013 -0400
+++ b/trafficintelligence.vcxproj	Fri Dec 05 12:13:53 2014 -0500
@@ -14,6 +14,8 @@
     <ClCompile Include="..\trajectorymanagementandanalysis\trunk\src\TrajectoryManagementAndAnalysis\src\DBSQLiteAccess.cpp" />
     <ClCompile Include="c\cvutils.cpp" />
     <ClCompile Include="c\feature-based-tracking.cpp" />
+    <ClCompile Include="c\InputFrameListModule.cpp" />
+    <ClCompile Include="c\InputVideoFileModule.cpp" />
     <ClCompile Include="c\Motion.cpp" />
     <ClCompile Include="c\Parameters.cpp" />
     <ClCompile Include="c\utils.cpp" />
@@ -21,6 +23,9 @@
   <ItemGroup>
     <ClInclude Include="include\catch.hpp" />
     <ClInclude Include="include\cvutils.hpp" />
+    <ClInclude Include="include\InputFrameListModule.h" />
+    <ClInclude Include="include\InputFrameProviderIface.h" />
+    <ClInclude Include="include\InputVideoFileModule.h" />
     <ClInclude Include="include\learning.hpp" />
     <ClInclude Include="include\Motion.hpp" />
     <ClInclude Include="include\Parameters.hpp" />